GT AI OS Community Edition v2.0.33
Security hardening release addressing CodeQL and Dependabot alerts: - Fix stack trace exposure in error responses - Add SSRF protection with DNS resolution checking - Implement proper URL hostname validation (replaces substring matching) - Add centralized path sanitization to prevent path traversal - Fix ReDoS vulnerability in email validation regex - Improve HTML sanitization in validation utilities - Fix capability wildcard matching in auth utilities - Update glob dependency to address CVE - Add CodeQL suppression comments for verified false positives 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
282
apps/tenant-app/src/lib/__tests__/markdown-parser.test.ts
Normal file
282
apps/tenant-app/src/lib/__tests__/markdown-parser.test.ts
Normal file
@@ -0,0 +1,282 @@
|
||||
/**
|
||||
* Unit Tests for Markdown Parser
|
||||
*
|
||||
* Tests AST-based parsing for export functionality
|
||||
*/
|
||||
|
||||
import { parseMarkdown, extractInlineFormatting, parseMarkdownToAST } from '../markdown-parser';
|
||||
|
||||
// End-to-end coverage for parseMarkdown(): each nested describe exercises one
// extraction category (links, headers, code blocks, Mermaid, tables, lists,
// blockquotes, character detection) plus error handling and edge cases.
describe('parseMarkdown', () => {
  describe('Links', () => {
    it('should extract simple links', () => {
      const content = 'This is a [test link](https://example.com) in text.';
      const result = parseMarkdown(content);

      expect(result.links).toHaveLength(1);
      expect(result.links[0]).toMatchObject({
        text: 'test link',
        url: 'https://example.com',
      });
    });

    it('should extract multiple links', () => {
      const content = '[Link 1](https://example.com) and [Link 2](https://google.com)';
      const result = parseMarkdown(content);

      expect(result.links).toHaveLength(2);
      expect(result.links[0].url).toBe('https://example.com');
      expect(result.links[1].url).toBe('https://google.com');
    });

    it('should extract links with titles', () => {
      // Markdown link title syntax: [text](url "title")
      const content = '[Link](https://example.com "Title text")';
      const result = parseMarkdown(content);

      expect(result.links[0].title).toBe('Title text');
    });

    it('should handle relative links', () => {
      // Relative URLs must pass through unmodified (no base resolution).
      const content = '[Docs](/docs/guide)';
      const result = parseMarkdown(content);

      expect(result.links[0].url).toBe('/docs/guide');
    });
  });

  describe('Headers', () => {
    it('should extract headers of all levels', () => {
      const content = `
# H1
## H2
### H3
#### H4
##### H5
###### H6
`;
      const result = parseMarkdown(content);

      expect(result.headers).toHaveLength(6);
      expect(result.headers[0]).toMatchObject({ level: 1, text: 'H1' });
      expect(result.headers[1]).toMatchObject({ level: 2, text: 'H2' });
      expect(result.headers[5]).toMatchObject({ level: 6, text: 'H6' });
    });

    it('should extract header text with inline formatting', () => {
      const content = '## Header with **bold** text';
      const result = parseMarkdown(content);

      // Formatting markers are stripped; only the plain text is kept.
      expect(result.headers[0].text).toBe('Header with bold text');
    });
  });

  describe('Code Blocks', () => {
    it('should extract code blocks with language', () => {
      const content = '```python\nprint("Hello")\n```';
      const result = parseMarkdown(content);

      expect(result.codeBlocks).toHaveLength(1);
      expect(result.codeBlocks[0]).toMatchObject({
        language: 'python',
        code: 'print("Hello")',
      });
    });

    it('should extract code blocks without language', () => {
      // A bare fence has no info string, so language is null (not '').
      const content = '```\nplain code\n```';
      const result = parseMarkdown(content);

      expect(result.codeBlocks[0].language).toBeNull();
      expect(result.codeBlocks[0].code).toBe('plain code');
    });

    it('should separate Mermaid diagrams from regular code', () => {
      const content = `
\`\`\`python
print("code")
\`\`\`

\`\`\`mermaid
graph TD
A --> B
\`\`\`
`;
      const result = parseMarkdown(content);

      // Mermaid fences must land in mermaidBlocks, never in codeBlocks.
      expect(result.codeBlocks).toHaveLength(1);
      expect(result.mermaidBlocks).toHaveLength(1);
      expect(result.codeBlocks[0].language).toBe('python');
      expect(result.mermaidBlocks[0].code).toContain('graph TD');
    });
  });

  describe('Mermaid Diagrams', () => {
    it('should extract Mermaid diagram code', () => {
      const content = `
\`\`\`mermaid
graph TD
A[Start] --> B[End]
\`\`\`
`;
      const result = parseMarkdown(content);

      expect(result.mermaidBlocks).toHaveLength(1);
      expect(result.mermaidBlocks[0].code).toContain('graph TD');
      expect(result.mermaidBlocks[0].code).toContain('A[Start]');
    });

    it('should extract multiple Mermaid diagrams', () => {
      const content = `
\`\`\`mermaid
graph TD
A --> B
\`\`\`

\`\`\`mermaid
sequenceDiagram
User->>System: Request
\`\`\`
`;
      const result = parseMarkdown(content);

      // Diagram order in the output must follow document order.
      expect(result.mermaidBlocks).toHaveLength(2);
      expect(result.mermaidBlocks[0].code).toContain('graph TD');
      expect(result.mermaidBlocks[1].code).toContain('sequenceDiagram');
    });
  });

  describe('Tables', () => {
    it('should extract table headers and rows', () => {
      const content = `
| Header 1 | Header 2 |
|----------|----------|
| Cell 1 | Cell 2 |
| Cell 3 | Cell 4 |
`;
      const result = parseMarkdown(content);

      // Header row and delimiter row are consumed; only data rows remain.
      expect(result.tables).toHaveLength(1);
      expect(result.tables[0].headers).toEqual(['Header 1', 'Header 2']);
      expect(result.tables[0].rows).toHaveLength(2);
      expect(result.tables[0].rows[0]).toEqual(['Cell 1', 'Cell 2']);
    });
  });

  describe('Lists', () => {
    it('should extract unordered lists', () => {
      const content = `
- Item 1
- Item 2
- Item 3
`;
      const result = parseMarkdown(content);

      expect(result.lists).toHaveLength(1);
      expect(result.lists[0].type).toBe('unordered');
      expect(result.lists[0].items).toHaveLength(3);
    });

    it('should extract ordered lists', () => {
      const content = `
1. First
2. Second
3. Third
`;
      const result = parseMarkdown(content);

      expect(result.lists).toHaveLength(1);
      expect(result.lists[0].type).toBe('ordered');
      expect(result.lists[0].items).toHaveLength(3);
    });
  });

  describe('Blockquotes', () => {
    it('should extract blockquote text', () => {
      const content = '> This is a quote';
      const result = parseMarkdown(content);

      // The leading '> ' marker is stripped from the extracted text.
      expect(result.blockquotes).toHaveLength(1);
      expect(result.blockquotes[0].text).toBe('This is a quote');
    });
  });

  describe('Character Detection', () => {
    it('should detect emoji', () => {
      const content = 'Hello 😀 world 🚀';
      const result = parseMarkdown(content);

      expect(result.hasEmoji).toBe(true);
    });

    it('should not detect emoji in regular text', () => {
      const content = 'Hello world';
      const result = parseMarkdown(content);

      expect(result.hasEmoji).toBe(false);
    });

    it('should detect CJK characters', () => {
      // CJK is flagged via hasUnsupportedChars (export font coverage).
      const content = 'Hello 你好 world';
      const result = parseMarkdown(content);

      expect(result.hasUnsupportedChars).toBe(true);
    });

    it('should detect RTL characters', () => {
      // RTL scripts (Arabic here) are also flagged as unsupported.
      const content = 'Hello مرحبا world';
      const result = parseMarkdown(content);

      expect(result.hasUnsupportedChars).toBe(true);
    });
  });

  describe('Error Handling', () => {
    it('should throw on invalid input', () => {
      // Empty string, null and undefined all share one error message.
      expect(() => parseMarkdown('')).toThrow('Invalid markdown content');
      expect(() => parseMarkdown(null as any)).toThrow('Invalid markdown content');
      expect(() => parseMarkdown(undefined as any)).toThrow('Invalid markdown content');
    });
  });

  describe('Edge Cases', () => {
    it('should handle nested formatting', () => {
      const content = '**bold with *italic* inside**';
      const result = parseMarkdown(content);

      // Parser should extract text even with nested formatting
      expect(result).toBeDefined();
    });

    it('should handle empty code blocks', () => {
      const content = '```\n\n```';
      const result = parseMarkdown(content);

      expect(result.codeBlocks).toHaveLength(1);
      expect(result.codeBlocks[0].code).toBe('');
    });

    it('should handle malformed markdown gracefully', () => {
      const content = '[Unclosed link(https://example.com';

      // Should not throw, remark is forgiving
      const result = parseMarkdown(content);
      expect(result).toBeDefined();
    });
  });
});

// Lower-level check on the raw mdast tree returned by parseMarkdownToAST().
describe('parseMarkdownToAST', () => {
  it('should return valid AST', () => {
    const content = '# Header\n\nParagraph';
    const ast = parseMarkdownToAST(content);

    // mdast root node with at least one child for non-empty input.
    expect(ast.type).toBe('root');
    expect(ast.children).toBeDefined();
    expect(ast.children.length).toBeGreaterThan(0);
  });

  it('should throw on parsing errors', () => {
    // remark is very forgiving, so this is hard to trigger
    // but we test the error handling path exists
    expect(() => parseMarkdownToAST(null as any)).toThrow();
  });
});
|
||||
42
apps/tenant-app/src/lib/access-helpers.ts
Normal file
42
apps/tenant-app/src/lib/access-helpers.ts
Normal file
@@ -0,0 +1,42 @@
|
||||
/**
|
||||
* Access Level Display Helpers
|
||||
*
|
||||
* Provides consistent display mapping for access levels across the application.
|
||||
* Backend uses 'individual', 'team', 'organization' but UX displays 'Myself', 'Team', 'Organization'.
|
||||
*/
|
||||
|
||||
// Canonical backend access-level values; the UI maps these to friendly labels.
export type AccessLevel = 'individual' | 'team' | 'organization';
|
||||
|
||||
/**
|
||||
* Get user-friendly display name for access level
|
||||
*/
|
||||
export function getAccessLevelDisplay(level: AccessLevel): string {
|
||||
const displayMap: Record<AccessLevel, string> = {
|
||||
'individual': 'Myself',
|
||||
'team': 'Team',
|
||||
'organization': 'Organization'
|
||||
};
|
||||
return displayMap[level] || level;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get access level description for UI
|
||||
*/
|
||||
export function getAccessLevelDescription(level: AccessLevel, context: 'agent' | 'dataset'): string {
|
||||
const descriptions: Record<AccessLevel, Record<string, string>> = {
|
||||
'individual': {
|
||||
'agent': 'Only you can access this Agent',
|
||||
'dataset': 'Only you can access this dataset'
|
||||
},
|
||||
'team': {
|
||||
'agent': 'Share with specific Team members',
|
||||
'dataset': 'Share with a group of users'
|
||||
},
|
||||
'organization': {
|
||||
'agent': 'Available to all Organization users',
|
||||
'dataset': 'This dataset is available to all users in your Organization'
|
||||
}
|
||||
};
|
||||
|
||||
return descriptions[level]?.[context] || '';
|
||||
}
|
||||
678
apps/tenant-app/src/lib/animations/gt-animations.ts
Normal file
678
apps/tenant-app/src/lib/animations/gt-animations.ts
Normal file
@@ -0,0 +1,678 @@
|
||||
/**
|
||||
* GT 2.0 Animation Library
|
||||
* Apple-inspired physics with AI-specific animations for premium UX
|
||||
*/
|
||||
|
||||
import { Variants, Transition } from 'framer-motion';
|
||||
|
||||
// Animation timing functions (Apple-inspired)
// Each curve is a cubic-bezier tuple [x1, y1, x2, y2] as consumed by
// framer-motion's `ease` option.
export const easing = {
  // Standard Apple easing curve - used for most UI animations
  standard: [0.4, 0.0, 0.2, 1] as [number, number, number, number],
  // Spring physics for interactive elements
  spring: [0.34, 1.56, 0.64, 1] as [number, number, number, number],
  // Smooth deceleration for entering elements
  decelerate: [0.0, 0.0, 0.2, 1] as [number, number, number, number],
  // Quick acceleration for exiting elements
  accelerate: [0.4, 0.0, 1, 1] as [number, number, number, number],
  // Gentle bounce for success states (y values outside [0,1] overshoot)
  bounce: [0.68, -0.55, 0.265, 1.55] as [number, number, number, number],
};
|
||||
|
||||
// Neural pulse animation for AI thinking states
// (looping scale/opacity breathing effect)
export const neuralPulse: Variants = {
  initial: {
    scale: 1,
    opacity: 0.3,
  },
  animate: {
    scale: [1, 1.2, 1],
    opacity: [0.3, 1, 0.3],
    transition: {
      duration: 1.4,
      repeat: Infinity,
      ease: easing.spring,
    }
  }
};

// Synaptic flow for data connections and network animations
// (SVG pathLength draw-on with a fade in/out envelope)
export const synapticFlow: Variants = {
  initial: {
    pathLength: 0,
    opacity: 0,
  },
  animate: {
    pathLength: 1,
    opacity: [0, 1, 1, 0],
    transition: {
      pathLength: {
        duration: 2,
        ease: "easeInOut",
      },
      opacity: {
        duration: 2,
        // keyframe timing: fade in over first 20%, out over last 20%
        times: [0, 0.2, 0.8, 1],
      },
      repeat: Infinity,
    }
  }
};

// Glass morphism fade-in for modern UI elements
export const glassMorphism: Variants = {
  initial: {
    opacity: 0,
    backdropFilter: "blur(0px)",
    background: "rgba(255, 255, 255, 0)",
  },
  animate: {
    opacity: 1,
    backdropFilter: "blur(10px)",
    background: "rgba(255, 255, 255, 0.1)",
    transition: {
      duration: 0.3,
      ease: easing.standard,
    }
  }
};

// Message bubble appearance with bounce (enter from below, exit upward)
export const messageBubble: Variants = {
  initial: {
    opacity: 0,
    scale: 0.8,
    y: 20,
  },
  animate: {
    opacity: 1,
    scale: 1,
    y: 0,
    transition: {
      duration: 0.4,
      ease: easing.bounce,
    }
  },
  exit: {
    opacity: 0,
    scale: 0.9,
    y: -10,
    transition: {
      duration: 0.2,
      ease: easing.accelerate,
    }
  }
};

// Staggered list animation for sequential reveals
// (parent container; children use staggerItem below)
export const staggerContainer: Variants = {
  initial: {},
  animate: {
    transition: {
      staggerChildren: 0.1,
      delayChildren: 0.2,
    }
  }
};

export const staggerItem: Variants = {
  initial: {
    opacity: 0,
    x: -20,
  },
  animate: {
    opacity: 1,
    x: 0,
    transition: {
      duration: 0.3,
      ease: easing.decelerate,
    }
  }
};

// Confidence meter animation with color transitions
// (variant names are selected via animationUtils.getConfidenceAnimation)
export const confidenceMeter: Variants = {
  low: {
    width: "25%",
    backgroundColor: "#ef4444",
    transition: { duration: 0.5, ease: easing.standard }
  },
  medium: {
    width: "50%",
    backgroundColor: "#fbbf24",
    transition: { duration: 0.5, ease: easing.standard }
  },
  high: {
    width: "75%",
    backgroundColor: "#4ade80",
    transition: { duration: 0.5, ease: easing.standard }
  },
  veryHigh: {
    width: "100%",
    backgroundColor: "#00d084",
    transition: { duration: 0.5, ease: easing.standard }
  }
};
|
||||
|
||||
// Avatar personality animations - each type has unique characteristics.
// Each personality exposes the same four states (idle / thinking / speaking /
// success) so callers can switch personality without changing state names.
export const avatarAnimations = {
  geometric: {
    idle: {
      rotate: [0, 90, 180, 270, 360],
      transition: {
        duration: 20,
        repeat: Infinity,
        ease: "linear",
      }
    },
    thinking: {
      rotate: [0, -10, 10, -10, 0],
      scale: [1, 1.1, 0.9, 1.1, 1],
      transition: {
        duration: 0.5,
        repeat: Infinity,
        ease: easing.spring,
      }
    },
    speaking: {
      scale: [1, 1.05, 1],
      transition: {
        duration: 0.3,
        repeat: Infinity,
        ease: easing.standard,
      }
    },
    success: {
      scale: [1, 1.2, 1],
      rotate: [0, 360],
      transition: {
        duration: 0.6,
        ease: easing.bounce,
      }
    }
  },
  organic: {
    idle: {
      scale: [1, 1.02, 1],
      transition: {
        duration: 4,
        repeat: Infinity,
        ease: "easeInOut",
      }
    },
    thinking: {
      scale: [1, 1.2, 1.1, 1.2, 1],
      opacity: [1, 0.8, 1, 0.8, 1],
      transition: {
        duration: 2,
        repeat: Infinity,
        ease: easing.decelerate,
      }
    },
    speaking: {
      scale: [1, 1.1, 1.05, 1.1, 1],
      transition: {
        duration: 0.8,
        repeat: Infinity,
        ease: "easeInOut",
      }
    },
    success: {
      scale: [1, 1.3, 1],
      opacity: [1, 0.7, 1],
      transition: {
        duration: 0.8,
        ease: easing.bounce,
      }
    }
  },
  minimal: {
    // Minimal personality animates opacity only (plus a small success scale).
    idle: {
      opacity: [0.7, 1, 0.7],
      transition: {
        duration: 3,
        repeat: Infinity,
        ease: "easeInOut",
      }
    },
    thinking: {
      opacity: [1, 0.3, 1],
      transition: {
        duration: 1,
        repeat: Infinity,
        ease: easing.standard,
      }
    },
    speaking: {
      opacity: [1, 0.9, 1],
      transition: {
        duration: 0.5,
        repeat: Infinity,
        ease: easing.standard,
      }
    },
    success: {
      opacity: [0.7, 1, 0.7],
      scale: [1, 1.1, 1],
      transition: {
        duration: 0.4,
        ease: easing.standard,
      }
    }
  },
  technical: {
    // Technical personality uses 3D rotations (rotateX/Y/Z).
    idle: {
      rotateY: [0, 360],
      transition: {
        duration: 10,
        repeat: Infinity,
        ease: "linear",
      }
    },
    thinking: {
      rotateX: [-5, 5, -5],
      rotateY: [-5, 5, -5],
      transition: {
        duration: 0.3,
        repeat: Infinity,
        ease: easing.spring,
      }
    },
    speaking: {
      rotateZ: [-2, 2, -2],
      scale: [1, 1.02, 1],
      transition: {
        duration: 0.2,
        repeat: Infinity,
        ease: easing.standard,
      }
    },
    success: {
      rotateY: [0, 720],
      scale: [1, 1.15, 1],
      transition: {
        duration: 1,
        ease: easing.bounce,
      }
    }
  }
};
|
||||
|
||||
// Typing animation for character-by-character text reveal.
// `visible` is a dynamic variant: pass the character index as `custom`
// so each character is delayed by i * 0.03s.
export const typingReveal = {
  hidden: {
    opacity: 0,
    display: "none",
  },
  visible: (i: number) => ({
    opacity: 1,
    display: "inline",
    transition: {
      delay: i * 0.03,
      duration: 0.1,
    }
  })
};

// Network graph animations for RAG visualization
export const networkNode: Variants = {
  initial: {
    scale: 0,
    opacity: 0,
  },
  animate: {
    scale: 1,
    opacity: 1,
    transition: {
      duration: 0.5,
      ease: easing.spring,
    }
  },
  hover: {
    scale: 1.2,
    transition: {
      duration: 0.2,
      ease: easing.standard,
    }
  },
  selected: {
    scale: 1.3,
    boxShadow: "0 0 20px rgba(0, 208, 132, 0.5)",
    transition: {
      duration: 0.3,
      ease: easing.spring,
    }
  },
  exit: {
    scale: 0,
    opacity: 0,
    transition: {
      duration: 0.3,
      ease: easing.accelerate,
    }
  }
};

// Citation and reference card animations
export const citationCard: Variants = {
  initial: {
    opacity: 0,
    y: 10,
    scale: 0.95,
  },
  animate: {
    opacity: 1,
    y: 0,
    scale: 1,
    transition: {
      duration: 0.3,
      ease: easing.decelerate,
    }
  },
  hover: {
    y: -2,
    boxShadow: "0 10px 30px rgba(0, 208, 132, 0.15)",
    transition: {
      duration: 0.2,
      ease: easing.standard,
    }
  },
  tap: {
    scale: 0.98,
    transition: {
      duration: 0.1,
    }
  }
};

// Loading skeleton shimmer animation
// (element is expected to have a wide gradient background for the sweep)
export const skeleton: Variants = {
  initial: {
    backgroundPosition: "-200% 0",
  },
  animate: {
    backgroundPosition: "200% 0",
    transition: {
      duration: 1.5,
      repeat: Infinity,
      ease: "linear",
    }
  }
};

// Neural border animation for active/focused elements
// (brand-green border pulses from transparent to opaque and back)
export const neuralBorder = {
  initial: {
    borderColor: "rgba(0, 208, 132, 0)",
  },
  animate: {
    borderColor: [
      "rgba(0, 208, 132, 0)",
      "rgba(0, 208, 132, 0.5)",
      "rgba(0, 208, 132, 1)",
      "rgba(0, 208, 132, 0.5)",
      "rgba(0, 208, 132, 0)",
    ],
    transition: {
      duration: 2,
      repeat: Infinity,
      ease: "easeInOut",
    }
  }
};

// Slide transitions for panel and modal animations
export const slideUp: Variants = {
  initial: {
    y: "100%",
    opacity: 0,
  },
  animate: {
    y: 0,
    opacity: 1,
    transition: {
      duration: 0.4,
      ease: easing.decelerate,
    }
  },
  exit: {
    y: "100%",
    opacity: 0,
    transition: {
      duration: 0.3,
      ease: easing.accelerate,
    }
  }
};

export const slideRight: Variants = {
  initial: {
    x: "-100%",
    opacity: 0,
  },
  animate: {
    x: 0,
    opacity: 1,
    transition: {
      duration: 0.4,
      ease: easing.decelerate,
    }
  },
  exit: {
    x: "-100%",
    opacity: 0,
    transition: {
      duration: 0.3,
      ease: easing.accelerate,
    }
  }
};

export const slideLeft: Variants = {
  initial: {
    x: "100%",
    opacity: 0,
  },
  animate: {
    x: 0,
    opacity: 1,
    transition: {
      duration: 0.4,
      ease: easing.decelerate,
    }
  },
  exit: {
    x: "100%",
    opacity: 0,
    transition: {
      duration: 0.3,
      ease: easing.accelerate,
    }
  }
};

// Scale animations for buttons and interactive elements
export const scaleOnHover: Variants = {
  initial: { scale: 1 },
  hover: {
    scale: 1.05,
    transition: {
      duration: 0.2,
      ease: easing.standard,
    }
  },
  tap: {
    scale: 0.95,
    transition: {
      duration: 0.1,
    }
  }
};

// Fade and blur effects for overlays
export const fadeBlur: Variants = {
  initial: {
    opacity: 0,
    backdropFilter: "blur(0px)",
  },
  animate: {
    opacity: 1,
    backdropFilter: "blur(8px)",
    transition: {
      duration: 0.2,
      ease: easing.standard,
    }
  },
  exit: {
    opacity: 0,
    backdropFilter: "blur(0px)",
    transition: {
      duration: 0.2,
      ease: easing.accelerate,
    }
  }
};
|
||||
|
||||
// Export animation utilities
export const animationUtils = {
  // Check if user prefers reduced motion (false during SSR, where
  // window is unavailable).
  shouldReduceMotion: (): boolean => {
    if (typeof window === 'undefined') return false;
    return window.matchMedia('(prefers-reduced-motion: reduce)').matches;
  },

  // Get appropriate animation based on reduced motion preference:
  // keeps initial/animate targets but zeroes the transition duration.
  // NOTE(review): assumes animation.animate is a plain variant object —
  // spreading a dynamic (function) variant would drop its keys; confirm callers.
  getAnimation: (animation: Variants): Variants => {
    if (animationUtils.shouldReduceMotion()) {
      return {
        initial: animation.initial,
        animate: {
          ...animation.animate,
          transition: { duration: 0 }
        }
      };
    }
    return animation;
  },

  // Create custom spring animation with physics
  createSpring: (stiffness = 300, damping = 30, mass = 1): Transition => ({
    type: "spring",
    stiffness,
    damping,
    mass,
  }),

  // Create custom tween animation
  createTween: (duration = 0.3, ease = easing.standard): Transition => ({
    type: "tween",
    duration,
    ease,
  }),

  // Create staggered animation for lists
  createStagger: (staggerDelay = 0.1, delayChildren = 0): Transition => ({
    staggerChildren: staggerDelay,
    delayChildren,
  }),

  // Get confidence animation based on score.
  // Returns a variant name for confidenceMeter; thresholds are exclusive
  // (e.g. exactly 0.9 maps to 'high', not 'veryHigh').
  getConfidenceAnimation: (score: number): string => {
    if (score > 0.9) return 'veryHigh';
    if (score > 0.7) return 'high';
    if (score > 0.5) return 'medium';
    return 'low';
  },

  // Get personality animation set (falls back to 'minimal' defensively,
  // though the parameter type already restricts the input).
  getPersonalityAnimations: (personality: 'geometric' | 'organic' | 'minimal' | 'technical') => {
    return avatarAnimations[personality] || avatarAnimations.minimal;
  },

  // Create dynamic color animation.
  // NOTE(review): fromColor/toColor are accepted but never used — the
  // returned Transition only carries timing. Confirm whether color
  // interpolation was intended here or the parameters should be dropped.
  createColorTransition: (fromColor: string, toColor: string, duration = 0.3): Transition => ({
    duration,
    ease: easing.standard,
  }),
};
|
||||
|
||||
// Preset animation combinations for common use cases
export const presets = {
  // Card hover effect: scaleOnHover plus an elevated drop shadow.
  cardHover: {
    ...scaleOnHover,
    hover: {
      ...scaleOnHover.hover,
      boxShadow: "0 20px 40px rgba(0, 0, 0, 0.1)",
    }
  },

  // Modal enter/exit
  modal: {
    initial: { opacity: 0, scale: 0.95 },
    animate: {
      opacity: 1,
      scale: 1,
      transition: {
        duration: 0.2,
        ease: easing.decelerate,
      }
    },
    exit: {
      opacity: 0,
      scale: 0.95,
      transition: {
        duration: 0.15,
        ease: easing.accelerate,
      }
    }
  },

  // Success notification: springs in from the right, slides back out.
  successNotification: {
    initial: { opacity: 0, x: 100, scale: 0.3 },
    animate: {
      opacity: 1,
      x: 0,
      scale: 1,
      transition: {
        type: "spring",
        stiffness: 500,
        damping: 30,
      }
    },
    exit: {
      opacity: 0,
      x: 100,
      transition: {
        duration: 0.2,
        ease: easing.accelerate,
      }
    }
  },
};
|
||||
|
||||
// Export everything for easy importing
// (aggregate default export mirroring the named exports above)
export default {
  easing,
  neuralPulse,
  synapticFlow,
  glassMorphism,
  messageBubble,
  staggerContainer,
  staggerItem,
  confidenceMeter,
  avatarAnimations,
  typingReveal,
  networkNode,
  citationCard,
  skeleton,
  neuralBorder,
  slideUp,
  slideRight,
  slideLeft,
  scaleOnHover,
  fadeBlur,
  animationUtils,
  presets,
};
|
||||
449
apps/tenant-app/src/lib/api.ts
Normal file
449
apps/tenant-app/src/lib/api.ts
Normal file
@@ -0,0 +1,449 @@
|
||||
import { mockApi } from './mock-api';
|
||||
import { isTokenExpired, getAuthToken } from '@/services/auth';
|
||||
|
||||
// Enable mock mode when backend is not available
// (build-time flag: NEXT_PUBLIC_USE_MOCK_API=true routes all calls to mockApi)
const USE_MOCK_API = process.env.NEXT_PUBLIC_USE_MOCK_API === 'true';

// API client configuration - use relative paths, proxied by Next.js
const API_BASE_URL = '';
|
||||
|
||||
/**
|
||||
* Handle server-side session signals (OWASP/NIST compliance - Issue #264)
|
||||
* The server is the authoritative source of truth for session state.
|
||||
* This function processes X-Session-Warning and X-Session-Expired headers.
|
||||
*/
|
||||
function handleSessionHeaders(response: Response): void {
|
||||
if (typeof window === 'undefined') return;
|
||||
|
||||
// Check for session expiration signal from server
|
||||
const sessionExpired = response.headers.get('X-Session-Expired');
|
||||
if (sessionExpired) {
|
||||
// Server says session is expired - dispatch event for IdleTimerProvider
|
||||
window.dispatchEvent(new CustomEvent('session-expired', {
|
||||
detail: { reason: sessionExpired }
|
||||
}));
|
||||
return;
|
||||
}
|
||||
|
||||
// Check for session warning signal from server
|
||||
const sessionWarning = response.headers.get('X-Session-Warning');
|
||||
if (sessionWarning) {
|
||||
const secondsRemaining = parseInt(sessionWarning, 10);
|
||||
if (!isNaN(secondsRemaining)) {
|
||||
// Server says session is about to expire - dispatch event for IdleTimerProvider
|
||||
window.dispatchEvent(new CustomEvent('session-warning', {
|
||||
detail: { secondsRemaining }
|
||||
}));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Handle session expired response (401 with X-Session-Expired header)
|
||||
*/
|
||||
function handleSessionExpiredResponse(response: Response): void {
|
||||
if (typeof window === 'undefined') return;
|
||||
|
||||
const sessionExpired = response.headers.get('X-Session-Expired');
|
||||
if (response.status === 401 && sessionExpired) {
|
||||
// Clear storage and redirect to login with session expired indicator
|
||||
localStorage.removeItem('gt2_token');
|
||||
localStorage.removeItem('gt2_user');
|
||||
localStorage.removeItem('gt2_tenant');
|
||||
window.location.href = `/auth/login?session_expired=${sessionExpired}`;
|
||||
}
|
||||
}
|
||||
|
||||
// Helper function for API calls
|
||||
async function apiCall(endpoint: string, options: RequestInit = {}) {
|
||||
const token = typeof window !== 'undefined' ? getAuthToken() : null;
|
||||
|
||||
// Note: We no longer check client-side token expiry here as the server is authoritative
|
||||
// The server-side session validation middleware will return appropriate headers
|
||||
|
||||
const headers: HeadersInit = {
|
||||
'Content-Type': 'application/json',
|
||||
...(token && { Authorization: `Bearer ${token}` }),
|
||||
...options.headers,
|
||||
};
|
||||
|
||||
const response = await fetch(`${API_BASE_URL}${endpoint}`, {
|
||||
...options,
|
||||
headers,
|
||||
});
|
||||
|
||||
// Process server-side session signals (Issue #264)
|
||||
handleSessionHeaders(response);
|
||||
|
||||
if (!response.ok) {
|
||||
// Handle session expired responses specially
|
||||
handleSessionExpiredResponse(response);
|
||||
throw new Error(`API Error: ${response.status} ${response.statusText}`);
|
||||
}
|
||||
|
||||
return response.json();
|
||||
}
|
||||
|
||||
// API endpoints
|
||||
export const api = {
|
||||
// Auth endpoints
|
||||
auth: {
|
||||
login: async (email: string, password: string) =>
|
||||
USE_MOCK_API
|
||||
? mockApi.auth.login(email, password)
|
||||
: apiCall('/api/v1/auth/login', {
|
||||
method: 'POST',
|
||||
body: JSON.stringify({ email, password }),
|
||||
}),
|
||||
|
||||
logout: async () =>
|
||||
USE_MOCK_API
|
||||
? mockApi.auth.logout()
|
||||
: apiCall('/api/v1/auth/logout', { method: 'POST' }),
|
||||
|
||||
getProfile: async () =>
|
||||
USE_MOCK_API
|
||||
? mockApi.auth.getProfile()
|
||||
: apiCall('/api/v1/user/profile'),
|
||||
},
|
||||
|
||||
// Chat/Conversation endpoints
|
||||
conversations: {
|
||||
list: async () =>
|
||||
USE_MOCK_API
|
||||
? mockApi.conversations.list()
|
||||
: apiCall('/api/v1/conversations'),
|
||||
|
||||
create: async (data: any) =>
|
||||
USE_MOCK_API
|
||||
? mockApi.conversations.create(data)
|
||||
: apiCall('/api/v1/conversations', {
|
||||
method: 'POST',
|
||||
body: JSON.stringify(data),
|
||||
}),
|
||||
|
||||
get: async (id: string) =>
|
||||
USE_MOCK_API
|
||||
? mockApi.conversations.get(id)
|
||||
: apiCall(`/api/v1/conversations/${id}`),
|
||||
|
||||
getMessages: async (id: string) =>
|
||||
USE_MOCK_API
|
||||
? mockApi.conversations.getMessages(id)
|
||||
: apiCall(`/api/v1/conversations/${id}/messages`),
|
||||
|
||||
sendMessage: async (conversationId: string, content: string) =>
|
||||
USE_MOCK_API
|
||||
? mockApi.conversations.sendMessage(conversationId, content)
|
||||
: apiCall(`/api/v1/conversations/${conversationId}/messages`, {
|
||||
method: 'POST',
|
||||
body: JSON.stringify({ content }),
|
||||
}),
|
||||
},
|
||||
|
||||
// Agent endpoints
|
||||
agents: {
|
||||
list: async () =>
|
||||
USE_MOCK_API
|
||||
? mockApi.agents.list()
|
||||
: apiCall('/api/v1/agents'),
|
||||
|
||||
create: async (data: any) =>
|
||||
USE_MOCK_API
|
||||
? mockApi.agents.create(data)
|
||||
: apiCall('/api/v1/agents', {
|
||||
method: 'POST',
|
||||
body: JSON.stringify(data),
|
||||
}),
|
||||
|
||||
get: async (id: string) =>
|
||||
USE_MOCK_API
|
||||
? mockApi.agents.get(id)
|
||||
: apiCall(`/api/v1/agents/${id}`),
|
||||
|
||||
update: async (id: string, data: any) =>
|
||||
USE_MOCK_API
|
||||
? mockApi.agents.update(id, data)
|
||||
: apiCall(`/api/v1/agents/${id}`, {
|
||||
method: 'PUT',
|
||||
body: JSON.stringify(data),
|
||||
}),
|
||||
|
||||
delete: async (id: string) =>
|
||||
USE_MOCK_API
|
||||
? mockApi.agents.delete(id)
|
||||
: apiCall(`/api/v1/agents/${id}`, {
|
||||
method: 'DELETE',
|
||||
}),
|
||||
},
|
||||
|
||||
// Document endpoints
|
||||
documents: {
|
||||
list: async () =>
|
||||
USE_MOCK_API
|
||||
? mockApi.documents.list()
|
||||
: apiCall('/api/v1/documents'),
|
||||
|
||||
upload: async (file: File) => {
|
||||
if (USE_MOCK_API) {
|
||||
return mockApi.documents.upload(file);
|
||||
}
|
||||
|
||||
const token = getAuthToken();
|
||||
|
||||
// Check if token exists and is expired before uploading
|
||||
if (token && isTokenExpired(token)) {
|
||||
if (typeof window !== 'undefined') {
|
||||
localStorage.removeItem('gt2_token');
|
||||
localStorage.removeItem('gt2_user');
|
||||
localStorage.removeItem('gt2_tenant');
|
||||
window.location.href = '/auth/login?session_expired=true';
|
||||
}
|
||||
throw new Error('Token expired');
|
||||
}
|
||||
|
||||
const formData = new FormData();
|
||||
formData.append('file', file);
|
||||
|
||||
const response = await fetch('/api/v1/documents', {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
...(token && { Authorization: `Bearer ${token}` }),
|
||||
},
|
||||
body: formData,
|
||||
});
|
||||
|
||||
if (!response.ok) {
|
||||
throw new Error(`Upload failed: ${response.status}`);
|
||||
}
|
||||
|
||||
return response.json();
|
||||
},
|
||||
|
||||
delete: async (id: string) =>
|
||||
USE_MOCK_API
|
||||
? mockApi.documents.delete(id)
|
||||
: apiCall(`/api/v1/documents/${id}`, {
|
||||
method: 'DELETE',
|
||||
}),
|
||||
|
||||
getChunks: async (id: string) =>
|
||||
USE_MOCK_API
|
||||
? mockApi.documents.getChunks(id)
|
||||
: apiCall(`/api/v1/documents/${id}/chunks`),
|
||||
},
|
||||
|
||||
// RAG/Knowledge endpoints
|
||||
rag: {
|
||||
search: async (query: string, datasetIds?: string[]) =>
|
||||
USE_MOCK_API
|
||||
? mockApi.rag.search(query, datasetIds)
|
||||
: apiCall('/api/v1/rag/search', {
|
||||
method: 'POST',
|
||||
body: JSON.stringify({ query, dataset_ids: datasetIds }),
|
||||
}),
|
||||
|
||||
getDatasets: async () =>
|
||||
USE_MOCK_API
|
||||
? mockApi.rag.getDatasets()
|
||||
: apiCall('/api/v1/rag/datasets'),
|
||||
|
||||
createDataset: async (data: any) =>
|
||||
USE_MOCK_API
|
||||
? mockApi.rag.createDataset(data)
|
||||
: apiCall('/api/v1/rag/datasets', {
|
||||
method: 'POST',
|
||||
body: JSON.stringify(data),
|
||||
}),
|
||||
},
|
||||
|
||||
// Agent endpoints
|
||||
agents: {
|
||||
list: async () =>
|
||||
USE_MOCK_API
|
||||
? mockApi.agents.list()
|
||||
: apiCall('/api/v1/agents'),
|
||||
|
||||
create: async (data: any) =>
|
||||
USE_MOCK_API
|
||||
? mockApi.agents.create(data)
|
||||
: apiCall('/api/v1/agents', {
|
||||
method: 'POST',
|
||||
body: JSON.stringify(data),
|
||||
}),
|
||||
|
||||
execute: async (agentId: string, task: string) =>
|
||||
USE_MOCK_API
|
||||
? mockApi.agents.execute(agentId, task)
|
||||
: apiCall(`/api/v1/agents/${agentId}/execute`, {
|
||||
method: 'POST',
|
||||
body: JSON.stringify({ task }),
|
||||
}),
|
||||
},
|
||||
|
||||
// External Services endpoints
|
||||
services: {
|
||||
list: async () =>
|
||||
USE_MOCK_API
|
||||
? mockApi.services.list()
|
||||
: apiCall('/api/v1/webservices'),
|
||||
|
||||
getEmbedConfig: async (serviceId: string) =>
|
||||
USE_MOCK_API
|
||||
? mockApi.services.getEmbedConfig(serviceId)
|
||||
: apiCall(`/api/v1/webservices/${serviceId}/embed-config`),
|
||||
},
|
||||
|
||||
// Games & AI Literacy endpoints
|
||||
games: {
|
||||
list: async () =>
|
||||
USE_MOCK_API
|
||||
? mockApi.games.list()
|
||||
: apiCall('/api/v1/games'),
|
||||
|
||||
startGame: async (gameType: string, options?: any) =>
|
||||
USE_MOCK_API
|
||||
? mockApi.games.startGame(gameType, options)
|
||||
: apiCall(`/api/v1/games/${gameType}/start`, {
|
||||
method: 'POST',
|
||||
body: JSON.stringify(options || {}),
|
||||
}),
|
||||
|
||||
makeMove: async (gameId: string, move: any) =>
|
||||
USE_MOCK_API
|
||||
? mockApi.games.makeMove(gameId, move)
|
||||
: apiCall(`/api/v1/games/${gameId}/move`, {
|
||||
method: 'POST',
|
||||
body: JSON.stringify({ move }),
|
||||
}),
|
||||
|
||||
getProgress: async () =>
|
||||
USE_MOCK_API
|
||||
? mockApi.games.getProgress()
|
||||
: apiCall('/api/v1/learning/progress'),
|
||||
},
|
||||
|
||||
// Projects endpoints
|
||||
projects: {
|
||||
list: async () =>
|
||||
USE_MOCK_API
|
||||
? mockApi.projects.list()
|
||||
: apiCall('/api/v1/projects'),
|
||||
|
||||
create: async (data: any) =>
|
||||
USE_MOCK_API
|
||||
? mockApi.projects.create(data)
|
||||
: apiCall('/api/v1/projects', {
|
||||
method: 'POST',
|
||||
body: JSON.stringify(data),
|
||||
}),
|
||||
|
||||
get: async (id: string) =>
|
||||
USE_MOCK_API
|
||||
? mockApi.projects.get(id)
|
||||
: apiCall(`/api/v1/projects/${id}`),
|
||||
},
|
||||
|
||||
// Settings endpoints
|
||||
settings: {
|
||||
getPreferences: async () =>
|
||||
USE_MOCK_API
|
||||
? mockApi.settings.getPreferences()
|
||||
: apiCall('/api/v1/user/preferences'),
|
||||
|
||||
updatePreferences: async (data: any) =>
|
||||
USE_MOCK_API
|
||||
? mockApi.settings.updatePreferences(data)
|
||||
: apiCall('/api/v1/user/preferences', {
|
||||
method: 'PUT',
|
||||
body: JSON.stringify(data),
|
||||
}),
|
||||
},
|
||||
|
||||
// Generic upload method for services
|
||||
upload: async <T>(endpoint: string, formData: FormData, options: {
|
||||
headers?: Record<string, string>;
|
||||
onUploadProgress?: (progressEvent: ProgressEvent) => void;
|
||||
} = {}): Promise<T> => {
|
||||
const token = typeof window !== 'undefined' ? getAuthToken() : null;
|
||||
|
||||
// Check if token exists and is expired before uploading
|
||||
if (token && isTokenExpired(token)) {
|
||||
if (typeof window !== 'undefined') {
|
||||
localStorage.removeItem('gt2_token');
|
||||
localStorage.removeItem('gt2_user');
|
||||
localStorage.removeItem('gt2_tenant');
|
||||
window.location.href = '/auth/login?session_expired=true';
|
||||
}
|
||||
throw new Error('Token expired');
|
||||
}
|
||||
|
||||
const headers: HeadersInit = {
|
||||
...(token && { Authorization: `Bearer ${token}` }),
|
||||
...options.headers,
|
||||
};
|
||||
|
||||
// Create XMLHttpRequest for upload progress tracking
|
||||
return new Promise((resolve, reject) => {
|
||||
const xhr = new XMLHttpRequest();
|
||||
|
||||
// Setup upload progress tracking
|
||||
if (options.onUploadProgress) {
|
||||
xhr.upload.addEventListener('progress', options.onUploadProgress);
|
||||
}
|
||||
|
||||
xhr.onload = () => {
|
||||
if (xhr.status >= 200 && xhr.status < 300) {
|
||||
try {
|
||||
const response = JSON.parse(xhr.responseText);
|
||||
resolve(response);
|
||||
} catch (e) {
|
||||
reject(new Error('Invalid JSON response'));
|
||||
}
|
||||
} else {
|
||||
reject(new Error(`Upload failed: ${xhr.status} ${xhr.statusText}`));
|
||||
}
|
||||
};
|
||||
|
||||
xhr.onerror = () => {
|
||||
reject(new Error('Upload failed: Network error'));
|
||||
};
|
||||
|
||||
xhr.open('POST', endpoint);
|
||||
|
||||
// Set headers
|
||||
Object.entries(headers).forEach(([key, value]) => {
|
||||
if (value) xhr.setRequestHeader(key, value);
|
||||
});
|
||||
|
||||
xhr.send(formData);
|
||||
});
|
||||
},
|
||||
|
||||
// Helper methods
|
||||
get: async <T>(endpoint: string): Promise<T> => {
|
||||
return apiCall(endpoint);
|
||||
},
|
||||
|
||||
post: async <T>(endpoint: string, data: any): Promise<T> => {
|
||||
return apiCall(endpoint, {
|
||||
method: 'POST',
|
||||
body: JSON.stringify(data),
|
||||
});
|
||||
},
|
||||
|
||||
put: async <T>(endpoint: string, data: any): Promise<T> => {
|
||||
return apiCall(endpoint, {
|
||||
method: 'PUT',
|
||||
body: JSON.stringify(data),
|
||||
});
|
||||
},
|
||||
|
||||
delete: async <T>(endpoint: string): Promise<T> => {
|
||||
return apiCall(endpoint, {
|
||||
method: 'DELETE',
|
||||
});
|
||||
},
|
||||
};
|
||||
|
||||
export default api;
|
||||
65
apps/tenant-app/src/lib/api/client.ts
Normal file
65
apps/tenant-app/src/lib/api/client.ts
Normal file
@@ -0,0 +1,65 @@
|
||||
/**
|
||||
* API Client for GT 2.0 Tenant App
|
||||
* Handles HTTP requests to the tenant backend with authentication
|
||||
*/
|
||||
|
||||
interface ApiResponse<T> {
|
||||
data: T;
|
||||
status: number;
|
||||
statusText: string;
|
||||
}
|
||||
|
||||
class ApiClient {
|
||||
private baseURL: string;
|
||||
|
||||
constructor(baseURL: string = '/api/v1') {
|
||||
this.baseURL = baseURL;
|
||||
}
|
||||
|
||||
private async request<T>(
|
||||
method: string,
|
||||
endpoint: string,
|
||||
data?: any,
|
||||
headers?: Record<string, string>
|
||||
): Promise<ApiResponse<T>> {
|
||||
const url = `${this.baseURL}${endpoint}`;
|
||||
const config: RequestInit = {
|
||||
method,
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
...headers,
|
||||
},
|
||||
};
|
||||
|
||||
if (data) {
|
||||
config.body = JSON.stringify(data);
|
||||
}
|
||||
|
||||
const response = await fetch(url, config);
|
||||
const responseData = await response.json();
|
||||
|
||||
return {
|
||||
data: responseData,
|
||||
status: response.status,
|
||||
statusText: response.statusText,
|
||||
};
|
||||
}
|
||||
|
||||
async get<T>(endpoint: string, headers?: Record<string, string>): Promise<ApiResponse<T>> {
|
||||
return this.request<T>('GET', endpoint, undefined, headers);
|
||||
}
|
||||
|
||||
async post<T>(endpoint: string, data?: any, headers?: Record<string, string>): Promise<ApiResponse<T>> {
|
||||
return this.request<T>('POST', endpoint, data, headers);
|
||||
}
|
||||
|
||||
async put<T>(endpoint: string, data?: any, headers?: Record<string, string>): Promise<ApiResponse<T>> {
|
||||
return this.request<T>('PUT', endpoint, data, headers);
|
||||
}
|
||||
|
||||
async delete<T>(endpoint: string, headers?: Record<string, string>): Promise<ApiResponse<T>> {
|
||||
return this.request<T>('DELETE', endpoint, undefined, headers);
|
||||
}
|
||||
}
|
||||
|
||||
export const apiClient = new ApiClient();
|
||||
261
apps/tenant-app/src/lib/api/external-services.ts
Normal file
261
apps/tenant-app/src/lib/api/external-services.ts
Normal file
@@ -0,0 +1,261 @@
|
||||
/**
|
||||
* External Services API Client
|
||||
* Handles external web services integration with Resource Cluster
|
||||
*/
|
||||
|
||||
import { apiClient } from '@/lib/api/client';
|
||||
|
||||
/** A provisioned external web-service instance as returned by the backend. */
export interface ServiceInstance {
  id: string;
  service_type: string;       // machine key of the service kind (see ServiceType.type)
  service_name: string;       // user-visible instance name
  description?: string;
  endpoint_url: string;       // where the running service is reachable
  status: string;
  health_status: string;
  created_by: string;         // creator's user id
  allowed_users: string[];    // users the instance is shared with
  access_level: string;
  created_at: string;         // ISO-8601 timestamp (presumably — TODO confirm with backend)
  last_accessed?: string;
}

/** Catalog entry describing a kind of external service the platform can run. */
export interface ServiceType {
  type: string;               // machine key, matches ServiceInstance.service_type
  name: string;               // display name
  description: string;
  category: string;
  features: string[];
  // Requested container resources; values are backend-formatted strings.
  resource_requirements: {
    cpu: string;
    memory: string;
    storage: string;
  };
  estimated_startup_time: string;
  sso_supported: boolean;     // whether single sign-on embedding is available
}

/** Payload for provisioning a new service instance. */
export interface CreateServiceRequest {
  service_type: string;
  service_name: string;
  description?: string;
  config_overrides?: Record<string, any>;  // per-instance config on top of the template
  template_id?: string;
}

/** Everything needed to embed a service in a sandboxed iframe with SSO. */
export interface EmbedConfig {
  iframe_url: string;
  sandbox_attributes: string[];  // values for the iframe `sandbox` attribute
  security_policies: {
    allow: string;               // iframe `allow` attribute value
    referrerpolicy: string;
    loading: string;
  };
  sso_token: string;             // short-lived token; see expires_at
  expires_at: string;
}

/** Usage analytics for one service instance over a trailing window. */
export interface ServiceAnalytics {
  instance_id: string;
  service_type: string;
  service_name: string;
  analytics_period_days: number;  // size of the reporting window
  total_sessions: number;
  total_time_hours: number;
  unique_users: number;
  average_session_duration_minutes: number;
  daily_usage: Record<string, any>;  // keyed per day — shape not fixed here; confirm with backend
  uptime_percentage: number;
}
|
||||
|
||||
export class ExternalServicesAPI {
|
||||
/**
|
||||
* Create a new external service instance
|
||||
*/
|
||||
async createService(request: CreateServiceRequest): Promise<ServiceInstance> {
|
||||
try {
|
||||
const response = await apiClient.post<ServiceInstance>('/external-services/create', request);
|
||||
return response.data;
|
||||
} catch (error) {
|
||||
console.error('Failed to create service:', error);
|
||||
throw this.handleError(error);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* List all external services accessible to the user
|
||||
*/
|
||||
async listServices(options?: {
|
||||
service_type?: string;
|
||||
status?: string;
|
||||
}): Promise<{ services: ServiceInstance[]; total: number }> {
|
||||
try {
|
||||
const params = new URLSearchParams();
|
||||
if (options?.service_type) params.append('service_type', options.service_type);
|
||||
if (options?.status) params.append('status', options.status);
|
||||
|
||||
const response = await apiClient.get<{ services: ServiceInstance[]; total: number }>(
|
||||
`/external-services/list?${params.toString()}`
|
||||
);
|
||||
return response.data;
|
||||
} catch (error) {
|
||||
console.error('Failed to list services:', error);
|
||||
throw this.handleError(error);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Get specific external service details
|
||||
*/
|
||||
async getService(instanceId: string): Promise<ServiceInstance> {
|
||||
try {
|
||||
const response = await apiClient.get<ServiceInstance>(`/external-services/${instanceId}`);
|
||||
return response.data;
|
||||
} catch (error) {
|
||||
console.error('Failed to get service:', error);
|
||||
throw this.handleError(error);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Stop an external service instance
|
||||
*/
|
||||
async stopService(instanceId: string): Promise<{ success: boolean; message: string; stopped_at: string }> {
|
||||
try {
|
||||
const response = await apiClient.delete<{ success: boolean; message: string; stopped_at: string }>(
|
||||
`/external-services/${instanceId}`
|
||||
);
|
||||
return response.data;
|
||||
} catch (error) {
|
||||
console.error('Failed to stop service:', error);
|
||||
throw this.handleError(error);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Get service health status
|
||||
*/
|
||||
async getServiceHealth(instanceId: string): Promise<Record<string, any>> {
|
||||
try {
|
||||
const response = await apiClient.get<Record<string, any>>(`/external-services/${instanceId}/health`);
|
||||
return response.data;
|
||||
} catch (error) {
|
||||
console.error('Failed to get service health:', error);
|
||||
throw this.handleError(error);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Get iframe embed configuration with SSO token
|
||||
*/
|
||||
async getEmbedConfig(instanceId: string): Promise<EmbedConfig> {
|
||||
try {
|
||||
const response = await apiClient.post<EmbedConfig>(`/external-services/${instanceId}/embed-config`, {});
|
||||
return response.data;
|
||||
} catch (error) {
|
||||
console.error('Failed to get embed config:', error);
|
||||
throw this.handleError(error);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Get service usage analytics
|
||||
*/
|
||||
async getServiceAnalytics(instanceId: string, days: number = 30): Promise<ServiceAnalytics> {
|
||||
try {
|
||||
const response = await apiClient.get<ServiceAnalytics>(
|
||||
`/external-services/${instanceId}/analytics?days=${days}`
|
||||
);
|
||||
return response.data;
|
||||
} catch (error) {
|
||||
console.error('Failed to get service analytics:', error);
|
||||
throw this.handleError(error);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Share service instance with other users
|
||||
*/
|
||||
async shareService(
|
||||
instanceId: string,
|
||||
shareWithEmails: string[],
|
||||
accessLevel: string = 'read'
|
||||
): Promise<{ success: boolean; shared_with: string[]; access_level: string; shared_at: string }> {
|
||||
try {
|
||||
const response = await apiClient.post<{
|
||||
success: boolean;
|
||||
shared_with: string[];
|
||||
access_level: string;
|
||||
shared_at: string;
|
||||
}>(`/external-services/${instanceId}/share`, {
|
||||
share_with_emails: shareWithEmails,
|
||||
access_level: accessLevel
|
||||
});
|
||||
return response.data;
|
||||
} catch (error) {
|
||||
console.error('Failed to share service:', error);
|
||||
throw this.handleError(error);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* List available service templates
|
||||
*/
|
||||
async listServiceTemplates(options?: {
|
||||
service_type?: string;
|
||||
category?: string;
|
||||
}): Promise<{ templates: any[]; total: number }> {
|
||||
try {
|
||||
const params = new URLSearchParams();
|
||||
if (options?.service_type) params.append('service_type', options.service_type);
|
||||
if (options?.category) params.append('category', options.category);
|
||||
|
||||
const response = await apiClient.get<{ templates: any[]; total: number }>(
|
||||
`/external-services/templates/list?${params.toString()}`
|
||||
);
|
||||
return response.data;
|
||||
} catch (error) {
|
||||
console.error('Failed to list service templates:', error);
|
||||
throw this.handleError(error);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Get supported external service types and their capabilities
|
||||
*/
|
||||
async getSupportedServiceTypes(): Promise<{
|
||||
supported_types: ServiceType[];
|
||||
total_types: number;
|
||||
categories: string[];
|
||||
extensible: boolean;
|
||||
}> {
|
||||
try {
|
||||
const response = await apiClient.get<{
|
||||
supported_types: ServiceType[];
|
||||
total_types: number;
|
||||
categories: string[];
|
||||
extensible: boolean;
|
||||
}>('/external-services/types/supported');
|
||||
return response.data;
|
||||
} catch (error) {
|
||||
console.error('Failed to get supported service types:', error);
|
||||
throw this.handleError(error);
|
||||
}
|
||||
}
|
||||
|
||||
private handleError(error: any): Error {
|
||||
if (error.response?.data?.error) {
|
||||
return new Error(error.response.data.error.message || 'Unknown server error');
|
||||
}
|
||||
if (error.response?.data?.detail) {
|
||||
return new Error(error.response.data.detail);
|
||||
}
|
||||
if (error.message) {
|
||||
return new Error(error.message);
|
||||
}
|
||||
return new Error('An unexpected error occurred');
|
||||
}
|
||||
}
|
||||
|
||||
// Export singleton instance
|
||||
export const externalServicesAPI = new ExternalServicesAPI();
|
||||
163
apps/tenant-app/src/lib/capabilities.ts
Normal file
163
apps/tenant-app/src/lib/capabilities.ts
Normal file
@@ -0,0 +1,163 @@
|
||||
/**
|
||||
* GT 2.0 Capability Management Utilities
|
||||
*
|
||||
* Provides capability-based authorization following GT 2.0's security model.
|
||||
*/
|
||||
|
||||
import { getAuthToken, parseCapabilities, isTokenValid } from '@/services/auth';
|
||||
|
||||
// GT 2.0 Standard Capabilities
|
||||
export const GT2_CAPABILITIES = {
|
||||
// Agent Management (Primary)
|
||||
AGENTS_READ: 'agents:read',
|
||||
AGENTS_CREATE: 'agents:create',
|
||||
AGENTS_EDIT: 'agents:edit',
|
||||
AGENTS_DELETE: 'agents:delete',
|
||||
AGENTS_SHARE: 'agents:share',
|
||||
AGENTS_EXECUTE: 'agents:execute',
|
||||
|
||||
// Agent Management (Legacy - Maps to Agents)
|
||||
AGENTS_READ: 'agents:read',
|
||||
AGENTS_CREATE: 'agents:create',
|
||||
ASSISTANTS_EDIT: 'agents:edit',
|
||||
AGENTS_DELETE: 'agents:delete',
|
||||
ASSISTANTS_SHARE: 'agents:share',
|
||||
|
||||
// Dataset Management
|
||||
DATASETS_READ: 'datasets:read',
|
||||
DATASETS_CREATE: 'datasets:create',
|
||||
DATASETS_UPLOAD: 'datasets:upload',
|
||||
DATASETS_DELETE: 'datasets:delete',
|
||||
DATASETS_SHARE: 'datasets:share',
|
||||
|
||||
// Conversation Management
|
||||
CONVERSATIONS_READ: 'conversations:read',
|
||||
CONVERSATIONS_CREATE: 'conversations:create',
|
||||
CONVERSATIONS_DELETE: 'conversations:delete',
|
||||
|
||||
// Document Management
|
||||
DOCUMENTS_READ: 'documents:read',
|
||||
DOCUMENTS_UPLOAD: 'documents:upload',
|
||||
DOCUMENTS_DELETE: 'documents:delete',
|
||||
|
||||
// Administrative
|
||||
ADMIN_USERS: 'admin:users',
|
||||
ADMIN_TENANTS: 'admin:tenants',
|
||||
ADMIN_SYSTEM: 'admin:system'
|
||||
} as const;
|
||||
|
||||
export type GT2Capability = typeof GT2_CAPABILITIES[keyof typeof GT2_CAPABILITIES];
|
||||
|
||||
/**
|
||||
* Check if user has specific capability
|
||||
*/
|
||||
export function hasCapability(capability: string): boolean {
|
||||
if (!isTokenValid()) return false;
|
||||
|
||||
const token = getAuthToken();
|
||||
if (!token) return false;
|
||||
|
||||
const capabilities = parseCapabilities(token);
|
||||
return capabilities.includes(capability);
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if user has all required capabilities
|
||||
*/
|
||||
export function hasAllCapabilities(requiredCapabilities: string[]): boolean {
|
||||
if (requiredCapabilities.length === 0) return true;
|
||||
|
||||
return requiredCapabilities.every(cap => hasCapability(cap));
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if user has any of the specified capabilities
|
||||
*/
|
||||
export function hasAnyCapability(capabilities: string[]): boolean {
|
||||
if (capabilities.length === 0) return true;
|
||||
|
||||
return capabilities.some(cap => hasCapability(cap));
|
||||
}
|
||||
|
||||
/**
|
||||
* Get user's current capabilities
|
||||
*/
|
||||
export function getCurrentCapabilities(): string[] {
|
||||
if (!isTokenValid()) return [];
|
||||
|
||||
const token = getAuthToken();
|
||||
if (!token) return [];
|
||||
|
||||
return parseCapabilities(token);
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if user can perform action on resource
|
||||
*/
|
||||
export function canPerformAction(resource: string, action: string): boolean {
|
||||
const capability = `${resource}:${action}`;
|
||||
return hasCapability(capability);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get user's role from capabilities (GT 2.0 role inference)
|
||||
*/
|
||||
export function inferUserRole(): 'admin' | 'developer' | 'analyst' | 'student' | 'guest' {
|
||||
const capabilities = getCurrentCapabilities();
|
||||
|
||||
if (capabilities.some(cap => cap.startsWith('admin:'))) {
|
||||
return 'admin';
|
||||
}
|
||||
|
||||
if (capabilities.includes(GT2_CAPABILITIES.AGENTS_CREATE) ||
|
||||
capabilities.includes(GT2_CAPABILITIES.AGENTS_CREATE)) {
|
||||
return 'developer';
|
||||
}
|
||||
|
||||
if (capabilities.includes(GT2_CAPABILITIES.DATASETS_CREATE) ||
|
||||
capabilities.includes(GT2_CAPABILITIES.DATASETS_UPLOAD)) {
|
||||
return 'analyst';
|
||||
}
|
||||
|
||||
if (capabilities.includes(GT2_CAPABILITIES.CONVERSATIONS_CREATE)) {
|
||||
return 'student';
|
||||
}
|
||||
|
||||
return 'guest';
|
||||
}
|
||||
|
||||
/**
|
||||
* Create capability checker hook for React components
|
||||
*/
|
||||
export function createCapabilityChecker(requiredCapabilities: string[]) {
|
||||
return {
|
||||
hasAccess: hasAllCapabilities(requiredCapabilities),
|
||||
missingCapabilities: requiredCapabilities.filter(cap => !hasCapability(cap)),
|
||||
userCapabilities: getCurrentCapabilities()
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Filter items based on required capabilities
|
||||
*/
|
||||
export function filterByCapability<T extends { requiredCapability?: string }>(
|
||||
items: T[],
|
||||
defaultCapability?: string
|
||||
): T[] {
|
||||
return items.filter(item => {
|
||||
const required = item.requiredCapability || defaultCapability;
|
||||
return !required || hasCapability(required);
|
||||
});
|
||||
}
|
||||
|
||||
// Aggregate default export mirroring the named exports above, so callers can
// `import capabilities from '...'` as well as cherry-pick named symbols.
export default {
  GT2_CAPABILITIES,
  hasCapability,
  hasAllCapabilities,
  hasAnyCapability,
  getCurrentCapabilities,
  canPerformAction,
  inferUserRole,
  createCapabilityChecker,
  filterByCapability
};
|
||||
70
apps/tenant-app/src/lib/chart-export.ts
Normal file
70
apps/tenant-app/src/lib/chart-export.ts
Normal file
@@ -0,0 +1,70 @@
|
||||
import html2canvas from 'html2canvas';
|
||||
|
||||
/** Configuration for exporting a rendered chart element as a PNG. */
interface ExportOptions {
  element: HTMLElement;       // DOM node to rasterize
  filename: string;           // download filename, including extension
  backgroundColor?: string;   // canvas background; defaults to white
}
|
||||
|
||||
/**
|
||||
* Exports a chart component as a PNG image
|
||||
* @param options - Export configuration options
|
||||
* @returns Promise that resolves when export is complete
|
||||
*/
|
||||
export async function exportChartAsPNG(options: ExportOptions): Promise<void> {
|
||||
const { element, filename, backgroundColor = '#ffffff' } = options;
|
||||
|
||||
try {
|
||||
// Create canvas from the DOM element
|
||||
const canvas = await html2canvas(element, {
|
||||
backgroundColor,
|
||||
scale: 2, // Higher resolution for better quality
|
||||
logging: false,
|
||||
useCORS: true,
|
||||
allowTaint: true,
|
||||
onclone: (clonedDoc) => {
|
||||
// Ensure the cloned element is visible
|
||||
const clonedElement = clonedDoc.querySelector('[data-export-target]');
|
||||
if (clonedElement) {
|
||||
(clonedElement as HTMLElement).style.display = 'block';
|
||||
}
|
||||
},
|
||||
});
|
||||
|
||||
// Convert canvas to blob
|
||||
canvas.toBlob((blob) => {
|
||||
if (!blob) {
|
||||
throw new Error('Failed to create image blob');
|
||||
}
|
||||
|
||||
// Create download link
|
||||
const url = URL.createObjectURL(blob);
|
||||
const link = document.createElement('a');
|
||||
link.href = url;
|
||||
link.download = filename;
|
||||
|
||||
// Trigger download
|
||||
document.body.appendChild(link);
|
||||
link.click();
|
||||
|
||||
// Cleanup
|
||||
document.body.removeChild(link);
|
||||
URL.revokeObjectURL(url);
|
||||
}, 'image/png');
|
||||
} catch (error) {
|
||||
console.error('Error exporting chart:', error);
|
||||
throw new Error('Failed to export chart as PNG');
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Generates a filename for chart export
|
||||
* @param metric - Current metric being displayed (conversations, messages, tokens)
|
||||
* @param dateRange - Date range string (e.g., "30d", "7d")
|
||||
* @returns Formatted filename with timestamp
|
||||
*/
|
||||
export function generateExportFilename(metric: string, dateRange: string): string {
|
||||
const timestamp = new Date().toISOString().replace(/[:.]/g, '-').split('T')[0] + '_' +
|
||||
new Date().toTimeString().split(' ')[0].replace(/:/g, '');
|
||||
return `usage_overview_${metric}_${dateRange}_${timestamp}.png`;
|
||||
}
|
||||
710
apps/tenant-app/src/lib/download-utils.ts
Normal file
710
apps/tenant-app/src/lib/download-utils.ts
Normal file
@@ -0,0 +1,710 @@
|
||||
import { saveAs } from 'file-saver';
|
||||
import { Document, Packer, Paragraph, TextRun, HeadingLevel, ExternalHyperlink, ImageRun, Table, TableRow, TableCell, WidthType, AlignmentType, LevelFormat, convertInchesToTwip, ShadingType } from 'docx';
|
||||
import { parseMarkdown } from './markdown-parser';
|
||||
import { renderMultipleDiagrams, createDiagramPlaceholder } from './mermaid-renderer';
|
||||
|
||||
/** Options for exporting a chat response to a downloadable file. */
export interface DownloadOptions {
  filename?: string;              // base name without extension; defaults to a dated name
  format: 'txt' | 'docx' | 'md';  // target file format
  content: string;                // markdown source to convert
  title?: string;                 // optional document title (docx only)
}
|
||||
|
||||
// Convert markdown to clean text
|
||||
function markdownToText(content: string): string {
|
||||
return content
|
||||
// Remove code blocks
|
||||
.replace(/```[\s\S]*?```/g, '[Code Block]')
|
||||
// Remove inline code
|
||||
.replace(/`([^`]+)`/g, '$1')
|
||||
// Remove links but keep text
|
||||
.replace(/\[([^\]]+)\]\([^)]+\)/g, '$1')
|
||||
// Remove images
|
||||
.replace(/!\[([^\]]*)\]\([^)]+\)/g, '[Image: $1]')
|
||||
// Remove headers
|
||||
.replace(/^#{1,6}\s+/gm, '')
|
||||
// Remove bold/italic
|
||||
.replace(/\*\*([^*]+)\*\*/g, '$1')
|
||||
.replace(/\*([^*]+)\*/g, '$1')
|
||||
// Remove blockquotes
|
||||
.replace(/^>\s*/gm, '')
|
||||
// Clean up extra whitespace
|
||||
.replace(/\n\s*\n/g, '\n\n')
|
||||
.trim();
|
||||
}
|
||||
|
||||
// Helper interface for inline formatting segments produced by
// parseInlineFormatting(); each segment is a run of text with at most one
// style applied.
interface TextSegment {
  text: string;     // the literal text of the run (markers stripped)
  bold?: boolean;   // came from **text**
  italic?: boolean; // came from *text*
  link?: string;    // target URL when the run came from [text](url)
  code?: boolean;   // came from `text`
}
|
||||
|
||||
// Parse inline markdown formatting (bold, italic, links) - Used by DOCX export
|
||||
function parseInlineFormatting(line: string): TextSegment[] {
|
||||
// Handle empty or whitespace-only lines
|
||||
if (!line || !line.trim()) {
|
||||
return [{ text: line }];
|
||||
}
|
||||
|
||||
const segments: TextSegment[] = [];
|
||||
let currentPos = 0;
|
||||
|
||||
// Combined regex for inline code (`text`), bold (**text**), italic (*text*), and links ([text](url))
|
||||
// Order matters: match ` first, then ** before * to avoid conflicts
|
||||
// Groups: 1-2: inline code, 3-4: bold, 5-6: italic, 7-8: links
|
||||
const regex = /(`([^`\n]+?)`)|(\*\*([^*\n]+?)\*\*)|(?<!\*)(\*([^*\n]+?)\*)(?!\*)|\[([^\]\n]+)\]\(([^)\n]+)\)/g;
|
||||
let match;
|
||||
let iterations = 0;
|
||||
const MAX_ITERATIONS = 1000; // Prevent infinite loops
|
||||
|
||||
try {
|
||||
while ((match = regex.exec(line)) !== null && iterations < MAX_ITERATIONS) {
|
||||
iterations++;
|
||||
|
||||
// Add text before this match
|
||||
if (match.index > currentPos) {
|
||||
const beforeText = line.substring(currentPos, match.index);
|
||||
if (beforeText) {
|
||||
segments.push({ text: beforeText });
|
||||
}
|
||||
}
|
||||
|
||||
if (match[1]) {
|
||||
// Inline code: `text`
|
||||
segments.push({ text: match[2], code: true });
|
||||
} else if (match[3]) {
|
||||
// Bold: **text**
|
||||
segments.push({ text: match[4], bold: true });
|
||||
} else if (match[5]) {
|
||||
// Italic: *text* (but not part of **)
|
||||
segments.push({ text: match[6], italic: true });
|
||||
} else if (match[7]) {
|
||||
// Link: [text](url)
|
||||
segments.push({ text: match[7], link: match[8] });
|
||||
}
|
||||
|
||||
currentPos = regex.lastIndex;
|
||||
}
|
||||
|
||||
// Add remaining text after last match
|
||||
if (currentPos < line.length) {
|
||||
const remainingText = line.substring(currentPos);
|
||||
if (remainingText) {
|
||||
segments.push({ text: remainingText });
|
||||
}
|
||||
}
|
||||
|
||||
// If no formatting found, return original line as single segment
|
||||
return segments.length > 0 ? segments : [{ text: line }];
|
||||
} catch (error) {
|
||||
// If regex fails, return original line as plain text
|
||||
console.warn('parseInlineFormatting failed:', error);
|
||||
return [{ text: line }];
|
||||
}
|
||||
}
|
||||
|
||||
export async function downloadContent(options: DownloadOptions): Promise<void> {
|
||||
const { content, format, filename, title } = options;
|
||||
const timestamp = new Date().toISOString().split('T')[0];
|
||||
const defaultFilename = filename || `gt-chat-response-${timestamp}`;
|
||||
|
||||
try {
|
||||
switch (format) {
|
||||
case 'txt': {
|
||||
const textContent = markdownToText(content);
|
||||
const blob = new Blob([textContent], { type: 'text/plain;charset=utf-8' });
|
||||
saveAs(blob, `${defaultFilename}.txt`);
|
||||
break;
|
||||
}
|
||||
|
||||
case 'md': {
|
||||
const blob = new Blob([content], { type: 'text/markdown;charset=utf-8' });
|
||||
saveAs(blob, `${defaultFilename}.md`);
|
||||
break;
|
||||
}
|
||||
|
||||
case 'docx': {
|
||||
// Parse markdown for enhanced rendering
|
||||
const parsed = parseMarkdown(content);
|
||||
|
||||
// Render Mermaid diagrams to PNG (if any)
|
||||
const diagramResults = parsed.mermaidBlocks.length > 0
|
||||
? await renderMultipleDiagrams(parsed.mermaidBlocks.map(b => b.code))
|
||||
: [];
|
||||
|
||||
let diagramIndex = 0;
|
||||
|
||||
const children: (Paragraph | Table)[] = [];
|
||||
|
||||
// Add title if provided
|
||||
if (title) {
|
||||
children.push(
|
||||
new Paragraph({
|
||||
children: [new TextRun({ text: title, bold: true, size: 32 })],
|
||||
heading: HeadingLevel.HEADING_1,
|
||||
spacing: { after: 400 },
|
||||
})
|
||||
);
|
||||
}
|
||||
|
||||
// Process content line by line with formatting
|
||||
const lines = content.split('\n');
|
||||
let currentParagraphRuns: Array<TextRun | ExternalHyperlink> = [];
|
||||
let inCodeBlock = false;
|
||||
let codeBlockLang = '';
|
||||
let codeBlockContent: string[] = [];
|
||||
let inTable = false;
|
||||
let tableRows: TableRow[] = [];
|
||||
let tableColumnCount = 0;
|
||||
|
||||
const headingLevels = [
|
||||
HeadingLevel.HEADING_1,
|
||||
HeadingLevel.HEADING_2,
|
||||
HeadingLevel.HEADING_3,
|
||||
HeadingLevel.HEADING_4,
|
||||
HeadingLevel.HEADING_5,
|
||||
HeadingLevel.HEADING_6,
|
||||
];
|
||||
|
||||
for (let i = 0; i < lines.length; i++) {
|
||||
const line = lines[i];
|
||||
|
||||
// Empty line - flush current paragraph
|
||||
if (!line.trim()) {
|
||||
if (currentParagraphRuns.length > 0) {
|
||||
children.push(
|
||||
new Paragraph({
|
||||
children: currentParagraphRuns,
|
||||
spacing: { after: 200 },
|
||||
})
|
||||
);
|
||||
currentParagraphRuns = [];
|
||||
}
|
||||
continue;
|
||||
}
|
||||
|
||||
// Detect code block start/end
|
||||
if (line.startsWith('```')) {
|
||||
// Flush current paragraph
|
||||
if (currentParagraphRuns.length > 0) {
|
||||
children.push(new Paragraph({ children: currentParagraphRuns }));
|
||||
currentParagraphRuns = [];
|
||||
}
|
||||
|
||||
if (!inCodeBlock) {
|
||||
// Start of code block
|
||||
inCodeBlock = true;
|
||||
codeBlockLang = line.substring(3).trim();
|
||||
continue;
|
||||
} else {
|
||||
// End of code block
|
||||
if (codeBlockLang === 'mermaid' && diagramIndex < diagramResults.length) {
|
||||
// Render Mermaid diagram as image
|
||||
const result = diagramResults[diagramIndex];
|
||||
diagramIndex++;
|
||||
|
||||
if (result.success && result.data) {
|
||||
// Convert base64 PNG to Uint8Array (browser-compatible, not Buffer)
|
||||
const base64Data = result.data.split(',')[1];
|
||||
const binaryString = atob(base64Data);
|
||||
const bytes = new Uint8Array(binaryString.length);
|
||||
for (let i = 0; i < binaryString.length; i++) {
|
||||
bytes[i] = binaryString.charCodeAt(i);
|
||||
}
|
||||
|
||||
// Calculate dimensions (max 6.5 inches width for Letter size with 1" margins)
|
||||
const PAGE_WIDTH_INCHES = 6.5;
|
||||
const DPI = 96;
|
||||
const maxWidthPixels = PAGE_WIDTH_INCHES * DPI;
|
||||
|
||||
const imgWidth = result.width || 800;
|
||||
const imgHeight = result.height || 600;
|
||||
const aspectRatio = imgHeight / imgWidth;
|
||||
|
||||
let renderWidth = Math.min(imgWidth, maxWidthPixels);
|
||||
let renderHeight = renderWidth * aspectRatio;
|
||||
|
||||
// Add image to document
|
||||
children.push(
|
||||
new Paragraph({
|
||||
children: [
|
||||
new ImageRun({
|
||||
data: bytes,
|
||||
transformation: {
|
||||
width: renderWidth,
|
||||
height: renderHeight,
|
||||
},
|
||||
}),
|
||||
],
|
||||
spacing: { before: 200, after: 200 },
|
||||
})
|
||||
);
|
||||
} else {
|
||||
// Diagram failed - show placeholder
|
||||
const placeholder = createDiagramPlaceholder(result.error);
|
||||
children.push(
|
||||
new Paragraph({
|
||||
children: [new TextRun({ text: placeholder, color: 'FF0000' })],
|
||||
spacing: { after: 200 },
|
||||
})
|
||||
);
|
||||
console.warn(`DOCX export: ${placeholder}`);
|
||||
}
|
||||
} else if (codeBlockContent.length > 0) {
|
||||
// Render non-mermaid code block content
|
||||
children.push(
|
||||
new Paragraph({
|
||||
children: [
|
||||
new TextRun({
|
||||
text: codeBlockContent.join('\n'),
|
||||
font: 'Courier New',
|
||||
size: 20, // 10pt
|
||||
color: '000000', // Black text - required for visibility on gray background
|
||||
}),
|
||||
],
|
||||
shading: { fill: 'E5E7EB', type: ShadingType.SOLID },
|
||||
spacing: { before: 200, after: 200 },
|
||||
})
|
||||
);
|
||||
}
|
||||
inCodeBlock = false;
|
||||
codeBlockLang = '';
|
||||
codeBlockContent = [];
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
// Accumulate content inside code blocks (mermaid is handled separately via diagramResults)
|
||||
if (inCodeBlock) {
|
||||
if (codeBlockLang !== 'mermaid') {
|
||||
codeBlockContent.push(line);
|
||||
}
|
||||
continue;
|
||||
}
|
||||
|
||||
// Check for headers
|
||||
const headerMatch = line.match(/^(#{1,6})\s+(.+)/);
|
||||
if (headerMatch) {
|
||||
// Flush current paragraph
|
||||
if (currentParagraphRuns.length > 0) {
|
||||
children.push(new Paragraph({ children: currentParagraphRuns }));
|
||||
currentParagraphRuns = [];
|
||||
}
|
||||
|
||||
// Flush any open table before starting header
|
||||
if (inTable && tableRows.length > 0) {
|
||||
const totalWidth = 9360;
|
||||
const columnWidth = Math.floor(totalWidth / tableColumnCount);
|
||||
const columnWidths = Array(tableColumnCount).fill(columnWidth);
|
||||
|
||||
children.push(new Table({
|
||||
rows: tableRows,
|
||||
width: { size: totalWidth, type: WidthType.DXA },
|
||||
columnWidths: columnWidths,
|
||||
}));
|
||||
tableRows = [];
|
||||
tableColumnCount = 0;
|
||||
inTable = false;
|
||||
}
|
||||
|
||||
const level = headerMatch[1].length;
|
||||
const headerText = headerMatch[2];
|
||||
|
||||
children.push(
|
||||
new Paragraph({
|
||||
text: headerText,
|
||||
heading: headingLevels[level - 1],
|
||||
spacing: { before: 200, after: 100 },
|
||||
})
|
||||
);
|
||||
continue;
|
||||
}
|
||||
|
||||
// Check for numbered list items
|
||||
const numberedListMatch = line.match(/^(\s*)(\d+)[\.)]\s+(.+)/);
|
||||
if (numberedListMatch) {
|
||||
// Flush current paragraph
|
||||
if (currentParagraphRuns.length > 0) {
|
||||
children.push(new Paragraph({ children: currentParagraphRuns }));
|
||||
currentParagraphRuns = [];
|
||||
}
|
||||
|
||||
// Flush any open table before starting numbered list
|
||||
if (inTable && tableRows.length > 0) {
|
||||
const totalWidth = 9360;
|
||||
const columnWidth = Math.floor(totalWidth / tableColumnCount);
|
||||
const columnWidths = Array(tableColumnCount).fill(columnWidth);
|
||||
|
||||
children.push(new Table({
|
||||
rows: tableRows,
|
||||
width: { size: totalWidth, type: WidthType.DXA },
|
||||
columnWidths: columnWidths,
|
||||
}));
|
||||
tableRows = [];
|
||||
tableColumnCount = 0;
|
||||
inTable = false;
|
||||
}
|
||||
|
||||
const indentLevel = Math.floor(numberedListMatch[1].length / 2);
|
||||
const listText = numberedListMatch[3];
|
||||
|
||||
// Parse inline formatting in list item text
|
||||
const segments = parseInlineFormatting(listText);
|
||||
const listItemRuns: Array<TextRun | ExternalHyperlink> = [];
|
||||
|
||||
segments.forEach(segment => {
|
||||
if (segment.link) {
|
||||
listItemRuns.push(
|
||||
new ExternalHyperlink({
|
||||
children: [new TextRun({
|
||||
text: segment.text,
|
||||
color: '0000FF',
|
||||
underline: {}
|
||||
})],
|
||||
link: segment.link,
|
||||
})
|
||||
);
|
||||
} else {
|
||||
listItemRuns.push(
|
||||
new TextRun({
|
||||
text: segment.text,
|
||||
bold: segment.bold,
|
||||
italics: segment.italic,
|
||||
font: segment.code ? 'Courier New' : undefined,
|
||||
})
|
||||
);
|
||||
}
|
||||
});
|
||||
|
||||
children.push(
|
||||
new Paragraph({
|
||||
children: listItemRuns,
|
||||
numbering: { reference: 'default-numbering', level: indentLevel },
|
||||
spacing: { after: 100 },
|
||||
})
|
||||
);
|
||||
continue;
|
||||
}
|
||||
|
||||
// Check for bullet list items
|
||||
const bulletListMatch = line.match(/^(\s*)[-*]\s+(.+)/);
|
||||
if (bulletListMatch) {
|
||||
// Flush current paragraph
|
||||
if (currentParagraphRuns.length > 0) {
|
||||
children.push(new Paragraph({ children: currentParagraphRuns }));
|
||||
currentParagraphRuns = [];
|
||||
}
|
||||
|
||||
// Flush any open table before starting bullet list
|
||||
if (inTable && tableRows.length > 0) {
|
||||
const totalWidth = 9360;
|
||||
const columnWidth = Math.floor(totalWidth / tableColumnCount);
|
||||
const columnWidths = Array(tableColumnCount).fill(columnWidth);
|
||||
|
||||
children.push(new Table({
|
||||
rows: tableRows,
|
||||
width: { size: totalWidth, type: WidthType.DXA },
|
||||
columnWidths: columnWidths,
|
||||
}));
|
||||
tableRows = [];
|
||||
tableColumnCount = 0;
|
||||
inTable = false;
|
||||
}
|
||||
|
||||
const indentLevel = Math.floor(bulletListMatch[1].length / 2);
|
||||
const listText = bulletListMatch[2];
|
||||
|
||||
// Parse inline formatting in list item text
|
||||
const segments = parseInlineFormatting(listText);
|
||||
const listItemRuns: Array<TextRun | ExternalHyperlink> = [];
|
||||
|
||||
segments.forEach(segment => {
|
||||
if (segment.link) {
|
||||
listItemRuns.push(
|
||||
new ExternalHyperlink({
|
||||
children: [new TextRun({
|
||||
text: segment.text,
|
||||
color: '0000FF',
|
||||
underline: {}
|
||||
})],
|
||||
link: segment.link,
|
||||
})
|
||||
);
|
||||
} else {
|
||||
listItemRuns.push(
|
||||
new TextRun({
|
||||
text: segment.text,
|
||||
bold: segment.bold,
|
||||
italics: segment.italic,
|
||||
font: segment.code ? 'Courier New' : undefined,
|
||||
})
|
||||
);
|
||||
}
|
||||
});
|
||||
|
||||
children.push(
|
||||
new Paragraph({
|
||||
children: listItemRuns,
|
||||
bullet: { level: indentLevel },
|
||||
spacing: { after: 100 },
|
||||
})
|
||||
);
|
||||
continue;
|
||||
}
|
||||
|
||||
// Check for table rows
|
||||
if (line.includes('|') && line.split('|').length > 2) {
|
||||
// Skip separator lines (|---|---|)
|
||||
if (line.match(/^\|[\s\-\|:]+\|$/)) {
|
||||
continue;
|
||||
}
|
||||
|
||||
// Flush current paragraph
|
||||
if (currentParagraphRuns.length > 0) {
|
||||
children.push(new Paragraph({ children: currentParagraphRuns }));
|
||||
currentParagraphRuns = [];
|
||||
}
|
||||
|
||||
// Parse table cells
|
||||
const cells = line
|
||||
.split('|')
|
||||
.map(cell => cell.trim())
|
||||
.filter(cell => cell !== '');
|
||||
|
||||
if (cells.length > 0) {
|
||||
// Check if this is a new table (different column count)
|
||||
if (inTable && tableColumnCount > 0 && cells.length !== tableColumnCount) {
|
||||
// Flush the previous table
|
||||
if (tableRows.length > 0) {
|
||||
const totalWidth = 9360;
|
||||
const columnWidth = Math.floor(totalWidth / tableColumnCount);
|
||||
const columnWidths = Array(tableColumnCount).fill(columnWidth);
|
||||
|
||||
children.push(new Table({
|
||||
rows: tableRows,
|
||||
width: { size: totalWidth, type: WidthType.DXA },
|
||||
columnWidths: columnWidths,
|
||||
}));
|
||||
tableRows = [];
|
||||
}
|
||||
// Reset for new table
|
||||
tableColumnCount = cells.length;
|
||||
} else if (!inTable) {
|
||||
// Starting a new table
|
||||
inTable = true;
|
||||
tableColumnCount = cells.length;
|
||||
}
|
||||
|
||||
// Create table cells with inline formatting
|
||||
const tableCells = cells.map(cell => {
|
||||
const cellSegments = parseInlineFormatting(cell);
|
||||
const cellRuns: Array<TextRun | ExternalHyperlink> = [];
|
||||
|
||||
cellSegments.forEach(segment => {
|
||||
if (segment.link) {
|
||||
cellRuns.push(
|
||||
new ExternalHyperlink({
|
||||
children: [new TextRun({
|
||||
text: segment.text,
|
||||
color: '0000FF',
|
||||
underline: {}
|
||||
})],
|
||||
link: segment.link,
|
||||
})
|
||||
);
|
||||
} else {
|
||||
cellRuns.push(
|
||||
new TextRun({
|
||||
text: segment.text,
|
||||
bold: segment.bold,
|
||||
italics: segment.italic,
|
||||
font: segment.code ? 'Courier New' : undefined,
|
||||
})
|
||||
);
|
||||
}
|
||||
});
|
||||
|
||||
return new TableCell({
|
||||
children: [new Paragraph({
|
||||
children: cellRuns,
|
||||
spacing: { before: 100, after: 100 },
|
||||
})],
|
||||
margins: {
|
||||
top: 100,
|
||||
bottom: 100,
|
||||
left: 100,
|
||||
right: 100,
|
||||
},
|
||||
});
|
||||
});
|
||||
|
||||
// Add row to table
|
||||
tableRows.push(new TableRow({ children: tableCells }));
|
||||
}
|
||||
continue;
|
||||
} else if (inTable) {
|
||||
// End of table - flush accumulated rows
|
||||
if (tableRows.length > 0) {
|
||||
// Calculate equal column widths in DXA (9360 total = 6.5")
|
||||
const totalWidth = 9360;
|
||||
const columnWidth = Math.floor(totalWidth / tableColumnCount);
|
||||
const columnWidths = Array(tableColumnCount).fill(columnWidth);
|
||||
|
||||
children.push(new Table({
|
||||
rows: tableRows,
|
||||
width: { size: totalWidth, type: WidthType.DXA },
|
||||
columnWidths: columnWidths,
|
||||
}));
|
||||
tableRows = [];
|
||||
tableColumnCount = 0;
|
||||
inTable = false;
|
||||
}
|
||||
}
|
||||
|
||||
// Regular text - parse inline formatting
|
||||
const segments = parseInlineFormatting(line);
|
||||
|
||||
segments.forEach(segment => {
|
||||
if (segment.link) {
|
||||
currentParagraphRuns.push(
|
||||
new ExternalHyperlink({
|
||||
children: [new TextRun({
|
||||
text: segment.text,
|
||||
color: '0000FF',
|
||||
underline: {}
|
||||
})],
|
||||
link: segment.link,
|
||||
})
|
||||
);
|
||||
} else {
|
||||
currentParagraphRuns.push(
|
||||
new TextRun({
|
||||
text: segment.text,
|
||||
bold: segment.bold,
|
||||
italics: segment.italic,
|
||||
font: segment.code ? 'Courier New' : undefined,
|
||||
})
|
||||
);
|
||||
}
|
||||
});
|
||||
|
||||
// Flush at end of line if next line is empty or last line
|
||||
if (i === lines.length - 1 || !lines[i + 1].trim()) {
|
||||
children.push(
|
||||
new Paragraph({
|
||||
children: currentParagraphRuns,
|
||||
spacing: { after: 200 },
|
||||
})
|
||||
);
|
||||
currentParagraphRuns = [];
|
||||
}
|
||||
}
|
||||
|
||||
// Flush any remaining table at end of content
|
||||
if (inTable && tableRows.length > 0) {
|
||||
// Calculate equal column widths in DXA (9360 total = 6.5")
|
||||
const totalWidth = 9360;
|
||||
const columnWidth = Math.floor(totalWidth / tableColumnCount);
|
||||
const columnWidths = Array(tableColumnCount).fill(columnWidth);
|
||||
|
||||
children.push(new Table({
|
||||
rows: tableRows,
|
||||
width: { size: totalWidth, type: WidthType.DXA },
|
||||
columnWidths: columnWidths,
|
||||
}));
|
||||
}
|
||||
|
||||
// Create document with numbering configuration
|
||||
const doc = new Document({
|
||||
numbering: {
|
||||
config: [
|
||||
{
|
||||
reference: 'default-numbering',
|
||||
levels: [
|
||||
{
|
||||
level: 0,
|
||||
format: LevelFormat.DECIMAL,
|
||||
text: '%1.',
|
||||
alignment: AlignmentType.START,
|
||||
style: {
|
||||
paragraph: {
|
||||
indent: {
|
||||
left: convertInchesToTwip(0.5),
|
||||
hanging: convertInchesToTwip(0.18)
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
level: 1,
|
||||
format: LevelFormat.DECIMAL,
|
||||
text: '%2.',
|
||||
alignment: AlignmentType.START,
|
||||
style: {
|
||||
paragraph: {
|
||||
indent: {
|
||||
left: convertInchesToTwip(1.0),
|
||||
hanging: convertInchesToTwip(0.18)
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
level: 2,
|
||||
format: LevelFormat.DECIMAL,
|
||||
text: '%3.',
|
||||
alignment: AlignmentType.START,
|
||||
style: {
|
||||
paragraph: {
|
||||
indent: {
|
||||
left: convertInchesToTwip(1.5),
|
||||
hanging: convertInchesToTwip(0.18)
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
],
|
||||
},
|
||||
],
|
||||
},
|
||||
sections: [
|
||||
{
|
||||
properties: {},
|
||||
children,
|
||||
},
|
||||
],
|
||||
});
|
||||
|
||||
// Generate and save DOCX file
|
||||
Packer.toBlob(doc).then((blob) => {
|
||||
saveAs(blob, `${defaultFilename}.docx`);
|
||||
});
|
||||
break;
|
||||
}
|
||||
|
||||
default:
|
||||
throw new Error(`Unsupported format: ${format}`);
|
||||
}
|
||||
} catch (error) {
|
||||
console.error('Download failed:', error);
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
// Detect the best format based on content
|
||||
export function suggestFormat(content: string): string[] {
|
||||
return ['txt', 'md', 'docx'];
|
||||
}
|
||||
|
||||
// Get format description
|
||||
export function getFormatDescription(format: string): string {
|
||||
switch (format) {
|
||||
case 'txt': return 'Plain text file';
|
||||
case 'md': return 'Markdown file';
|
||||
case 'docx': return 'Word document';
|
||||
default: return 'Unknown format';
|
||||
}
|
||||
}
|
||||
291
apps/tenant-app/src/lib/markdown-parser.ts
Normal file
291
apps/tenant-app/src/lib/markdown-parser.ts
Normal file
@@ -0,0 +1,291 @@
|
||||
/**
|
||||
* Markdown Parser for Export Functionality
|
||||
*
|
||||
* Uses remark (already installed) for AST-based parsing.
|
||||
* Extracts links, formatting, headers, code blocks, and Mermaid diagrams
|
||||
* for use in PDF/DOCX exports.
|
||||
*
|
||||
* GT 2.0 Compliance:
|
||||
* - No mocks: Real parsing using remark AST
|
||||
* - Fail fast: Throws on critical errors
|
||||
* - Zero complexity: Reuses existing remark dependency
|
||||
*/
|
||||
|
||||
import { remark } from 'remark';
|
||||
import remarkGfm from 'remark-gfm';
|
||||
import type { Root, Paragraph, Heading, Link, Text, Code, InlineCode, Emphasis, Strong, List, ListItem, Table, Blockquote } from 'mdast';
|
||||
|
||||
/** A hyperlink found in the document. */
export interface ParsedLink {
  text: string;      // visible link text
  url: string;
  title?: string;    // optional markdown title: [text](url "title")
  position: number;  // pre-order AST visit index (document order)
}

/** An inline formatting span. */
// NOTE(review): no code in this module populates this type yet; the
// exact meaning of `range` should be confirmed when it is first used.
export interface ParsedFormatting {
  type: 'bold' | 'italic' | 'code' | 'strikethrough';
  text: string;
  range: [number, number];
}

/** A heading with its level (1-6). */
export interface ParsedHeader {
  level: 1 | 2 | 3 | 4 | 5 | 6;
  text: string;
  position: number;
}

/** A fenced code block (non-Mermaid). */
export interface ParsedCodeBlock {
  language: string | null;  // null when the fence has no language tag
  code: string;
  position: number;
}

/** A fenced ```mermaid block, kept separate for diagram rendering. */
export interface ParsedMermaidBlock {
  code: string;
  position: number;
}

/** A GFM table split into its header row and body rows (cell text only). */
export interface ParsedTable {
  headers: string[];
  rows: string[][];
  position: number;
}

/** An ordered or unordered list with flattened item text. */
export interface ParsedList {
  type: 'ordered' | 'unordered';
  items: string[];
  position: number;
}

/** A blockquote's flattened text content. */
export interface ParsedBlockquote {
  text: string;
  position: number;
}

/** Aggregate result of parseMarkdown(). */
export interface ParsedMarkdown {
  links: ParsedLink[];
  headers: ParsedHeader[];
  codeBlocks: ParsedCodeBlock[];
  mermaidBlocks: ParsedMermaidBlock[];
  tables: ParsedTable[];
  lists: ParsedList[];
  blockquotes: ParsedBlockquote[];
  hasEmoji: boolean;             // content contains emoji code points
  hasUnsupportedChars: boolean;  // content contains CJK or RTL characters
}
|
||||
|
||||
/**
|
||||
* Extract text content from AST node recursively
|
||||
*/
|
||||
function extractText(node: any): string {
|
||||
if (node.type === 'text') {
|
||||
return node.value;
|
||||
}
|
||||
if (node.children) {
|
||||
return node.children.map(extractText).join('');
|
||||
}
|
||||
return '';
|
||||
}
|
||||
|
||||
/**
|
||||
* Detect emoji in text (common ranges)
|
||||
*/
|
||||
function hasEmojiChars(text: string): boolean {
|
||||
// Emoji ranges: emoticons, symbols, transport, etc.
|
||||
return /[\u{1F600}-\u{1F64F}\u{1F300}-\u{1F5FF}\u{1F680}-\u{1F6FF}\u{1F700}-\u{1F77F}\u{1F780}-\u{1F7FF}\u{1F800}-\u{1F8FF}\u{1F900}-\u{1F9FF}\u{1FA00}-\u{1FA6F}\u{1FA70}-\u{1FAFF}\u{2600}-\u{26FF}\u{2700}-\u{27BF}]/u.test(text);
|
||||
}
|
||||
|
||||
/**
|
||||
* Detect potentially unsupported characters (CJK, RTL, etc.)
|
||||
*/
|
||||
function hasUnsupportedChars(text: string): boolean {
|
||||
// CJK ranges (Chinese, Japanese, Korean)
|
||||
const hasCJK = /[\u{4E00}-\u{9FFF}\u{3400}-\u{4DBF}\u{20000}-\u{2A6DF}\u{3040}-\u{309F}\u{30A0}-\u{30FF}\u{AC00}-\u{D7AF}]/u.test(text);
|
||||
|
||||
// RTL ranges (Arabic, Hebrew)
|
||||
const hasRTL = /[\u{0600}-\u{06FF}\u{0750}-\u{077F}\u{0590}-\u{05FF}]/u.test(text);
|
||||
|
||||
return hasCJK || hasRTL;
|
||||
}
|
||||
|
||||
/**
|
||||
* Parse markdown content into structured data for exports
|
||||
*/
|
||||
export function parseMarkdown(content: string): ParsedMarkdown {
|
||||
if (!content || typeof content !== 'string') {
|
||||
throw new Error('Invalid markdown content: must be a non-empty string');
|
||||
}
|
||||
|
||||
const result: ParsedMarkdown = {
|
||||
links: [],
|
||||
headers: [],
|
||||
codeBlocks: [],
|
||||
mermaidBlocks: [],
|
||||
tables: [],
|
||||
lists: [],
|
||||
blockquotes: [],
|
||||
hasEmoji: hasEmojiChars(content),
|
||||
hasUnsupportedChars: hasUnsupportedChars(content),
|
||||
};
|
||||
|
||||
try {
|
||||
// Parse markdown to AST
|
||||
const tree = remark().use(remarkGfm).parse(content);
|
||||
|
||||
// Walk the AST and extract elements
|
||||
let position = 0;
|
||||
|
||||
function visit(node: any, parent?: any) {
|
||||
position++;
|
||||
|
||||
// Extract links
|
||||
if (node.type === 'link') {
|
||||
result.links.push({
|
||||
text: extractText(node),
|
||||
url: node.url,
|
||||
title: node.title,
|
||||
position,
|
||||
});
|
||||
}
|
||||
|
||||
// Extract headers
|
||||
if (node.type === 'heading') {
|
||||
result.headers.push({
|
||||
level: node.depth as 1 | 2 | 3 | 4 | 5 | 6,
|
||||
text: extractText(node),
|
||||
position,
|
||||
});
|
||||
}
|
||||
|
||||
// Extract code blocks
|
||||
if (node.type === 'code') {
|
||||
const lang = node.lang || null;
|
||||
|
||||
// Separate Mermaid diagrams from regular code blocks
|
||||
if (lang === 'mermaid') {
|
||||
result.mermaidBlocks.push({
|
||||
code: node.value,
|
||||
position,
|
||||
});
|
||||
} else {
|
||||
result.codeBlocks.push({
|
||||
language: lang,
|
||||
code: node.value,
|
||||
position,
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
// Extract tables
|
||||
if (node.type === 'table') {
|
||||
const headers: string[] = [];
|
||||
const rows: string[][] = [];
|
||||
|
||||
node.children.forEach((row: any, idx: number) => {
|
||||
const cells = row.children.map((cell: any) => extractText(cell));
|
||||
if (idx === 0) {
|
||||
headers.push(...cells);
|
||||
} else {
|
||||
rows.push(cells);
|
||||
}
|
||||
});
|
||||
|
||||
result.tables.push({
|
||||
headers,
|
||||
rows,
|
||||
position,
|
||||
});
|
||||
}
|
||||
|
||||
// Extract lists
|
||||
if (node.type === 'list') {
|
||||
const items = node.children.map((item: any) => extractText(item));
|
||||
result.lists.push({
|
||||
type: node.ordered ? 'ordered' : 'unordered',
|
||||
items,
|
||||
position,
|
||||
});
|
||||
}
|
||||
|
||||
// Extract blockquotes
|
||||
if (node.type === 'blockquote') {
|
||||
result.blockquotes.push({
|
||||
text: extractText(node),
|
||||
position,
|
||||
});
|
||||
}
|
||||
|
||||
// Recurse into children
|
||||
if (node.children) {
|
||||
node.children.forEach((child: any) => visit(child, node));
|
||||
}
|
||||
}
|
||||
|
||||
visit(tree);
|
||||
|
||||
return result;
|
||||
} catch (error) {
|
||||
// Fail fast on parsing errors
|
||||
throw new Error(`Markdown parsing failed: ${error instanceof Error ? error.message : 'Unknown error'}`);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Extract inline formatting from a text node
|
||||
* This is used for more granular formatting extraction within paragraphs
|
||||
*/
|
||||
export function extractInlineFormatting(node: any): Array<{ type: string; text: string }> {
|
||||
const results: Array<{ type: string; text: string }> = [];
|
||||
|
||||
function visit(n: any, currentFormat?: string) {
|
||||
if (n.type === 'text') {
|
||||
results.push({
|
||||
type: currentFormat || 'normal',
|
||||
text: n.value,
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
if (n.type === 'strong') {
|
||||
n.children.forEach((child: any) => visit(child, 'bold'));
|
||||
return;
|
||||
}
|
||||
|
||||
if (n.type === 'emphasis') {
|
||||
n.children.forEach((child: any) => visit(child, 'italic'));
|
||||
return;
|
||||
}
|
||||
|
||||
if (n.type === 'inlineCode') {
|
||||
results.push({
|
||||
type: 'code',
|
||||
text: n.value,
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
if (n.type === 'delete') {
|
||||
n.children.forEach((child: any) => visit(child, 'strikethrough'));
|
||||
return;
|
||||
}
|
||||
|
||||
if (n.children) {
|
||||
n.children.forEach((child: any) => visit(child, currentFormat));
|
||||
}
|
||||
}
|
||||
|
||||
visit(node);
|
||||
return results;
|
||||
}
|
||||
|
||||
/**
|
||||
* Parse markdown and return enriched AST for rendering
|
||||
* This provides access to the full remark AST for advanced use cases
|
||||
*/
|
||||
export function parseMarkdownToAST(content: string): Root {
|
||||
try {
|
||||
return remark().use(remarkGfm).parse(content);
|
||||
} catch (error) {
|
||||
throw new Error(`Failed to parse markdown to AST: ${error instanceof Error ? error.message : 'Unknown error'}`);
|
||||
}
|
||||
}
|
||||
242
apps/tenant-app/src/lib/mermaid-renderer.ts
Normal file
242
apps/tenant-app/src/lib/mermaid-renderer.ts
Normal file
@@ -0,0 +1,242 @@
|
||||
/**
|
||||
* Mermaid Diagram Renderer for Export Functionality
|
||||
*
|
||||
* Converts Mermaid diagram code to PNG images for embedding in PDF/DOCX exports.
|
||||
* Uses browser-native Canvas API with size validation and memory management.
|
||||
*
|
||||
* GT 2.0 Compliance:
|
||||
* - No mocks: Real Mermaid rendering
|
||||
* - Fail fast: Size validation before conversion
|
||||
* - Zero complexity: Client-side only, reuses existing Mermaid library
|
||||
*/
|
||||
|
||||
import mermaid from 'mermaid';
|
||||
|
||||
// Browser canvas size limit (32,767px maximum dimension)
const MAX_CANVAS_SIZE = 32000; // Safe limit below browser maximum

/** Outcome of rendering one Mermaid diagram to PNG. */
export interface DiagramRenderResult {
  success: boolean;
  data?: string;   // base64 PNG data URL (present only on success)
  error?: string;  // human-readable failure reason (present on failure)
  width?: number;  // rendered image width in px
  height?: number; // rendered image height in px
}
|
||||
|
||||
/**
 * Initialize Mermaid with export-friendly settings
 * Only call this once at module load
 */
let mermaidInitialized = false;

// Idempotent: configures the global mermaid singleton exactly once per
// page lifetime; subsequent calls are no-ops.
function initializeMermaid() {
  if (mermaidInitialized) return;

  mermaid.initialize({
    startOnLoad: false,
    theme: 'default', // Use default theme for better PDF/print compatibility
    themeVariables: {
      // Palette tuned for print legibility on white backgrounds.
      primaryColor: '#3b82f6',
      primaryTextColor: '#1f2937',
      primaryBorderColor: '#1e40af',
      lineColor: '#6b7280',
      secondaryColor: '#e5e7eb',
      tertiaryColor: '#f3f4f6',
    },
    fontFamily: 'Arial, sans-serif', // Standard font for PDF compatibility
    flowchart: {
      // useMaxWidth disabled so the SVG reports absolute pixel sizes,
      // which the export pipeline needs for canvas rasterization.
      useMaxWidth: false,
      htmlLabels: true,
      curve: 'basis',
    },
    sequence: {
      useMaxWidth: false,
      diagramMarginX: 50,
      diagramMarginY: 10,
    },
    // Keep the renderer from injecting error SVGs into the DOM;
    // failures surface through mermaid.render() rejections instead.
    suppressErrorRendering: true,
    // NOTE(review): 'errorLevel' does not appear to be a documented
    // mermaid.initialize() option — confirm against the mermaid config
    // schema; it may be silently ignored.
    errorLevel: 'fatal',
    logLevel: 'fatal',
  });

  mermaidInitialized = true;
}
|
||||
|
||||
/**
|
||||
* Parse SVG dimensions from SVG string
|
||||
*/
|
||||
function parseSVGDimensions(svgString: string): { width: number; height: number } {
|
||||
const parser = new DOMParser();
|
||||
const svgDoc = parser.parseFromString(svgString, 'image/svg+xml');
|
||||
const svgElement = svgDoc.documentElement;
|
||||
|
||||
// Try to get width/height from attributes
|
||||
let width = parseInt(svgElement.getAttribute('width') || '800');
|
||||
let height = parseInt(svgElement.getAttribute('height') || '600');
|
||||
|
||||
// If width/height are percentages or not set, try viewBox
|
||||
if (isNaN(width) || isNaN(height)) {
|
||||
const viewBox = svgElement.getAttribute('viewBox');
|
||||
if (viewBox) {
|
||||
const parts = viewBox.split(' ');
|
||||
if (parts.length === 4) {
|
||||
width = parseInt(parts[2]);
|
||||
height = parseInt(parts[3]);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Fallback to reasonable defaults
|
||||
if (isNaN(width) || width <= 0) width = 800;
|
||||
if (isNaN(height) || height <= 0) height = 600;
|
||||
|
||||
return { width, height };
|
||||
}
|
||||
|
||||
/**
 * Convert SVG string to PNG data URL using Canvas API
 *
 * Draws the SVG onto an offscreen canvas of the given size over a white
 * background, then exports it as a PNG data URL. Rejects if the canvas
 * context is unavailable, the SVG fails to load/encode, or the draw
 * throws (e.g. on a tainted canvas).
 */
async function svgToPNG(svgString: string, width: number, height: number): Promise<string> {
  return new Promise((resolve, reject) => {
    // Create canvas
    const canvas = document.createElement('canvas');
    canvas.width = width;
    canvas.height = height;

    const ctx = canvas.getContext('2d');
    if (!ctx) {
      reject(new Error('Failed to get canvas context'));
      return;
    }

    // Create image from SVG
    const img = new Image();

    img.onload = () => {
      try {
        // Fill white background (for better PDF rendering)
        ctx.fillStyle = '#ffffff';
        ctx.fillRect(0, 0, width, height);

        // Draw SVG image
        ctx.drawImage(img, 0, 0, width, height);

        // Convert to PNG data URL
        const pngDataUrl = canvas.toDataURL('image/png');
        resolve(pngDataUrl);
      } catch (error) {
        reject(new Error(`Canvas conversion failed: ${error instanceof Error ? error.message : 'Unknown error'}`));
      }
    };

    img.onerror = () => {
      reject(new Error('Failed to load SVG image'));
    };

    // CRITICAL FIX: Use base64 data URL directly to avoid canvas tainting
    // Using createObjectURL causes CORS issues and taints the canvas
    try {
      // Encode SVG to base64
      // NOTE(review): unescape() is deprecated; the UTF-8-safe modern
      // equivalent uses TextEncoder — consider migrating.
      const base64 = btoa(unescape(encodeURIComponent(svgString)));
      img.src = `data:image/svg+xml;base64,${base64}`;
    } catch (error) {
      reject(new Error(`Failed to encode SVG: ${error instanceof Error ? error.message : 'Unknown error'}`));
    };
  });
}
|
||||
|
||||
/**
|
||||
* Render a single Mermaid diagram to PNG
|
||||
*
|
||||
* @param code - Mermaid diagram code
|
||||
* @param id - Unique ID for this diagram (optional, auto-generated if not provided)
|
||||
* @returns DiagramRenderResult with PNG data or error
|
||||
*/
|
||||
export async function renderMermaidToPNG(code: string, id?: string): Promise<DiagramRenderResult> {
|
||||
// Initialize Mermaid if needed
|
||||
initializeMermaid();
|
||||
|
||||
// Validate input
|
||||
if (!code || typeof code !== 'string' || code.trim().length === 0) {
|
||||
return {
|
||||
success: false,
|
||||
error: 'Invalid or empty Mermaid code',
|
||||
};
|
||||
}
|
||||
|
||||
// Generate unique ID
|
||||
const diagramId = id || `mermaid-export-${Date.now()}-${Math.random().toString(36).substr(2, 9)}`;
|
||||
|
||||
try {
|
||||
// Render Mermaid to SVG
|
||||
const { svg } = await mermaid.render(diagramId, code.trim());
|
||||
|
||||
// Parse SVG dimensions
|
||||
const { width, height } = parseSVGDimensions(svg);
|
||||
|
||||
// CRITICAL: Validate size before Canvas conversion
|
||||
if (width > MAX_CANVAS_SIZE || height > MAX_CANVAS_SIZE) {
|
||||
return {
|
||||
success: false,
|
||||
error: `Diagram too large: ${width}x${height}px exceeds ${MAX_CANVAS_SIZE}px limit`,
|
||||
width,
|
||||
height,
|
||||
};
|
||||
}
|
||||
|
||||
// Convert SVG to PNG
|
||||
const pngDataUrl = await svgToPNG(svg, width, height);
|
||||
|
||||
return {
|
||||
success: true,
|
||||
data: pngDataUrl,
|
||||
width,
|
||||
height,
|
||||
};
|
||||
} catch (error) {
|
||||
return {
|
||||
success: false,
|
||||
error: `Mermaid rendering failed: ${error instanceof Error ? error.message : 'Unknown error'}`,
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Render multiple Mermaid diagrams sequentially (memory-efficient)
|
||||
*
|
||||
* @param diagrams - Array of Mermaid diagram code strings
|
||||
* @param onProgress - Optional callback for progress updates
|
||||
* @returns Array of DiagramRenderResults
|
||||
*/
|
||||
export async function renderMultipleDiagrams(
|
||||
diagrams: string[],
|
||||
onProgress?: (current: number, total: number) => void
|
||||
): Promise<DiagramRenderResult[]> {
|
||||
const results: DiagramRenderResult[] = [];
|
||||
|
||||
// Process diagrams sequentially to avoid memory issues
|
||||
for (let i = 0; i < diagrams.length; i++) {
|
||||
// Update progress
|
||||
if (onProgress) {
|
||||
onProgress(i + 1, diagrams.length);
|
||||
}
|
||||
|
||||
// Render diagram
|
||||
const result = await renderMermaidToPNG(diagrams[i], `diagram-${i}`);
|
||||
results.push(result);
|
||||
|
||||
// Allow garbage collection between renders
|
||||
// This prevents memory buildup when rendering many diagrams
|
||||
await new Promise((resolve) => setTimeout(resolve, 0));
|
||||
}
|
||||
|
||||
return results;
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a text placeholder for failed diagrams
|
||||
*/
|
||||
export function createDiagramPlaceholder(error?: string): string {
|
||||
const message = error ? `[Diagram rendering failed: ${error}]` : '[Diagram rendering failed]';
|
||||
return message;
|
||||
}
|
||||
800
apps/tenant-app/src/lib/mock-api.ts
Normal file
800
apps/tenant-app/src/lib/mock-api.ts
Normal file
@@ -0,0 +1,800 @@
|
||||
/**
|
||||
* Mock API Service for GT 2.0 Tenant Application
|
||||
* Provides realistic mock data for development and testing
|
||||
*/
|
||||
|
||||
export const mockApi = {
|
||||
// Auth endpoints
|
||||
auth: {
|
||||
login: async (email: string, password: string) => ({
|
||||
access_token: 'mock-tenant-jwt-token',
|
||||
refresh_token: 'mock-tenant-refresh-token',
|
||||
user: {
|
||||
id: 'user-1',
|
||||
email,
|
||||
full_name: 'Jane User',
|
||||
tenant: 'Test Company',
|
||||
role: 'user',
|
||||
avatar_url: null,
|
||||
}
|
||||
}),
|
||||
|
||||
logout: async () => ({ success: true }),
|
||||
|
||||
getProfile: async () => ({
|
||||
id: 'user-1',
|
||||
email: 'jane@test-company.com',
|
||||
full_name: 'Jane User',
|
||||
tenant: 'Test Company',
|
||||
role: 'user',
|
||||
avatar_url: null,
|
||||
preferences: {
|
||||
theme: 'light',
|
||||
notifications: true,
|
||||
ai_personality: 'balanced',
|
||||
}
|
||||
}),
|
||||
},
|
||||
|
||||
// Conversations endpoints
|
||||
conversations: {
|
||||
list: async () => ({
|
||||
conversations: [
|
||||
{
|
||||
id: 'conv-1',
|
||||
title: 'Research on AI Ethics',
|
||||
agent_id: 'asst-1',
|
||||
agent_name: 'Research Agent',
|
||||
last_message: 'I can help you explore various ethical frameworks...',
|
||||
created_at: '2024-01-20T10:00:00Z',
|
||||
updated_at: '2024-01-20T11:30:00Z',
|
||||
message_count: 12,
|
||||
},
|
||||
{
|
||||
id: 'conv-2',
|
||||
title: 'Code Review Session',
|
||||
agent_id: 'asst-2',
|
||||
agent_name: 'Coding Agent',
|
||||
last_message: 'The refactored code looks much cleaner now...',
|
||||
created_at: '2024-01-19T14:00:00Z',
|
||||
updated_at: '2024-01-19T15:45:00Z',
|
||||
message_count: 8,
|
||||
},
|
||||
{
|
||||
id: 'conv-3',
|
||||
title: 'Security Analysis',
|
||||
agent_id: 'asst-3',
|
||||
agent_name: 'Cyber Agent',
|
||||
last_message: 'The vulnerability assessment shows...',
|
||||
created_at: '2024-01-18T09:00:00Z',
|
||||
updated_at: '2024-01-18T10:30:00Z',
|
||||
message_count: 6,
|
||||
}
|
||||
],
|
||||
total: 3
|
||||
}),
|
||||
|
||||
create: async (data: any) => ({
|
||||
id: `conv-${Date.now()}`,
|
||||
title: data.title || 'New Conversation',
|
||||
agent_id: data.agent_id,
|
||||
created_at: new Date().toISOString(),
|
||||
updated_at: new Date().toISOString(),
|
||||
message_count: 0,
|
||||
}),
|
||||
|
||||
get: async (id: string) => ({
|
||||
id,
|
||||
title: 'Research on AI Ethics',
|
||||
agent_id: 'asst-1',
|
||||
agent_name: 'Research Agent',
|
||||
system_prompt: 'You are a helpful research agent...',
|
||||
created_at: '2024-01-20T10:00:00Z',
|
||||
updated_at: '2024-01-20T11:30:00Z',
|
||||
message_count: 12,
|
||||
context_sources: ['doc-1', 'doc-2'],
|
||||
}),
|
||||
|
||||
getMessages: async (id: string) => ({
|
||||
messages: [
|
||||
{
|
||||
id: 'msg-1',
|
||||
role: 'user',
|
||||
content: 'Can you help me understand the ethical implications of AI in healthcare?',
|
||||
created_at: '2024-01-20T10:00:00Z',
|
||||
},
|
||||
{
|
||||
id: 'msg-2',
|
||||
role: 'agent',
|
||||
content: `I'd be happy to help you explore the ethical implications of AI in healthcare. This is a crucial topic with several key dimensions:
|
||||
|
||||
## Key Ethical Considerations
|
||||
|
||||
### 1. **Patient Privacy and Data Protection**
|
||||
- AI systems require vast amounts of patient data for training
|
||||
- Concerns about data breaches and unauthorized access
|
||||
- Need for robust anonymization techniques
|
||||
|
||||
### 2. **Bias and Fairness**
|
||||
- AI models can perpetuate existing healthcare disparities
|
||||
- Training data often underrepresents minority populations
|
||||
- Risk of discriminatory outcomes in diagnosis and treatment recommendations
|
||||
|
||||
### 3. **Transparency and Explainability**
|
||||
- "Black box" nature of many AI algorithms
|
||||
- Physicians and patients need to understand AI-driven decisions
|
||||
- Regulatory requirements for explainable AI in healthcare
|
||||
|
||||
### 4. **Accountability and Liability**
|
||||
- Who is responsible when AI makes an error?
|
||||
- Legal frameworks still evolving
|
||||
- Need for clear governance structures
|
||||
|
||||
Would you like to explore any of these areas in more detail?`,
|
||||
created_at: '2024-01-20T10:01:00Z',
|
||||
model_used: 'gpt-4',
|
||||
tokens_used: 245,
|
||||
},
|
||||
{
|
||||
id: 'msg-3',
|
||||
role: 'user',
|
||||
content: 'Yes, can you elaborate on the bias and fairness issues?',
|
||||
created_at: '2024-01-20T10:05:00Z',
|
||||
},
|
||||
{
|
||||
id: 'msg-4',
|
||||
role: 'agent',
|
||||
content: `Certainly! Bias and fairness in healthcare AI is a critical issue that can have life-altering consequences. Let me break this down:
|
||||
|
||||
## Sources of Bias in Healthcare AI
|
||||
|
||||
### Historical Data Bias
|
||||
- Medical datasets often reflect past inequities in healthcare access
|
||||
- Underdiagnosis in certain populations gets encoded into AI models
|
||||
- Example: Skin cancer detection AI trained primarily on light-skinned patients
|
||||
|
||||
### Representation Bias
|
||||
- Clinical trials historically underrepresented women and minorities
|
||||
- Rural populations often excluded from datasets
|
||||
- Socioeconomic factors affecting data availability
|
||||
|
||||
## Real-World Consequences
|
||||
|
||||
1. **Diagnostic Disparities**: AI may be less accurate for underrepresented groups
|
||||
2. **Treatment Recommendations**: Biased algorithms might suggest different treatments based on demographics rather than medical need
|
||||
3. **Resource Allocation**: AI-driven triage systems might systematically disadvantage certain populations
|
||||
|
||||
## Mitigation Strategies
|
||||
|
||||
- **Diverse Training Data**: Actively collecting representative datasets
|
||||
- **Bias Testing**: Regular audits for discriminatory patterns
|
||||
- **Fairness Constraints**: Building equity metrics into model optimization
|
||||
- **Community Involvement**: Including affected populations in AI development
|
||||
|
||||
The goal is to ensure AI enhances healthcare equity rather than exacerbating existing disparities.`,
|
||||
created_at: '2024-01-20T10:06:00Z',
|
||||
model_used: 'gpt-4',
|
||||
tokens_used: 312,
|
||||
}
|
||||
],
|
||||
total: 4
|
||||
}),
|
||||
|
||||
sendMessage: async (conversationId: string, content: string) => ({
|
||||
id: `msg-${Date.now()}`,
|
||||
role: 'agent',
|
||||
content: `I understand you're asking about "${content}". Let me help you with that...`,
|
||||
created_at: new Date().toISOString(),
|
||||
model_used: 'gpt-4',
|
||||
tokens_used: Math.floor(Math.random() * 500) + 100,
|
||||
}),
|
||||
},
|
||||
|
||||
// Agents endpoints
|
||||
agents: {
|
||||
list: async () => ({
|
||||
data: [
|
||||
{
|
||||
id: 'asst-1',
|
||||
name: 'Research Agent',
|
||||
description: 'Specialized in research, analysis, and information synthesis',
|
||||
template_id: 'research_agent',
|
||||
category: 'research',
|
||||
personality_config: { tone: 'formal', explanation_depth: 'detailed' },
|
||||
resource_preferences: { primary_llm: 'gpt-4', temperature: 0.7 },
|
||||
tags: ['research', 'analysis'],
|
||||
is_favorite: false,
|
||||
conversation_count: 15,
|
||||
total_cost_cents: 1250,
|
||||
created_at: '2024-01-15T10:00:00Z',
|
||||
updated_at: '2024-01-20T11:30:00Z',
|
||||
},
|
||||
{
|
||||
id: 'asst-2',
|
||||
name: 'Coding Agent',
|
||||
description: 'Expert in software development and code review',
|
||||
template_id: 'coding_agent',
|
||||
category: 'development',
|
||||
personality_config: { tone: 'technical', explanation_depth: 'code-focused' },
|
||||
resource_preferences: { primary_llm: 'claude-3-sonnet', temperature: 0.3 },
|
||||
tags: ['coding', 'development'],
|
||||
is_favorite: true,
|
||||
conversation_count: 12,
|
||||
total_cost_cents: 890,
|
||||
created_at: '2024-01-14T10:00:00Z',
|
||||
updated_at: '2024-01-19T15:45:00Z',
|
||||
},
|
||||
{
|
||||
id: 'asst-3',
|
||||
name: 'Cybersecurity Agent',
|
||||
description: 'Threat detection and security analysis specialist',
|
||||
template_id: 'cyber_agent',
|
||||
category: 'cybersecurity',
|
||||
personality_config: { tone: 'professional', explanation_depth: 'technical' },
|
||||
resource_preferences: { primary_llm: 'gpt-4', temperature: 0.2 },
|
||||
tags: ['security', 'analysis'],
|
||||
is_favorite: false,
|
||||
conversation_count: 8,
|
||||
total_cost_cents: 670,
|
||||
created_at: '2024-01-13T10:00:00Z',
|
||||
updated_at: '2024-01-18T10:30:00Z',
|
||||
}
|
||||
],
|
||||
total: 3,
|
||||
limit: 50,
|
||||
offset: 0
|
||||
}),
|
||||
|
||||
create: async (data: any) => ({
|
||||
id: `asst-${Date.now()}`,
|
||||
...data,
|
||||
created_at: new Date().toISOString(),
|
||||
conversation_count: 0,
|
||||
}),
|
||||
|
||||
get: async (id: string) => ({
|
||||
id,
|
||||
name: 'Research Agent',
|
||||
description: 'Specialized in research, analysis, and information synthesis',
|
||||
template_id: 'research_agent',
|
||||
category: 'research',
|
||||
personality_config: { tone: 'formal', explanation_depth: 'detailed' },
|
||||
resource_preferences: { primary_llm: 'gpt-4', temperature: 0.7, max_tokens: 4000 },
|
||||
tags: ['research', 'analysis'],
|
||||
is_favorite: false,
|
||||
conversation_count: 15,
|
||||
total_cost_cents: 1250,
|
||||
created_at: '2024-01-15T10:00:00Z',
|
||||
updated_at: '2024-01-20T11:30:00Z',
|
||||
}),
|
||||
|
||||
update: async (id: string, data: any) => ({
|
||||
id,
|
||||
...data,
|
||||
updated_at: new Date().toISOString(),
|
||||
}),
|
||||
|
||||
delete: async (id: string) => ({ success: true }),
|
||||
},
|
||||
|
||||
// Backward compatibility: agents endpoint delegates to agents
|
||||
agents: {
|
||||
list: async () => {
|
||||
const agentResponse = await mockApi.agents.list();
|
||||
return {
|
||||
agents: agentResponse.data,
|
||||
total: agentResponse.total
|
||||
};
|
||||
},
|
||||
get: async (id: string) => mockApi.agents.get(id),
|
||||
create: async (data: any) => mockApi.agents.create(data),
|
||||
update: async (id: string, data: any) => mockApi.agents.update(id, data),
|
||||
delete: async (id: string) => mockApi.agents.delete(id),
|
||||
},
|
||||
|
||||
// Documents endpoints
|
||||
documents: {
|
||||
list: async () => ({
|
||||
documents: [
|
||||
{
|
||||
id: 'doc-1',
|
||||
filename: 'AI_Ethics_Framework.pdf',
|
||||
file_type: 'application/pdf',
|
||||
file_size: 2456789,
|
||||
processing_status: 'completed',
|
||||
chunk_count: 45,
|
||||
uploaded_by: 'jane@test-company.com',
|
||||
created_at: '2024-01-18T10:00:00Z',
|
||||
processed_at: '2024-01-18T10:05:00Z',
|
||||
},
|
||||
{
|
||||
id: 'doc-2',
|
||||
filename: 'Healthcare_Data_Analysis.docx',
|
||||
file_type: 'application/docx',
|
||||
file_size: 1234567,
|
||||
processing_status: 'completed',
|
||||
chunk_count: 32,
|
||||
uploaded_by: 'jane@test-company.com',
|
||||
created_at: '2024-01-17T14:00:00Z',
|
||||
processed_at: '2024-01-17T14:03:00Z',
|
||||
},
|
||||
{
|
||||
id: 'doc-3',
|
||||
filename: 'Security_Best_Practices.md',
|
||||
file_type: 'text/markdown',
|
||||
file_size: 98765,
|
||||
processing_status: 'processing',
|
||||
chunk_count: 0,
|
||||
uploaded_by: 'jane@test-company.com',
|
||||
created_at: '2024-01-20T12:00:00Z',
|
||||
processed_at: null,
|
||||
}
|
||||
],
|
||||
total: 3,
|
||||
storage_used: 3790121,
|
||||
storage_limit: 10737418240, // 10GB
|
||||
}),
|
||||
|
||||
upload: async (file: File) => ({
|
||||
id: `doc-${Date.now()}`,
|
||||
filename: file.name,
|
||||
file_type: file.type,
|
||||
file_size: file.size,
|
||||
processing_status: 'pending',
|
||||
created_at: new Date().toISOString(),
|
||||
}),
|
||||
|
||||
delete: async (id: string) => ({ success: true }),
|
||||
|
||||
getChunks: async (id: string) => ({
|
||||
chunks: [
|
||||
{
|
||||
id: 'chunk-1',
|
||||
document_id: id,
|
||||
content: 'This is a sample chunk from the document...',
|
||||
chunk_index: 0,
|
||||
tokens: 125,
|
||||
},
|
||||
{
|
||||
id: 'chunk-2',
|
||||
document_id: id,
|
||||
content: 'Another chunk with important information...',
|
||||
chunk_index: 1,
|
||||
tokens: 98,
|
||||
}
|
||||
],
|
||||
total: 2,
|
||||
}),
|
||||
},
|
||||
|
||||
// RAG endpoints
|
||||
rag: {
|
||||
search: async (query: string, datasetIds?: string[]) => ({
|
||||
results: [
|
||||
{
|
||||
id: 'result-1',
|
||||
content: 'AI ethics frameworks typically consider principles like fairness, transparency, and accountability...',
|
||||
source: 'AI_Ethics_Framework.pdf',
|
||||
relevance_score: 0.92,
|
||||
chunk_id: 'chunk-15',
|
||||
page_number: 12,
|
||||
},
|
||||
{
|
||||
id: 'result-2',
|
||||
content: 'Healthcare data must be handled with strict privacy controls and patient consent...',
|
||||
source: 'Healthcare_Data_Analysis.docx',
|
||||
relevance_score: 0.87,
|
||||
chunk_id: 'chunk-23',
|
||||
page_number: 8,
|
||||
}
|
||||
],
|
||||
total: 2,
|
||||
query_embedding_time_ms: 45,
|
||||
search_time_ms: 123,
|
||||
}),
|
||||
|
||||
getDatasets: async () => ({
|
||||
datasets: [
|
||||
{
|
||||
id: 'dataset-1',
|
||||
name: 'Research Papers',
|
||||
description: 'Collection of AI and healthcare research papers',
|
||||
document_count: 12,
|
||||
chunk_count: 456,
|
||||
vector_count: 456,
|
||||
embedding_model: 'text-embedding-3-small',
|
||||
status: 'active',
|
||||
created_at: '2024-01-10T10:00:00Z',
|
||||
},
|
||||
{
|
||||
id: 'dataset-2',
|
||||
name: 'Security Documentation',
|
||||
description: 'Cybersecurity best practices and guidelines',
|
||||
document_count: 8,
|
||||
chunk_count: 234,
|
||||
vector_count: 234,
|
||||
embedding_model: 'text-embedding-3-small',
|
||||
status: 'active',
|
||||
created_at: '2024-01-12T10:00:00Z',
|
||||
}
|
||||
],
|
||||
total: 2,
|
||||
}),
|
||||
|
||||
createDataset: async (data: any) => ({
|
||||
id: `dataset-${Date.now()}`,
|
||||
...data,
|
||||
document_count: 0,
|
||||
chunk_count: 0,
|
||||
vector_count: 0,
|
||||
status: 'active',
|
||||
created_at: new Date().toISOString(),
|
||||
}),
|
||||
},
|
||||
|
||||
// Agents endpoints
|
||||
agents: {
|
||||
list: async () => ({
|
||||
agents: [
|
||||
{
|
||||
id: 'agent-1',
|
||||
name: 'Research Specialist',
|
||||
agent_type: 'research',
|
||||
description: 'Autonomous research agent for deep analysis',
|
||||
status: 'idle',
|
||||
capabilities: ['web_search', 'document_synthesis', 'report_generation'],
|
||||
execution_count: 24,
|
||||
last_execution: '2024-01-19T16:00:00Z',
|
||||
},
|
||||
{
|
||||
id: 'agent-2',
|
||||
name: 'Code Reviewer',
|
||||
agent_type: 'coding',
|
||||
description: 'Automated code review and improvement suggestions',
|
||||
status: 'idle',
|
||||
capabilities: ['code_analysis', 'security_scanning', 'refactoring'],
|
||||
execution_count: 18,
|
||||
last_execution: '2024-01-18T14:00:00Z',
|
||||
}
|
||||
],
|
||||
total: 2,
|
||||
}),
|
||||
|
||||
create: async (data: any) => ({
|
||||
id: `agent-${Date.now()}`,
|
||||
...data,
|
||||
status: 'idle',
|
||||
execution_count: 0,
|
||||
created_at: new Date().toISOString(),
|
||||
}),
|
||||
|
||||
execute: async (agentId: string, task: string) => ({
|
||||
execution_id: `exec-${Date.now()}`,
|
||||
agent_id: agentId,
|
||||
task,
|
||||
status: 'running',
|
||||
started_at: new Date().toISOString(),
|
||||
estimated_duration: 30,
|
||||
}),
|
||||
},
|
||||
|
||||
// External Services endpoints
|
||||
services: {
|
||||
list: async () => ({
|
||||
services: [
|
||||
{
|
||||
id: 'svc-1',
|
||||
name: 'Canvas LMS',
|
||||
type: 'educational_service',
|
||||
description: 'Learning Management System',
|
||||
icon: '📚',
|
||||
status: 'available',
|
||||
category: 'education',
|
||||
},
|
||||
{
|
||||
id: 'svc-2',
|
||||
name: 'CTFd Platform',
|
||||
type: 'cybersecurity_service',
|
||||
description: 'Capture The Flag competition platform',
|
||||
icon: '🚩',
|
||||
status: 'available',
|
||||
category: 'cybersecurity',
|
||||
},
|
||||
{
|
||||
id: 'svc-3',
|
||||
name: 'Jupyter Hub',
|
||||
type: 'development_service',
|
||||
description: 'Interactive development environment',
|
||||
icon: '📓',
|
||||
status: 'available',
|
||||
category: 'development',
|
||||
},
|
||||
{
|
||||
id: 'svc-4',
|
||||
name: 'Guacamole',
|
||||
type: 'remote_access_service',
|
||||
description: 'Remote desktop gateway',
|
||||
icon: '🖥️',
|
||||
status: 'available',
|
||||
category: 'infrastructure',
|
||||
}
|
||||
],
|
||||
total: 4,
|
||||
}),
|
||||
|
||||
getEmbedConfig: async (serviceId: string) => ({
|
||||
iframe_url: `https://${serviceId}.test-company.gt2.com`,
|
||||
sandbox_attributes: ['allow-same-origin', 'allow-scripts', 'allow-forms'],
|
||||
authentication_token: 'mock-sso-token',
|
||||
session_data: {
|
||||
user_id: 'user-1',
|
||||
tenant_id: 'test-company',
|
||||
permissions: ['read', 'write'],
|
||||
},
|
||||
}),
|
||||
},
|
||||
|
||||
// Projects endpoints
|
||||
projects: {
|
||||
list: async () => ({
|
||||
projects: [
|
||||
{
|
||||
id: 'proj-1',
|
||||
name: 'AI Ethics Research',
|
||||
description: 'Comprehensive research on ethical implications of AI in healthcare applications',
|
||||
project_type: 'research',
|
||||
status: 'active',
|
||||
completion_percentage: 65,
|
||||
linked_resources: ['gpt-4', 'semantic-search', 'document-processor'],
|
||||
collaborators: [
|
||||
{ id: 'user-2', name: 'Alice Johnson' },
|
||||
{ id: 'user-3', name: 'Bob Smith' }
|
||||
],
|
||||
time_invested_minutes: 480,
|
||||
ai_interactions_count: 145,
|
||||
created_at: '2024-01-10T08:00:00Z',
|
||||
last_activity: '2024-01-25T14:30:00Z'
|
||||
},
|
||||
{
|
||||
id: 'proj-2',
|
||||
name: 'Security Vulnerability Analysis',
|
||||
description: 'Analyzing potential security vulnerabilities in cloud infrastructure',
|
||||
project_type: 'analysis',
|
||||
status: 'active',
|
||||
completion_percentage: 30,
|
||||
linked_resources: ['cyber-analyst', 'security-scanner'],
|
||||
collaborators: [
|
||||
{ id: 'user-4', name: 'David Chen' }
|
||||
],
|
||||
time_invested_minutes: 240,
|
||||
ai_interactions_count: 89,
|
||||
created_at: '2024-01-15T10:00:00Z',
|
||||
last_activity: '2024-01-25T16:00:00Z'
|
||||
},
|
||||
{
|
||||
id: 'proj-3',
|
||||
name: 'Customer Sentiment Dashboard',
|
||||
description: 'Building a real-time dashboard for customer sentiment analysis',
|
||||
project_type: 'development',
|
||||
status: 'completed',
|
||||
completion_percentage: 100,
|
||||
linked_resources: ['coding-agent', 'github-connector'],
|
||||
collaborators: [],
|
||||
time_invested_minutes: 960,
|
||||
ai_interactions_count: 234,
|
||||
created_at: '2023-12-01T09:00:00Z',
|
||||
last_activity: '2024-01-20T17:00:00Z'
|
||||
},
|
||||
{
|
||||
id: 'proj-4',
|
||||
name: 'Market Trend Analysis Q1',
|
||||
description: 'Quarterly market trend analysis and competitor benchmarking',
|
||||
project_type: 'analysis',
|
||||
status: 'on_hold',
|
||||
completion_percentage: 45,
|
||||
linked_resources: ['research-agent', 'web-search'],
|
||||
collaborators: [
|
||||
{ id: 'user-5', name: 'Emma Wilson' },
|
||||
{ id: 'user-6', name: 'Frank Lee' },
|
||||
{ id: 'user-7', name: 'Grace Kim' }
|
||||
],
|
||||
time_invested_minutes: 320,
|
||||
ai_interactions_count: 67,
|
||||
created_at: '2024-01-05T11:00:00Z',
|
||||
last_activity: '2024-01-18T13:00:00Z'
|
||||
}
|
||||
],
|
||||
total: 4
|
||||
}),
|
||||
|
||||
create: async (data: any) => ({
|
||||
id: `proj-${Date.now()}`,
|
||||
...data,
|
||||
status: 'active',
|
||||
completion_percentage: 0,
|
||||
created_at: new Date().toISOString(),
|
||||
}),
|
||||
},
|
||||
|
||||
// Games & AI Literacy endpoints
|
||||
games: {
|
||||
list: async () => ({
|
||||
games: [
|
||||
{
|
||||
id: 'game-1',
|
||||
name: 'Strategic Chess',
|
||||
type: 'chess',
|
||||
category: 'strategic_game',
|
||||
description: 'Improve strategic thinking with AI opponents',
|
||||
icon: '♟️',
|
||||
difficulty_levels: ['beginner', 'intermediate', 'expert'],
|
||||
user_rating: 1450,
|
||||
games_played: 23,
|
||||
win_rate: 0.43,
|
||||
},
|
||||
{
|
||||
id: 'game-2',
|
||||
name: 'Logic Puzzles',
|
||||
type: 'logic_puzzle',
|
||||
category: 'puzzle',
|
||||
description: 'Lateral thinking and logical deduction challenges',
|
||||
icon: '🧩',
|
||||
difficulty_levels: ['easy', 'medium', 'hard'],
|
||||
puzzles_solved: 45,
|
||||
average_time: 8.5,
|
||||
hint_usage_rate: 0.2,
|
||||
},
|
||||
{
|
||||
id: 'game-3',
|
||||
name: 'Ethical Dilemmas',
|
||||
type: 'philosophical_dilemma',
|
||||
category: 'philosophy',
|
||||
description: 'Explore ethical frameworks through scenarios',
|
||||
icon: '🤔',
|
||||
scenarios_completed: 12,
|
||||
frameworks_explored: ['utilitarian', 'deontological', 'virtue_ethics'],
|
||||
depth_score: 82,
|
||||
}
|
||||
],
|
||||
total: 3,
|
||||
achievements: [
|
||||
{ id: 'ach-1', name: 'First Victory', icon: '🏆', earned: true },
|
||||
{ id: 'ach-2', name: 'Strategic Thinker', icon: '🧠', earned: false },
|
||||
{ id: 'ach-3', name: 'Problem Solver', icon: '💡', earned: true },
|
||||
],
|
||||
}),
|
||||
|
||||
startGame: async (gameType: string, options?: any) => ({
|
||||
game_id: `game-${Date.now()}`,
|
||||
game_type: gameType,
|
||||
initial_state: {
|
||||
board: gameType === 'chess' ? 'rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR' : null,
|
||||
turn: 'player',
|
||||
time_remaining: 600,
|
||||
},
|
||||
ai_opponent: {
|
||||
name: 'AI Agent',
|
||||
difficulty: options?.difficulty || 'intermediate',
|
||||
personality: 'teaching',
|
||||
},
|
||||
}),
|
||||
|
||||
makeMove: async (gameId: string, move: any) => ({
|
||||
game_id: gameId,
|
||||
player_move: move,
|
||||
ai_response: {
|
||||
move: 'e7e5',
|
||||
explanation: 'I\'m developing my center control...',
|
||||
alternative_moves: ['d7d5', 'g8f6'],
|
||||
},
|
||||
updated_state: {
|
||||
board: 'rnbqkbnr/pppp1ppp/8/4p3/4P3/8/PPPP1PPP/RNBQKBNR',
|
||||
turn: 'player',
|
||||
evaluation: 0.3,
|
||||
},
|
||||
game_status: 'ongoing',
|
||||
}),
|
||||
|
||||
getProgress: async () => ({
|
||||
overall_progress: {
|
||||
level: 5,
|
||||
experience: 2345,
|
||||
next_level_xp: 3000,
|
||||
rank: 'Strategic Thinker',
|
||||
},
|
||||
skill_metrics: {
|
||||
strategic_thinking: 72,
|
||||
logical_reasoning: 85,
|
||||
ethical_reasoning: 68,
|
||||
problem_solving: 79,
|
||||
ai_collaboration: 91,
|
||||
},
|
||||
learning_streak: 7,
|
||||
total_time_spent: 1234, // minutes
|
||||
recommendations: [
|
||||
'Try the advanced chess puzzles to improve tactical vision',
|
||||
'Explore more ethical dilemmas to strengthen moral reasoning',
|
||||
],
|
||||
}),
|
||||
},
|
||||
|
||||
// Projects endpoints
|
||||
projects: {
|
||||
list: async () => ({
|
||||
projects: [
|
||||
{
|
||||
id: 'proj-1',
|
||||
name: 'AI Ethics Research',
|
||||
description: 'Comprehensive research on AI ethics in healthcare',
|
||||
project_type: 'research',
|
||||
status: 'active',
|
||||
completion_percentage: 65,
|
||||
document_count: 8,
|
||||
conversation_count: 3,
|
||||
created_at: '2024-01-10T10:00:00Z',
|
||||
last_activity: '2024-01-20T11:30:00Z',
|
||||
},
|
||||
{
|
||||
id: 'proj-2',
|
||||
name: 'Security Audit Tool',
|
||||
description: 'Development of automated security audit tool',
|
||||
project_type: 'development',
|
||||
status: 'active',
|
||||
completion_percentage: 40,
|
||||
document_count: 5,
|
||||
conversation_count: 2,
|
||||
created_at: '2024-01-12T10:00:00Z',
|
||||
last_activity: '2024-01-19T15:00:00Z',
|
||||
}
|
||||
],
|
||||
total: 2,
|
||||
}),
|
||||
|
||||
create: async (data: any) => ({
|
||||
id: `proj-${Date.now()}`,
|
||||
...data,
|
||||
status: 'active',
|
||||
completion_percentage: 0,
|
||||
document_count: 0,
|
||||
conversation_count: 0,
|
||||
created_at: new Date().toISOString(),
|
||||
}),
|
||||
|
||||
get: async (id: string) => ({
|
||||
id,
|
||||
name: 'AI Ethics Research',
|
||||
description: 'Comprehensive research on AI ethics in healthcare',
|
||||
project_type: 'research',
|
||||
status: 'active',
|
||||
completion_percentage: 65,
|
||||
associated_resources: ['asst-1', 'dataset-1'],
|
||||
document_references: ['doc-1', 'doc-2'],
|
||||
conversation_references: ['conv-1'],
|
||||
created_at: '2024-01-10T10:00:00Z',
|
||||
last_activity: '2024-01-20T11:30:00Z',
|
||||
time_invested_minutes: 450,
|
||||
ai_interactions_count: 234,
|
||||
}),
|
||||
},
|
||||
|
||||
// Settings endpoints
|
||||
settings: {
|
||||
getPreferences: async () => ({
|
||||
theme: 'light',
|
||||
language: 'en',
|
||||
notifications_enabled: true,
|
||||
ai_personality: 'balanced',
|
||||
learning_style: 'interactive',
|
||||
difficulty_preference: 'adaptive',
|
||||
help_system_enabled: true,
|
||||
usage_analytics_enabled: true,
|
||||
}),
|
||||
|
||||
updatePreferences: async (data: any) => ({
|
||||
...data,
|
||||
updated_at: new Date().toISOString(),
|
||||
}),
|
||||
},
|
||||
};
|
||||
|
||||
export default mockApi;
|
||||
174
apps/tenant-app/src/lib/notepad-extractor.ts
Normal file
174
apps/tenant-app/src/lib/notepad-extractor.ts
Normal file
@@ -0,0 +1,174 @@
|
||||
// Payload for a single extracted "notepad" (a fenced code block pulled out
// of chat text for standalone display).
interface AINotePadContent {
  // Rendering mode for the notepad widget.
  type: 'code' | 'mermaid' | 'text' | 'json' | 'html' | 'markdown';
  // Block content with the ``` fences stripped.
  content: string;
  // Language tag from the fence (e.g. 'python'), when present.
  language?: string;
  // Human-readable title shown on the notepad header.
  title?: string;
}
|
||||
|
||||
// Cache for extraction results to avoid re-parsing on every render.
// Keyed by the full message text; values mirror extractNotePadContent's
// return shape.
const extractionCache = new Map<string, {
  segments: Array<{type: 'text' | 'notepad', content: string, notepadData?: AINotePadContent}>,
  hasNotepads: boolean
}>();
|
||||
|
||||
export function extractNotePadContent(text: string): {
|
||||
segments: Array<{type: 'text' | 'notepad', content: string, notepadData?: AINotePadContent}>,
|
||||
hasNotepads: boolean
|
||||
} {
|
||||
// Check cache first
|
||||
if (extractionCache.has(text)) {
|
||||
return extractionCache.get(text)!;
|
||||
}
|
||||
const segments: Array<{type: 'text' | 'notepad', content: string, notepadData?: AINotePadContent}> = [];
|
||||
|
||||
// Pattern to match code blocks with language
|
||||
const codeBlockPattern = /```(\w+)?\n([\s\S]*?)```/g;
|
||||
|
||||
let lastIndex = 0;
|
||||
let match;
|
||||
let hasNotepads = false;
|
||||
|
||||
while ((match = codeBlockPattern.exec(text)) !== null) {
|
||||
const [fullMatch, language = 'text', content] = match;
|
||||
|
||||
// Add text before this code block
|
||||
if (match.index > lastIndex) {
|
||||
const textBefore = text.slice(lastIndex, match.index);
|
||||
if (textBefore.trim()) {
|
||||
segments.push({
|
||||
type: 'text',
|
||||
content: textBefore
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
// Determine if this should go in a notepad
|
||||
const shouldExtract = shouldExtractToNotepad(language, content);
|
||||
|
||||
if (shouldExtract) {
|
||||
const type = getContentType(language);
|
||||
const title = generateTitle(type, language, content);
|
||||
|
||||
segments.push({
|
||||
type: 'notepad',
|
||||
content: '', // Empty content for notepad segments
|
||||
notepadData: {
|
||||
type,
|
||||
content: content.trim(),
|
||||
language: language || undefined,
|
||||
title
|
||||
}
|
||||
});
|
||||
hasNotepads = true;
|
||||
} else {
|
||||
// Keep as regular text/code
|
||||
segments.push({
|
||||
type: 'text',
|
||||
content: fullMatch
|
||||
});
|
||||
}
|
||||
|
||||
lastIndex = match.index + fullMatch.length;
|
||||
}
|
||||
|
||||
// Add remaining text after last match
|
||||
if (lastIndex < text.length) {
|
||||
const remainingText = text.slice(lastIndex);
|
||||
if (remainingText.trim()) {
|
||||
segments.push({
|
||||
type: 'text',
|
||||
content: remainingText
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
// If no code blocks were found, return the original text as a single segment
|
||||
if (segments.length === 0) {
|
||||
segments.push({
|
||||
type: 'text',
|
||||
content: text
|
||||
});
|
||||
}
|
||||
|
||||
// Cache the result before returning
|
||||
const result = { segments, hasNotepads };
|
||||
extractionCache.set(text, result);
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
function shouldExtractToNotepad(language: string, content: string): boolean {
|
||||
// Extract if it's a Mermaid diagram
|
||||
if (language === 'mermaid' || language === 'mmd') {
|
||||
return true;
|
||||
}
|
||||
|
||||
// Extract if it's a long code block (more than 10 lines)
|
||||
if (content.split('\n').length > 10) {
|
||||
return true;
|
||||
}
|
||||
|
||||
// Extract if it's HTML content
|
||||
if (language === 'html' || content.includes('<html') || content.includes('<!DOCTYPE')) {
|
||||
return true;
|
||||
}
|
||||
|
||||
// Extract if it's JSON and more than 5 lines
|
||||
if (language === 'json' && content.split('\n').length > 5) {
|
||||
return true;
|
||||
}
|
||||
|
||||
// Extract if it contains complex structures
|
||||
if (content.includes('function') && content.includes('{') && content.split('\n').length > 8) {
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
function getContentType(language: string): AINotePadContent['type'] {
|
||||
switch (language) {
|
||||
case 'mermaid':
|
||||
case 'mmd':
|
||||
return 'mermaid';
|
||||
case 'json':
|
||||
return 'json';
|
||||
case 'html':
|
||||
return 'html';
|
||||
case 'markdown':
|
||||
case 'md':
|
||||
return 'markdown';
|
||||
default:
|
||||
return 'code';
|
||||
}
|
||||
}
|
||||
|
||||
function generateTitle(type: AINotePadContent['type'], language?: string, content?: string): string {
|
||||
switch (type) {
|
||||
case 'mermaid':
|
||||
// Try to detect mermaid diagram type
|
||||
if (content?.includes('graph')) return 'Flowchart';
|
||||
if (content?.includes('sequenceDiagram')) return 'Sequence Diagram';
|
||||
if (content?.includes('pie')) return 'Pie Chart';
|
||||
if (content?.includes('gantt')) return 'Gantt Chart';
|
||||
return 'Mermaid Diagram';
|
||||
|
||||
case 'json':
|
||||
return 'JSON Data';
|
||||
|
||||
case 'html':
|
||||
return 'HTML Document';
|
||||
|
||||
case 'markdown':
|
||||
return 'Markdown Document';
|
||||
|
||||
case 'code':
|
||||
if (language) {
|
||||
return `${language.charAt(0).toUpperCase() + language.slice(1)} Code`;
|
||||
}
|
||||
return 'Code Sample';
|
||||
|
||||
default:
|
||||
return 'Text Content';
|
||||
}
|
||||
}
|
||||
97
apps/tenant-app/src/lib/permissions.ts
Normal file
97
apps/tenant-app/src/lib/permissions.ts
Normal file
@@ -0,0 +1,97 @@
|
||||
/**
|
||||
* GT 2.0 Permission Utilities
|
||||
* Role-based permission helpers for UI
|
||||
*/
|
||||
|
||||
import { getUser } from '@/services/auth';
|
||||
|
||||
// Role identifiers as stored on the authenticated user record.
export type UserRole = 'admin' | 'developer' | 'analyst' | 'student';
// Sharing scope for user-created resources.
export type VisibilityLevel = 'individual' | 'organization';

// Roles permitted to share/edit at organization scope.
const ADMIN_ROLES: UserRole[] = ['admin', 'developer'];
|
||||
|
||||
/**
|
||||
* Get current user's role from stored user data
|
||||
*/
|
||||
export function getUserRole(): UserRole | null {
|
||||
const user = getUser();
|
||||
return user?.role || null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if user can share resources to organization level
|
||||
* Only admin and developer roles can share to organization
|
||||
*/
|
||||
export function canShareToOrganization(role?: UserRole | null): boolean {
|
||||
if (!role) {
|
||||
role = getUserRole();
|
||||
}
|
||||
return role ? ADMIN_ROLES.includes(role) : false;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get available visibility options for current user
|
||||
* Returns array of visibility levels the user is allowed to set
|
||||
*/
|
||||
export function getAvailableVisibilityOptions(): VisibilityLevel[] {
|
||||
const role = getUserRole();
|
||||
|
||||
if (canShareToOrganization(role)) {
|
||||
// Admin and developer can use both visibility levels
|
||||
return ['individual', 'organization'];
|
||||
}
|
||||
|
||||
// Analyst and student can only use individual
|
||||
return ['individual'];
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if user can edit a resource
|
||||
* @param resourceOwnerId - The UUID/email of the resource creator
|
||||
* @param resourceVisibility - The visibility level of the resource
|
||||
*/
|
||||
export function canEditResource(
|
||||
resourceOwnerId: string,
|
||||
resourceVisibility: VisibilityLevel
|
||||
): boolean {
|
||||
const user = getUser();
|
||||
if (!user) return false;
|
||||
|
||||
const role = getUserRole();
|
||||
|
||||
// Admin and developer can edit anything
|
||||
if (role && ADMIN_ROLES.includes(role)) {
|
||||
return true;
|
||||
}
|
||||
|
||||
// Owner can always edit their own resources
|
||||
if (user.email === resourceOwnerId || user.user_id === resourceOwnerId) {
|
||||
return true;
|
||||
}
|
||||
|
||||
// Organization resources are read-only for non-admins who didn't create it
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if user can delete a resource
|
||||
* @param resourceOwnerId - The UUID/email of the resource creator
|
||||
*/
|
||||
export function canDeleteResource(resourceOwnerId: string): boolean {
|
||||
const user = getUser();
|
||||
if (!user) return false;
|
||||
|
||||
const role = getUserRole();
|
||||
|
||||
// Admin and developer can delete anything
|
||||
if (role && ADMIN_ROLES.includes(role)) {
|
||||
return true;
|
||||
}
|
||||
|
||||
// Owner can delete
|
||||
if (user.email === resourceOwnerId || user.user_id === resourceOwnerId) {
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
121
apps/tenant-app/src/lib/providers.tsx
Normal file
121
apps/tenant-app/src/lib/providers.tsx
Normal file
@@ -0,0 +1,121 @@
|
||||
'use client';
|
||||
|
||||
import { QueryClient, QueryClientProvider } from '@tanstack/react-query';
|
||||
import { useState, ReactNode, useEffect } from 'react';
|
||||
import { isTokenValid } from '@/services/auth';
|
||||
import { useChatStore } from '@/stores/chat-store';
|
||||
import { SessionMonitor } from '@/providers/session-monitor';
|
||||
|
||||
// Props for the top-level Providers wrapper component.
interface ProvidersProps {
  // React subtree that receives the query client / session context.
  children: ReactNode;
}
|
||||
|
||||
// Network status hook for offline resilience
|
||||
function useNetworkStatus() {
|
||||
const [isOnline, setIsOnline] = useState(true);
|
||||
const [isServerReachable, setIsServerReachable] = useState(true);
|
||||
|
||||
useEffect(() => {
|
||||
// Check server connectivity
|
||||
const checkServer = async () => {
|
||||
try {
|
||||
const response = await fetch('/api/v1/health', {
|
||||
method: 'GET'
|
||||
});
|
||||
setIsServerReachable(response.ok);
|
||||
} catch {
|
||||
setIsServerReachable(false);
|
||||
}
|
||||
};
|
||||
|
||||
// When browser comes back online, immediately check server
|
||||
const handleOnline = () => {
|
||||
setIsOnline(true);
|
||||
// Immediately check if server is reachable when network comes back
|
||||
checkServer();
|
||||
};
|
||||
const handleOffline = () => setIsOnline(false);
|
||||
|
||||
window.addEventListener('online', handleOnline);
|
||||
window.addEventListener('offline', handleOffline);
|
||||
|
||||
const serverCheck = setInterval(checkServer, 60000); // Check every 60 seconds
|
||||
checkServer(); // Initial check
|
||||
|
||||
return () => {
|
||||
window.removeEventListener('online', handleOnline);
|
||||
window.removeEventListener('offline', handleOffline);
|
||||
clearInterval(serverCheck);
|
||||
};
|
||||
}, []);
|
||||
|
||||
return { isOnline, isServerReachable };
|
||||
}
|
||||
|
||||
/**
 * Top-level app providers: TanStack Query client, session monitoring, and a
 * network-status banner. Also kicks off the WebSocket connection once a valid
 * auth token is present.
 */
export function Providers({ children }: ProvidersProps) {
  const { isOnline, isServerReachable } = useNetworkStatus();

  // Lazily create a single QueryClient instance for the app's lifetime.
  const [queryClient] = useState(
    () =>
      new QueryClient({
        defaultOptions: {
          queries: {
            // With SSR, we usually want to set some default staleTime
            // above 0 to avoid refetching immediately on the client
            staleTime: 60 * 1000, // 1 minute
            retry: (failureCount, error: any) => {
              // GT 2.0: Don't retry on auth errors - they need user intervention
              if (error?.status === 401 || error?.status === 403) {
                // Use centralized logout from auth store
                // (dynamic import avoids a module cycle and keeps this client-only)
                if (typeof window !== 'undefined') {
                  import('@/stores/auth-store').then(({ useAuthStore }) => {
                    useAuthStore.getState().logout('unauthorized');
                  });
                }
                return false;
              }
              // GT 2.0: Retry network and server errors for resilience
              // (missing status means the request never reached the server)
              if (error?.status >= 500 || !error?.status) {
                return failureCount < 3; // More retries for server issues
              }
              // Retry once for other client errors
              return failureCount < 1;
            },
            retryDelay: (attemptIndex) => Math.min(1000 * 2 ** attemptIndex, 30000), // Exponential backoff
          },
          mutations: {
            retry: false,
          },
        },
      })
  );

  // Initialize WebSocket connection with QueryClient for real-time updates
  useEffect(() => {
    const token = localStorage.getItem('gt2_token');
    if (token && isTokenValid()) { // isTokenValid() checks token internally
      console.log('🔌 Initializing WebSocket connection from Providers');
      useChatStore.getState().connect(queryClient);
    }

    // No cleanup needed - WebSocket should stay alive throughout app lifecycle
    // Socket will naturally disconnect when browser tab closes
  }, [queryClient]);

  return (
    <QueryClientProvider client={queryClient}>
      {/* Session Monitor - server-authoritative session management (Issue #264) */}
      <SessionMonitor>
        {/* Network Status Indicator */}
        {(!isOnline || !isServerReachable) && (
          <div className="fixed top-0 left-0 right-0 bg-yellow-500 text-white text-center py-2 text-sm z-50">
            {!isOnline ? '📡 No internet connection - working offline' : '⚠️ Server unreachable - showing cached data'}
          </div>
        )}
        {/* Push content below the fixed banner when it is visible */}
        <div className={(!isOnline || !isServerReachable) ? 'pt-10' : ''}>
          {children}
        </div>
      </SessionMonitor>
    </QueryClientProvider>
  );
}
|
||||
225
apps/tenant-app/src/lib/team-permissions.ts
Normal file
225
apps/tenant-app/src/lib/team-permissions.ts
Normal file
@@ -0,0 +1,225 @@
|
||||
/**
|
||||
* Team Permission Helpers for GT 2.0
|
||||
*
|
||||
* Implements hierarchical team permission system:
|
||||
* - Owner: Full control (edit team, manage all members, view Observable, share resources)
|
||||
* - Manager: Limited management (manage non-owner members, view Observable, share resources, invite)
|
||||
* - Contributor: Resource sharing only (share own resources)
|
||||
* - Member: Access only (use shared resources per resource permissions)
|
||||
*
|
||||
* Separate from:
|
||||
* - Resource-level permissions (Read/Edit) - set per-resource by sharer
|
||||
* - System roles (admin/developer/analyst/student) - tenant-wide permissions
|
||||
*/
|
||||
|
||||
// User-facing team role, derived from team_permission plus ownership.
export type TeamRole = 'owner' | 'manager' | 'contributor' | 'member';
// Raw permission value as stored in the database.
export type TeamPermission = 'read' | 'share' | 'manager'; // Database values

// A member row as returned by the team API.
export interface TeamMember {
  user_id: string;
  user_email: string;
  user_name: string;
  // Raw DB permission; map to a TeamRole via getTeamRole().
  team_permission: TeamPermission;
  // Observable flag; effective only with 'approved' consent (see isObservable()).
  is_observable: boolean;
  observable_consent_status: 'none' | 'pending' | 'approved' | 'revoked';
  // Timestamp of consent, when granted.
  observable_consent_at?: string;
  // Invitation lifecycle state.
  status: 'pending' | 'accepted' | 'declined';
}

// Team summary as returned by the team API.
export interface Team {
  id: string;
  name: string;
  owner_id: string;
  // Convenience flags computed for the requesting user.
  is_owner: boolean;
  can_manage: boolean;
  user_permission?: TeamPermission;
  member_count: number;
}
|
||||
|
||||
/**
|
||||
* Map database team_permission to user-facing role.
|
||||
* Owner is determined by matching user_id with team.owner_id.
|
||||
*/
|
||||
export function getTeamRole(member: TeamMember | null, ownerId: string, userId: string): TeamRole {
|
||||
if (userId === ownerId) return 'owner';
|
||||
if (!member) return 'member'; // Not in team
|
||||
|
||||
switch (member.team_permission) {
|
||||
case 'manager':
|
||||
return 'manager';
|
||||
case 'share':
|
||||
return 'contributor';
|
||||
case 'read':
|
||||
default:
|
||||
return 'member';
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Map user-facing role to database team_permission value.
|
||||
* Owner is stored separately in teams.owner_id, not in team_permission.
|
||||
*/
|
||||
export function getTeamPermissionValue(role: TeamRole): TeamPermission {
|
||||
switch (role) {
|
||||
case 'owner':
|
||||
// Owner doesn't have a team_permission row (they're in teams.owner_id)
|
||||
// If we need to create a membership for owner, use 'manager' equivalent
|
||||
return 'manager';
|
||||
case 'manager':
|
||||
return 'manager';
|
||||
case 'contributor':
|
||||
return 'share';
|
||||
case 'member':
|
||||
default:
|
||||
return 'read';
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if user can edit team details (name, description).
|
||||
* Only owner can edit team.
|
||||
*/
|
||||
export function canEditTeam(role: TeamRole): boolean {
|
||||
return role === 'owner';
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if user can view Observable member activity.
|
||||
* Owner and Manager can view activity of Observable members.
|
||||
*/
|
||||
export function canViewObservability(role: TeamRole): boolean {
|
||||
return role === 'owner' || role === 'manager';
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if user can manage members (add, remove, change roles).
|
||||
* Owner can manage all members.
|
||||
* Manager can manage non-owner members.
|
||||
*/
|
||||
export function canManageMembers(userRole: TeamRole, targetRole?: TeamRole): boolean {
|
||||
if (userRole === 'owner') return true;
|
||||
if (userRole === 'manager') {
|
||||
return !targetRole || targetRole !== 'owner'; // Manager cannot modify owner
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if user can share resources to team.
|
||||
* Owner, Manager, and Contributor can share.
|
||||
*/
|
||||
export function canShareResources(role: TeamRole): boolean {
|
||||
return ['owner', 'manager', 'contributor'].includes(role);
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if user can invite new members.
|
||||
* Owner and Manager can invite.
|
||||
*/
|
||||
export function canInviteMembers(role: TeamRole): boolean {
|
||||
return role === 'owner' || role === 'manager';
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if user can set a specific role on a member.
|
||||
* Owner can set any role.
|
||||
* Manager can only set Contributor or Member roles (not Owner or Manager).
|
||||
*/
|
||||
export function canSetRole(userRole: TeamRole, targetRole: TeamRole): boolean {
|
||||
if (userRole === 'owner') return true;
|
||||
if (userRole === 'manager') {
|
||||
return targetRole === 'contributor' || targetRole === 'member';
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if user can request Observable access from members.
|
||||
* Owner and Manager can request.
|
||||
*/
|
||||
export function canRequestObservable(role: TeamRole): boolean {
|
||||
return role === 'owner' || role === 'manager';
|
||||
}
|
||||
|
||||
/**
|
||||
* Get badge variant for team role display.
|
||||
*/
|
||||
export function getRoleBadgeVariant(role: TeamRole): 'default' | 'secondary' | 'outline' | 'ghost' {
|
||||
const variants: Record<TeamRole, 'default' | 'secondary' | 'outline' | 'ghost'> = {
|
||||
owner: 'default',
|
||||
manager: 'secondary',
|
||||
contributor: 'outline',
|
||||
member: 'ghost'
|
||||
};
|
||||
return variants[role];
|
||||
}
|
||||
|
||||
/**
|
||||
* Get user-friendly label for team role.
|
||||
*/
|
||||
export function getRoleLabel(role: TeamRole): string {
|
||||
const labels: Record<TeamRole, string> = {
|
||||
owner: 'Owner',
|
||||
manager: 'Manager',
|
||||
contributor: 'Contributor',
|
||||
member: 'Member'
|
||||
};
|
||||
return labels[role];
|
||||
}
|
||||
|
||||
/**
|
||||
* Get role description for tooltips/help text.
|
||||
*/
|
||||
export function getRoleDescription(role: TeamRole): string {
|
||||
const descriptions: Record<TeamRole, string> = {
|
||||
owner: 'Full control over team, members, and settings',
|
||||
manager: 'Can manage members, view Observable activity, and share resources',
|
||||
contributor: 'Can share own resources with team',
|
||||
member: 'Can access shared resources'
|
||||
};
|
||||
return descriptions[role];
|
||||
}
|
||||
|
||||
/**
|
||||
* Get Observable status badge variant.
|
||||
*/
|
||||
export function getObservableBadgeVariant(status: string): 'default' | 'secondary' | 'outline' {
|
||||
switch (status) {
|
||||
case 'approved':
|
||||
return 'default';
|
||||
case 'pending':
|
||||
return 'secondary';
|
||||
default:
|
||||
return 'outline';
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Get Observable status label.
|
||||
*/
|
||||
export function getObservableLabel(status: string): string {
|
||||
switch (status) {
|
||||
case 'approved':
|
||||
return 'Observable';
|
||||
case 'pending':
|
||||
return 'Observable (Pending)';
|
||||
case 'revoked':
|
||||
return 'Observable (Revoked)';
|
||||
default:
|
||||
return '';
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if member is Observable (approved status).
|
||||
*/
|
||||
export function isObservable(member: TeamMember): boolean {
|
||||
return member.is_observable && member.observable_consent_status === 'approved';
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if member has pending Observable request.
|
||||
*/
|
||||
export function hasPendingObservableRequest(member: TeamMember): boolean {
|
||||
return member.is_observable && member.observable_consent_status === 'pending';
|
||||
}
|
||||
363
apps/tenant-app/src/lib/utils.ts
Normal file
363
apps/tenant-app/src/lib/utils.ts
Normal file
@@ -0,0 +1,363 @@
|
||||
import { type ClassValue, clsx } from 'clsx';
|
||||
import { twMerge } from 'tailwind-merge';
|
||||
import { useEffect, useState } from 'react';
|
||||
|
||||
/**
|
||||
* Utility function for combining class names with Tailwind CSS merge support
|
||||
*/
|
||||
export function cn(...inputs: ClassValue[]) {
|
||||
return twMerge(clsx(inputs));
|
||||
}
|
||||
|
||||
/**
|
||||
* Custom hook for debouncing values (performance optimization)
|
||||
*
|
||||
* Use this to prevent expensive operations from running on every keystroke.
|
||||
* Example: Debounce search input to avoid filtering on every character typed.
|
||||
*
|
||||
* @param value The value to debounce
|
||||
* @param delay Delay in milliseconds (default: 300ms)
|
||||
* @returns Debounced value
|
||||
*
|
||||
* @example
|
||||
* const [searchQuery, setSearchQuery] = useState('');
|
||||
* const debouncedSearch = useDebouncedValue(searchQuery, 300);
|
||||
*
|
||||
* // Use debouncedSearch for filtering instead of searchQuery
|
||||
* const filtered = items.filter(item => item.name.includes(debouncedSearch));
|
||||
*/
|
||||
export function useDebouncedValue<T>(value: T, delay: number = 300): T {
|
||||
const [debouncedValue, setDebouncedValue] = useState<T>(value);
|
||||
|
||||
useEffect(() => {
|
||||
// Set up a timer to update the debounced value after the delay
|
||||
const handler = setTimeout(() => {
|
||||
setDebouncedValue(value);
|
||||
}, delay);
|
||||
|
||||
// Clean up the timer if value changes before delay completes
|
||||
return () => {
|
||||
clearTimeout(handler);
|
||||
};
|
||||
}, [value, delay]);
|
||||
|
||||
return debouncedValue;
|
||||
}
|
||||
|
||||
/**
|
||||
* Format date for display in the UI (uses browser locale)
|
||||
*/
|
||||
export function formatDate(date: string | Date): string {
|
||||
const d = typeof date === 'string' ? new Date(date) : date;
|
||||
return d.toLocaleDateString(undefined, {
|
||||
year: 'numeric',
|
||||
month: 'short',
|
||||
day: 'numeric',
|
||||
hour: '2-digit',
|
||||
minute: '2-digit',
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Format date and time for display (uses browser locale)
|
||||
*/
|
||||
export function formatDateTime(date: string | Date): string {
|
||||
const d = typeof date === 'string' ? new Date(date) : date;
|
||||
return d.toLocaleString(undefined, {
|
||||
year: 'numeric',
|
||||
month: 'short',
|
||||
day: 'numeric',
|
||||
hour: '2-digit',
|
||||
minute: '2-digit',
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Format time only for display (uses browser locale)
|
||||
*/
|
||||
export function formatTime(date: string | Date): string {
|
||||
const d = typeof date === 'string' ? new Date(date) : date;
|
||||
return d.toLocaleTimeString(undefined, {
|
||||
hour: '2-digit',
|
||||
minute: '2-digit',
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Format date only (no time) for display (uses browser locale)
|
||||
*/
|
||||
export function formatDateOnly(date: string | Date): string {
|
||||
const d = typeof date === 'string' ? new Date(date) : date;
|
||||
return d.toLocaleDateString(undefined, {
|
||||
year: 'numeric',
|
||||
month: 'short',
|
||||
day: 'numeric',
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Format date for relative display (e.g., "2 hours ago")
|
||||
*/
|
||||
export function formatRelativeTime(date: string | Date): string {
|
||||
const d = typeof date === 'string' ? new Date(date) : date;
|
||||
const now = new Date();
|
||||
const diffInSeconds = Math.floor((now.getTime() - d.getTime()) / 1000);
|
||||
|
||||
if (diffInSeconds < 60) {
|
||||
return 'just now';
|
||||
}
|
||||
|
||||
const diffInMinutes = Math.floor(diffInSeconds / 60);
|
||||
if (diffInMinutes < 60) {
|
||||
return `${diffInMinutes}m ago`;
|
||||
}
|
||||
|
||||
const diffInHours = Math.floor(diffInMinutes / 60);
|
||||
if (diffInHours < 24) {
|
||||
return `${diffInHours}h ago`;
|
||||
}
|
||||
|
||||
const diffInDays = Math.floor(diffInHours / 24);
|
||||
if (diffInDays < 7) {
|
||||
return `${diffInDays}d ago`;
|
||||
}
|
||||
|
||||
const diffInWeeks = Math.floor(diffInDays / 7);
|
||||
if (diffInWeeks < 4) {
|
||||
return `${diffInWeeks}w ago`;
|
||||
}
|
||||
|
||||
const diffInMonths = Math.floor(diffInDays / 30);
|
||||
if (diffInMonths < 12) {
|
||||
return `${diffInMonths}mo ago`;
|
||||
}
|
||||
|
||||
const diffInYears = Math.floor(diffInDays / 365);
|
||||
return `${diffInYears}y ago`;
|
||||
}
|
||||
|
||||
/**
|
||||
* Format file size for display
|
||||
*/
|
||||
export function formatFileSize(bytes: number): string {
|
||||
const units = ['B', 'KiB', 'MiB', 'GiB'];
|
||||
let size = bytes;
|
||||
let unitIndex = 0;
|
||||
|
||||
while (size >= 1024 && unitIndex < units.length - 1) {
|
||||
size /= 1024;
|
||||
unitIndex++;
|
||||
}
|
||||
|
||||
return `${size.toFixed(unitIndex === 0 ? 0 : 1)} ${units[unitIndex]}`;
|
||||
}
|
||||
|
||||
/**
|
||||
* Format storage size in megabytes for display
|
||||
*/
|
||||
export function formatStorageSize(mb: number): string {
|
||||
if (mb >= 1024 * 1024) return `${(mb / (1024 * 1024)).toFixed(2)} TiB`;
|
||||
if (mb >= 1024) return `${(mb / 1024).toFixed(2)} GiB`;
|
||||
return `${mb.toFixed(2)} MiB`;
|
||||
}
|
||||
|
||||
/**
|
||||
* Format token count for display
|
||||
*/
|
||||
export function formatTokenCount(tokens: number): string {
|
||||
if (tokens < 1000) {
|
||||
return tokens.toString();
|
||||
}
|
||||
|
||||
if (tokens < 1000000) {
|
||||
return `${(tokens / 1000).toFixed(1)}K`;
|
||||
}
|
||||
|
||||
return `${(tokens / 1000000).toFixed(1)}M`;
|
||||
}
|
||||
|
||||
/**
|
||||
* Format cost in cents to dollars (uses browser locale)
|
||||
*/
|
||||
export function formatCost(cents: number): string {
|
||||
const dollars = cents / 100;
|
||||
return new Intl.NumberFormat(undefined, {
|
||||
style: 'currency',
|
||||
currency: 'USD',
|
||||
minimumFractionDigits: dollars < 1 ? 3 : 2,
|
||||
}).format(dollars);
|
||||
}
|
||||
|
||||
/**
|
||||
* Generate a random UUID v4
|
||||
*/
|
||||
export function generateUUID(): string {
|
||||
return 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx'.replace(/[xy]/g, function(c) {
|
||||
const r = Math.random() * 16 | 0;
|
||||
const v = c == 'x' ? r : (r & 0x3 | 0x8);
|
||||
return v.toString(16);
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Truncate text to a specified length
|
||||
*/
|
||||
export function truncateText(text: string, maxLength: number): string {
|
||||
if (text.length <= maxLength) {
|
||||
return text;
|
||||
}
|
||||
return text.slice(0, maxLength) + '...';
|
||||
}
|
||||
|
||||
/**
|
||||
* Debounce function
|
||||
*/
|
||||
export function debounce<T extends (...args: any[]) => any>(
|
||||
func: T,
|
||||
wait: number
|
||||
): (...args: Parameters<T>) => void {
|
||||
let timeout: NodeJS.Timeout;
|
||||
return (...args: Parameters<T>) => {
|
||||
clearTimeout(timeout);
|
||||
timeout = setTimeout(() => func(...args), wait);
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Throttle function
|
||||
*/
|
||||
export function throttle<T extends (...args: any[]) => any>(
|
||||
func: T,
|
||||
limit: number
|
||||
): (...args: Parameters<T>) => void {
|
||||
let inThrottle: boolean;
|
||||
return (...args: Parameters<T>) => {
|
||||
if (!inThrottle) {
|
||||
func(...args);
|
||||
inThrottle = true;
|
||||
setTimeout(() => inThrottle = false, limit);
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Clean model names by removing provider prefixes like "groq/"
|
||||
*/
|
||||
export function cleanModelName(name: string): string {
|
||||
return name.replace(/^(groq|openai|anthropic)\//, '');
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if a string is a valid email
|
||||
*/
|
||||
export function isValidEmail(email: string): boolean {
|
||||
const emailRegex = /^[^\s@]+@[^\s@]+\.[^\s@]+$/;
|
||||
return emailRegex.test(email);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get initials from a full name
|
||||
*/
|
||||
export function getInitials(name: string): string {
|
||||
if (!name) return '';
|
||||
|
||||
return name
|
||||
.split(' ')
|
||||
.map(word => word.charAt(0).toUpperCase())
|
||||
.slice(0, 2)
|
||||
.join('');
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if the current environment is development
|
||||
*/
|
||||
export function isDevelopment(): boolean {
|
||||
return process.env.NODE_ENV === 'development';
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the base API URL
|
||||
*/
|
||||
export function getApiUrl(): string {
|
||||
// Use relative path for browser - Next.js will proxy to tenant-backend via Docker network
|
||||
if (typeof window !== 'undefined') {
|
||||
return '';
|
||||
}
|
||||
// Server-side uses internal Docker network URL
|
||||
return process.env.INTERNAL_BACKEND_URL || 'http://tenant-backend:8000';
|
||||
}
|
||||
|
||||
/**
|
||||
* Get WebSocket URL
|
||||
* Returns current window origin to route Socket.IO through Next.js proxy.
|
||||
* Next.js rewrites /socket.io/* to backend via INTERNAL_BACKEND_URL.
|
||||
* This ensures Socket.IO works through Cloudflare tunnel (port 3002 only).
|
||||
*/
|
||||
export function getWebSocketUrl(): string {
|
||||
if (typeof window !== 'undefined') {
|
||||
// Use current window origin (no port override)
|
||||
// Next.js will proxy /socket.io/* to backend via rewrite rules
|
||||
return window.location.origin;
|
||||
}
|
||||
// Server-side fallback (not used for Socket.IO client connections)
|
||||
return 'ws://tenant-backend:8000';
|
||||
}
|
||||
|
||||
/**
|
||||
* Handle API errors consistently
|
||||
*/
|
||||
export function handleApiError(error: any): string {
|
||||
if (error?.response?.data?.message) {
|
||||
return error.response.data.message;
|
||||
}
|
||||
|
||||
if (error?.message) {
|
||||
return error.message;
|
||||
}
|
||||
|
||||
return 'An unexpected error occurred. Please try again.';
|
||||
}
|
||||
|
||||
/**
|
||||
* Generate a conversation title from the first message
|
||||
*/
|
||||
export function generateConversationTitle(firstMessage: string): string {
|
||||
// Remove common prefixes
|
||||
const cleaned = firstMessage
|
||||
.replace(/^(hi|hello|hey|can you|could you|please|i need|help me|how do|what is|what are)/i, '')
|
||||
.trim();
|
||||
|
||||
// Truncate to reasonable length
|
||||
const truncated = truncateText(cleaned, 50);
|
||||
|
||||
// Return cleaned title or fallback
|
||||
return truncated || 'New Conversation';
|
||||
}
|
||||
|
||||
/**
|
||||
* Copy text to clipboard
|
||||
*/
|
||||
export async function copyToClipboard(text: string): Promise<boolean> {
|
||||
try {
|
||||
await navigator.clipboard.writeText(text);
|
||||
return true;
|
||||
} catch (error) {
|
||||
console.error('Failed to copy to clipboard:', error);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Download a file with specified content
|
||||
*/
|
||||
export function downloadFile(content: string, filename: string, mimeType: string = 'text/plain'): void {
|
||||
const blob = new Blob([content], { type: mimeType });
|
||||
const url = URL.createObjectURL(blob);
|
||||
const link = document.createElement('a');
|
||||
link.href = url;
|
||||
link.download = filename;
|
||||
document.body.appendChild(link);
|
||||
link.click();
|
||||
document.body.removeChild(link);
|
||||
URL.revokeObjectURL(url);
|
||||
}
|
||||
Reference in New Issue
Block a user