From c7c0659a85098366659eab490147daeaaf89d947 Mon Sep 17 00:00:00 2001
From: junlan <15167915727@163.com>
Date: Mon, 13 Apr 2026 11:34:23 +0800
Subject: [PATCH] init. project
---
.github/AGENTS.md | 24 +
.github/skills/decompose-test-items/SKILL.md | 46 +
.github/skills/generate-test-cases/SKILL.md | 45 +
.../skills/identify-requirement-type/SKILL.md | 62 +
.github/skills/testing-orchestrator/SKILL.md | 57 +
.github/测试项分解要求.md | 108 +
README.md | 164 +
rag-web-ui/.env.example | 55 +
rag-web-ui/.gitattributes | 27 +
rag-web-ui/.gitignore | 60 +
rag-web-ui/LICENSE | 201 +
rag-web-ui/backend/Dockerfile | 35 +
rag-web-ui/backend/Dockerfile.dev | 37 +
rag-web-ui/backend/__init__.py | 1 +
rag-web-ui/backend/alembic.ini | 42 +
rag-web-ui/backend/alembic/env.py | 90 +
rag-web-ui/backend/alembic/script.py.mako | 26 +
.../3580c0dcd005_increase_api_key_length.py | 36 +
...1361d_rename_metadata_to_chunk_metadata.py | 116 +
...7_add_document_upload_id_to_processing_.py | 37 +
...f9c89b7d11_add_tool_jobs_and_srs_tables.py | 102 +
.../e214adf7fb66_add_api_keys_table.py | 49 +
...fd73eebc87c1_add_document_uploads_table.py | 44 +
.../alembic/versions/initial_schema.py | 148 +
rag-web-ui/backend/app/__init__.py | 0
rag-web-ui/backend/app/api/__init__.py | 0
rag-web-ui/backend/app/api/api_v1/__init__.py | 0
rag-web-ui/backend/app/api/api_v1/api.py | 11 +
rag-web-ui/backend/app/api/api_v1/api_keys.py | 84 +
rag-web-ui/backend/app/api/api_v1/auth.py | 88 +
rag-web-ui/backend/app/api/api_v1/chat.py | 155 +
.../backend/app/api/api_v1/knowledge_base.py | 575 ++
rag-web-ui/backend/app/api/api_v1/testing.py | 84 +
rag-web-ui/backend/app/api/api_v1/tools.py | 175 +
rag-web-ui/backend/app/api/openapi/api.py | 6 +
.../backend/app/api/openapi/knowledge.py | 60 +
rag-web-ui/backend/app/core/__init__.py | 0
rag-web-ui/backend/app/core/config.py | 123 +
rag-web-ui/backend/app/core/minio.py | 29 +
rag-web-ui/backend/app/core/runtime_checks.py | 27 +
rag-web-ui/backend/app/core/security.py | 84 +
rag-web-ui/backend/app/db/__init__.py | 0
rag-web-ui/backend/app/db/session.py | 13 +
rag-web-ui/backend/app/main.py | 47 +
rag-web-ui/backend/app/models/__init__.py | 18 +
rag-web-ui/backend/app/models/api_key.py | 18 +
rag-web-ui/backend/app/models/base.py | 9 +
rag-web-ui/backend/app/models/chat.py | 39 +
rag-web-ui/backend/app/models/knowledge.py | 97 +
rag-web-ui/backend/app/models/tooling.py | 76 +
rag-web-ui/backend/app/models/user.py | 18 +
rag-web-ui/backend/app/schemas/__init__.py | 12 +
rag-web-ui/backend/app/schemas/api_key.py | 28 +
rag-web-ui/backend/app/schemas/chat.py | 39 +
rag-web-ui/backend/app/schemas/knowledge.py | 85 +
rag-web-ui/backend/app/schemas/testing.py | 59 +
rag-web-ui/backend/app/schemas/token.py | 9 +
rag-web-ui/backend/app/schemas/tooling.py | 52 +
rag-web-ui/backend/app/schemas/user.py | 23 +
rag-web-ui/backend/app/services/__init__.py | 0
rag-web-ui/backend/app/services/api_key.py | 61 +
.../backend/app/services/chat_service.py | 532 ++
.../backend/app/services/chunk_record.py | 69 +
.../app/services/document_processor.py | 582 ++
.../services/embedding/embedding_factory.py | 46 +
.../backend/app/services/fusion_prompts.py | 116 +
.../backend/app/services/graph/__init__.py | 3 +
.../app/services/graph/graphrag_adapter.py | 183 +
.../backend/app/services/hybrid_retriever.py | 85 +
.../backend/app/services/intent_router.py | 120 +
.../backend/app/services/llm/llm_factory.py | 57 +
.../backend/app/services/reranker/__init__.py | 3 +
.../app/services/reranker/external_api.py | 164 +
.../app/services/retrieval/__init__.py | 3 +
.../services/retrieval/multi_kb_retriever.py | 131 +
.../backend/app/services/srs_job_service.py | 187 +
.../app/services/testing_pipeline/__init__.py | 3 +
.../app/services/testing_pipeline/base.py | 20 +
.../app/services/testing_pipeline/pipeline.py | 99 +
.../app/services/testing_pipeline/rules.py | 203 +
.../app/services/testing_pipeline/tools.py | 867 ++
.../backend/app/services/vector_schema.py | 122 +
.../app/services/vector_store/__init__.py | 11 +
.../backend/app/services/vector_store/base.py | 42 +
.../app/services/vector_store/chroma.py | 47 +
.../app/services/vector_store/factory.py | 59 +
.../app/services/vector_store/qdrant.py | 43 +
rag-web-ui/backend/app/startup/migarate.py | 100 +
rag-web-ui/backend/app/tools/__init__.py | 4 +
rag-web-ui/backend/app/tools/base.py | 11 +
rag-web-ui/backend/app/tools/registry.py | 19 +
.../app/tools/srs_reqs_qwen/__init__.py | 3 +
.../tools/srs_reqs_qwen/default_config.yaml | 102 +
.../app/tools/srs_reqs_qwen/src/__init__.py | 26 +
.../srs_reqs_qwen/src/document_parser.py | 709 ++
.../tools/srs_reqs_qwen/src/json_generator.py | 198 +
.../tools/srs_reqs_qwen/src/llm_interface.py | 197 +
.../src/requirement_extractor.py | 1008 +++
.../src/requirement_id_generator.py | 74 +
.../srs_reqs_qwen/src/requirement_splitter.py | 188 +
.../app/tools/srs_reqs_qwen/src/settings.py | 162 +
.../app/tools/srs_reqs_qwen/src/utils.py | 134 +
.../backend/app/tools/srs_reqs_qwen/tool.py | 148 +
rag-web-ui/backend/clean_db.py | 23 +
rag-web-ui/backend/diff.py | 166 +
rag-web-ui/backend/entrypoint.sh | 25 +
rag-web-ui/backend/nano_graphrag/__init__.py | 7 +
rag-web-ui/backend/nano_graphrag/_llm.py | 301 +
rag-web-ui/backend/nano_graphrag/_op.py | 1140 +++
rag-web-ui/backend/nano_graphrag/_splitter.py | 94 +
.../nano_graphrag/_storage/__init__.py | 9 +
.../nano_graphrag/_storage/gdb_neo4j.py | 529 ++
.../nano_graphrag/_storage/gdb_networkx.py | 268 +
.../backend/nano_graphrag/_storage/kv_json.py | 46 +
.../nano_graphrag/_storage/vdb_hnswlib.py | 141 +
.../_storage/vdb_nanovectordb.py | 68 +
rag-web-ui/backend/nano_graphrag/_utils.py | 307 +
rag-web-ui/backend/nano_graphrag/base.py | 186 +
.../entity_extraction/__init__.py | 0
.../entity_extraction/extract.py | 171 +
.../nano_graphrag/entity_extraction/metric.py | 62 +
.../nano_graphrag/entity_extraction/module.py | 330 +
rag-web-ui/backend/nano_graphrag/graphrag.py | 382 +
rag-web-ui/backend/nano_graphrag/prompt.py | 305 +
rag-web-ui/backend/requirements.txt | 33 +
rag-web-ui/backend/tests/__init__.py | 0
.../backend/tests/test_testing_pipeline.py | 127 +
rag-web-ui/backend/uploads/README.md | 97 +
rag-web-ui/docker-compose.dev.yml | 135 +
rag-web-ui/docker-compose.yml | 130 +
rag-web-ui/frontend/.dockerignore | 9 +
rag-web-ui/frontend/.gitignore | 41 +
rag-web-ui/frontend/Dockerfile | 60 +
rag-web-ui/frontend/Dockerfile.dev | 21 +
rag-web-ui/frontend/README.md | 36 +
rag-web-ui/frontend/components.json | 17 +
rag-web-ui/frontend/eslint.config.mjs | 16 +
rag-web-ui/frontend/next.config.js | 11 +
rag-web-ui/frontend/package.json | 61 +
rag-web-ui/frontend/pnpm-lock.yaml | 7226 +++++++++++++++++
rag-web-ui/frontend/postcss.config.js | 6 +
rag-web-ui/frontend/postcss.config.mjs | 8 +
rag-web-ui/frontend/public/file.svg | 1 +
rag-web-ui/frontend/public/globe.svg | 1 +
rag-web-ui/frontend/public/logo.png | Bin 0 -> 131393 bytes
rag-web-ui/frontend/public/logo.svg | 46 +
rag-web-ui/frontend/public/next.svg | 1 +
rag-web-ui/frontend/public/vercel.svg | 1 +
rag-web-ui/frontend/public/window.svg | 1 +
.../src/app/dashboard/api-keys/page.tsx | 373 +
.../src/app/dashboard/chat/[id]/page.tsx | 421 +
.../src/app/dashboard/chat/new/page.tsx | 204 +
.../frontend/src/app/dashboard/chat/page.tsx | 197 +
.../dashboard/consistency-analysis/page.tsx | 308 +
.../dashboard/doc-processing/extract/page.tsx | 539 ++
.../src/app/dashboard/doc-processing/page.tsx | 5 +
.../doc-processing/test-case-gen/page.tsx | 362 +
.../src/app/dashboard/knowledge/[id]/page.tsx | 61 +
.../dashboard/knowledge/[id]/upload/page.tsx | 369 +
.../src/app/dashboard/knowledge/new/page.tsx | 123 +
.../src/app/dashboard/knowledge/page.tsx | 220 +
.../frontend/src/app/dashboard/page.tsx | 259 +
.../dashboard/test-retrieval/[id]/page.tsx | 193 +
rag-web-ui/frontend/src/app/favicon.ico | Bin 0 -> 25931 bytes
rag-web-ui/frontend/src/app/globals.css | 80 +
rag-web-ui/frontend/src/app/layout.tsx | 28 +
rag-web-ui/frontend/src/app/login/page.tsx | 130 +
rag-web-ui/frontend/src/app/page.tsx | 111 +
rag-web-ui/frontend/src/app/register/page.tsx | 246 +
.../frontend/src/components/chat/answer.tsx | 206 +
.../knowledge-base/document-list.tsx | 176 +
.../knowledge-base/document-upload-steps.tsx | 709 ++
.../components/layout/dashboard-layout.tsx | 265 +
.../src/components/theme-provider.tsx | 9 +
.../frontend/src/components/ui/accordion.tsx | 58 +
.../frontend/src/components/ui/badge.tsx | 36 +
.../frontend/src/components/ui/breadcrumb.tsx | 78 +
.../frontend/src/components/ui/button.tsx | 56 +
.../frontend/src/components/ui/card.tsx | 79 +
.../frontend/src/components/ui/dialog.tsx | 122 +
.../frontend/src/components/ui/divider.tsx | 26 +
.../frontend/src/components/ui/input.tsx | 25 +
.../frontend/src/components/ui/label.tsx | 26 +
.../frontend/src/components/ui/popover.tsx | 31 +
.../frontend/src/components/ui/progress.tsx | 28 +
.../frontend/src/components/ui/select.tsx | 121 +
.../frontend/src/components/ui/skeleton.tsx | 15 +
.../frontend/src/components/ui/switch.tsx | 29 +
.../frontend/src/components/ui/table.tsx | 117 +
.../frontend/src/components/ui/tabs.tsx | 55 +
.../frontend/src/components/ui/toast.tsx | 129 +
.../frontend/src/components/ui/toaster.tsx | 35 +
.../frontend/src/components/ui/use-toast.ts | 194 +
rag-web-ui/frontend/src/lib/api.ts | 94 +
rag-web-ui/frontend/src/lib/document-mock.ts | 443 +
rag-web-ui/frontend/src/lib/srs-tools-api.ts | 75 +
rag-web-ui/frontend/src/lib/utils.ts | 6 +
rag-web-ui/frontend/src/styles/globals.css | 76 +
rag-web-ui/frontend/tailwind.config.ts | 83 +
rag-web-ui/frontend/tsconfig.json | 27 +
rag-web-ui/nginx.conf | 84 +
rag-web-ui/nginx.dev.conf | 113 +
202 files changed, 31196 insertions(+)
create mode 100644 .github/AGENTS.md
create mode 100644 .github/skills/decompose-test-items/SKILL.md
create mode 100644 .github/skills/generate-test-cases/SKILL.md
create mode 100644 .github/skills/identify-requirement-type/SKILL.md
create mode 100644 .github/skills/testing-orchestrator/SKILL.md
create mode 100644 .github/测试项分解要求.md
create mode 100644 README.md
create mode 100644 rag-web-ui/.env.example
create mode 100644 rag-web-ui/.gitattributes
create mode 100644 rag-web-ui/.gitignore
create mode 100644 rag-web-ui/LICENSE
create mode 100644 rag-web-ui/backend/Dockerfile
create mode 100644 rag-web-ui/backend/Dockerfile.dev
create mode 100644 rag-web-ui/backend/__init__.py
create mode 100644 rag-web-ui/backend/alembic.ini
create mode 100644 rag-web-ui/backend/alembic/env.py
create mode 100644 rag-web-ui/backend/alembic/script.py.mako
create mode 100644 rag-web-ui/backend/alembic/versions/3580c0dcd005_increase_api_key_length.py
create mode 100644 rag-web-ui/backend/alembic/versions/59cfa0f1361d_rename_metadata_to_chunk_metadata.py
create mode 100644 rag-web-ui/backend/alembic/versions/5be054bd6587_add_document_upload_id_to_processing_.py
create mode 100644 rag-web-ui/backend/alembic/versions/a4f9c89b7d11_add_tool_jobs_and_srs_tables.py
create mode 100644 rag-web-ui/backend/alembic/versions/e214adf7fb66_add_api_keys_table.py
create mode 100644 rag-web-ui/backend/alembic/versions/fd73eebc87c1_add_document_uploads_table.py
create mode 100644 rag-web-ui/backend/alembic/versions/initial_schema.py
create mode 100644 rag-web-ui/backend/app/__init__.py
create mode 100644 rag-web-ui/backend/app/api/__init__.py
create mode 100644 rag-web-ui/backend/app/api/api_v1/__init__.py
create mode 100644 rag-web-ui/backend/app/api/api_v1/api.py
create mode 100644 rag-web-ui/backend/app/api/api_v1/api_keys.py
create mode 100644 rag-web-ui/backend/app/api/api_v1/auth.py
create mode 100644 rag-web-ui/backend/app/api/api_v1/chat.py
create mode 100644 rag-web-ui/backend/app/api/api_v1/knowledge_base.py
create mode 100644 rag-web-ui/backend/app/api/api_v1/testing.py
create mode 100644 rag-web-ui/backend/app/api/api_v1/tools.py
create mode 100644 rag-web-ui/backend/app/api/openapi/api.py
create mode 100644 rag-web-ui/backend/app/api/openapi/knowledge.py
create mode 100644 rag-web-ui/backend/app/core/__init__.py
create mode 100644 rag-web-ui/backend/app/core/config.py
create mode 100644 rag-web-ui/backend/app/core/minio.py
create mode 100644 rag-web-ui/backend/app/core/runtime_checks.py
create mode 100644 rag-web-ui/backend/app/core/security.py
create mode 100644 rag-web-ui/backend/app/db/__init__.py
create mode 100644 rag-web-ui/backend/app/db/session.py
create mode 100644 rag-web-ui/backend/app/main.py
create mode 100644 rag-web-ui/backend/app/models/__init__.py
create mode 100644 rag-web-ui/backend/app/models/api_key.py
create mode 100644 rag-web-ui/backend/app/models/base.py
create mode 100644 rag-web-ui/backend/app/models/chat.py
create mode 100644 rag-web-ui/backend/app/models/knowledge.py
create mode 100644 rag-web-ui/backend/app/models/tooling.py
create mode 100644 rag-web-ui/backend/app/models/user.py
create mode 100644 rag-web-ui/backend/app/schemas/__init__.py
create mode 100644 rag-web-ui/backend/app/schemas/api_key.py
create mode 100644 rag-web-ui/backend/app/schemas/chat.py
create mode 100644 rag-web-ui/backend/app/schemas/knowledge.py
create mode 100644 rag-web-ui/backend/app/schemas/testing.py
create mode 100644 rag-web-ui/backend/app/schemas/token.py
create mode 100644 rag-web-ui/backend/app/schemas/tooling.py
create mode 100644 rag-web-ui/backend/app/schemas/user.py
create mode 100644 rag-web-ui/backend/app/services/__init__.py
create mode 100644 rag-web-ui/backend/app/services/api_key.py
create mode 100644 rag-web-ui/backend/app/services/chat_service.py
create mode 100644 rag-web-ui/backend/app/services/chunk_record.py
create mode 100644 rag-web-ui/backend/app/services/document_processor.py
create mode 100644 rag-web-ui/backend/app/services/embedding/embedding_factory.py
create mode 100644 rag-web-ui/backend/app/services/fusion_prompts.py
create mode 100644 rag-web-ui/backend/app/services/graph/__init__.py
create mode 100644 rag-web-ui/backend/app/services/graph/graphrag_adapter.py
create mode 100644 rag-web-ui/backend/app/services/hybrid_retriever.py
create mode 100644 rag-web-ui/backend/app/services/intent_router.py
create mode 100644 rag-web-ui/backend/app/services/llm/llm_factory.py
create mode 100644 rag-web-ui/backend/app/services/reranker/__init__.py
create mode 100644 rag-web-ui/backend/app/services/reranker/external_api.py
create mode 100644 rag-web-ui/backend/app/services/retrieval/__init__.py
create mode 100644 rag-web-ui/backend/app/services/retrieval/multi_kb_retriever.py
create mode 100644 rag-web-ui/backend/app/services/srs_job_service.py
create mode 100644 rag-web-ui/backend/app/services/testing_pipeline/__init__.py
create mode 100644 rag-web-ui/backend/app/services/testing_pipeline/base.py
create mode 100644 rag-web-ui/backend/app/services/testing_pipeline/pipeline.py
create mode 100644 rag-web-ui/backend/app/services/testing_pipeline/rules.py
create mode 100644 rag-web-ui/backend/app/services/testing_pipeline/tools.py
create mode 100644 rag-web-ui/backend/app/services/vector_schema.py
create mode 100644 rag-web-ui/backend/app/services/vector_store/__init__.py
create mode 100644 rag-web-ui/backend/app/services/vector_store/base.py
create mode 100644 rag-web-ui/backend/app/services/vector_store/chroma.py
create mode 100644 rag-web-ui/backend/app/services/vector_store/factory.py
create mode 100644 rag-web-ui/backend/app/services/vector_store/qdrant.py
create mode 100644 rag-web-ui/backend/app/startup/migarate.py
create mode 100644 rag-web-ui/backend/app/tools/__init__.py
create mode 100644 rag-web-ui/backend/app/tools/base.py
create mode 100644 rag-web-ui/backend/app/tools/registry.py
create mode 100644 rag-web-ui/backend/app/tools/srs_reqs_qwen/__init__.py
create mode 100644 rag-web-ui/backend/app/tools/srs_reqs_qwen/default_config.yaml
create mode 100644 rag-web-ui/backend/app/tools/srs_reqs_qwen/src/__init__.py
create mode 100644 rag-web-ui/backend/app/tools/srs_reqs_qwen/src/document_parser.py
create mode 100644 rag-web-ui/backend/app/tools/srs_reqs_qwen/src/json_generator.py
create mode 100644 rag-web-ui/backend/app/tools/srs_reqs_qwen/src/llm_interface.py
create mode 100644 rag-web-ui/backend/app/tools/srs_reqs_qwen/src/requirement_extractor.py
create mode 100644 rag-web-ui/backend/app/tools/srs_reqs_qwen/src/requirement_id_generator.py
create mode 100644 rag-web-ui/backend/app/tools/srs_reqs_qwen/src/requirement_splitter.py
create mode 100644 rag-web-ui/backend/app/tools/srs_reqs_qwen/src/settings.py
create mode 100644 rag-web-ui/backend/app/tools/srs_reqs_qwen/src/utils.py
create mode 100644 rag-web-ui/backend/app/tools/srs_reqs_qwen/tool.py
create mode 100644 rag-web-ui/backend/clean_db.py
create mode 100644 rag-web-ui/backend/diff.py
create mode 100644 rag-web-ui/backend/entrypoint.sh
create mode 100644 rag-web-ui/backend/nano_graphrag/__init__.py
create mode 100644 rag-web-ui/backend/nano_graphrag/_llm.py
create mode 100644 rag-web-ui/backend/nano_graphrag/_op.py
create mode 100644 rag-web-ui/backend/nano_graphrag/_splitter.py
create mode 100644 rag-web-ui/backend/nano_graphrag/_storage/__init__.py
create mode 100644 rag-web-ui/backend/nano_graphrag/_storage/gdb_neo4j.py
create mode 100644 rag-web-ui/backend/nano_graphrag/_storage/gdb_networkx.py
create mode 100644 rag-web-ui/backend/nano_graphrag/_storage/kv_json.py
create mode 100644 rag-web-ui/backend/nano_graphrag/_storage/vdb_hnswlib.py
create mode 100644 rag-web-ui/backend/nano_graphrag/_storage/vdb_nanovectordb.py
create mode 100644 rag-web-ui/backend/nano_graphrag/_utils.py
create mode 100644 rag-web-ui/backend/nano_graphrag/base.py
create mode 100644 rag-web-ui/backend/nano_graphrag/entity_extraction/__init__.py
create mode 100644 rag-web-ui/backend/nano_graphrag/entity_extraction/extract.py
create mode 100644 rag-web-ui/backend/nano_graphrag/entity_extraction/metric.py
create mode 100644 rag-web-ui/backend/nano_graphrag/entity_extraction/module.py
create mode 100644 rag-web-ui/backend/nano_graphrag/graphrag.py
create mode 100644 rag-web-ui/backend/nano_graphrag/prompt.py
create mode 100644 rag-web-ui/backend/requirements.txt
create mode 100644 rag-web-ui/backend/tests/__init__.py
create mode 100644 rag-web-ui/backend/tests/test_testing_pipeline.py
create mode 100644 rag-web-ui/backend/uploads/README.md
create mode 100644 rag-web-ui/docker-compose.dev.yml
create mode 100644 rag-web-ui/docker-compose.yml
create mode 100644 rag-web-ui/frontend/.dockerignore
create mode 100644 rag-web-ui/frontend/.gitignore
create mode 100644 rag-web-ui/frontend/Dockerfile
create mode 100644 rag-web-ui/frontend/Dockerfile.dev
create mode 100644 rag-web-ui/frontend/README.md
create mode 100644 rag-web-ui/frontend/components.json
create mode 100644 rag-web-ui/frontend/eslint.config.mjs
create mode 100644 rag-web-ui/frontend/next.config.js
create mode 100644 rag-web-ui/frontend/package.json
create mode 100644 rag-web-ui/frontend/pnpm-lock.yaml
create mode 100644 rag-web-ui/frontend/postcss.config.js
create mode 100644 rag-web-ui/frontend/postcss.config.mjs
create mode 100644 rag-web-ui/frontend/public/file.svg
create mode 100644 rag-web-ui/frontend/public/globe.svg
create mode 100644 rag-web-ui/frontend/public/logo.png
create mode 100644 rag-web-ui/frontend/public/logo.svg
create mode 100644 rag-web-ui/frontend/public/next.svg
create mode 100644 rag-web-ui/frontend/public/vercel.svg
create mode 100644 rag-web-ui/frontend/public/window.svg
create mode 100644 rag-web-ui/frontend/src/app/dashboard/api-keys/page.tsx
create mode 100644 rag-web-ui/frontend/src/app/dashboard/chat/[id]/page.tsx
create mode 100644 rag-web-ui/frontend/src/app/dashboard/chat/new/page.tsx
create mode 100644 rag-web-ui/frontend/src/app/dashboard/chat/page.tsx
create mode 100644 rag-web-ui/frontend/src/app/dashboard/consistency-analysis/page.tsx
create mode 100644 rag-web-ui/frontend/src/app/dashboard/doc-processing/extract/page.tsx
create mode 100644 rag-web-ui/frontend/src/app/dashboard/doc-processing/page.tsx
create mode 100644 rag-web-ui/frontend/src/app/dashboard/doc-processing/test-case-gen/page.tsx
create mode 100644 rag-web-ui/frontend/src/app/dashboard/knowledge/[id]/page.tsx
create mode 100644 rag-web-ui/frontend/src/app/dashboard/knowledge/[id]/upload/page.tsx
create mode 100644 rag-web-ui/frontend/src/app/dashboard/knowledge/new/page.tsx
create mode 100644 rag-web-ui/frontend/src/app/dashboard/knowledge/page.tsx
create mode 100644 rag-web-ui/frontend/src/app/dashboard/page.tsx
create mode 100644 rag-web-ui/frontend/src/app/dashboard/test-retrieval/[id]/page.tsx
create mode 100644 rag-web-ui/frontend/src/app/favicon.ico
create mode 100644 rag-web-ui/frontend/src/app/globals.css
create mode 100644 rag-web-ui/frontend/src/app/layout.tsx
create mode 100644 rag-web-ui/frontend/src/app/login/page.tsx
create mode 100644 rag-web-ui/frontend/src/app/page.tsx
create mode 100644 rag-web-ui/frontend/src/app/register/page.tsx
create mode 100644 rag-web-ui/frontend/src/components/chat/answer.tsx
create mode 100644 rag-web-ui/frontend/src/components/knowledge-base/document-list.tsx
create mode 100644 rag-web-ui/frontend/src/components/knowledge-base/document-upload-steps.tsx
create mode 100644 rag-web-ui/frontend/src/components/layout/dashboard-layout.tsx
create mode 100644 rag-web-ui/frontend/src/components/theme-provider.tsx
create mode 100644 rag-web-ui/frontend/src/components/ui/accordion.tsx
create mode 100644 rag-web-ui/frontend/src/components/ui/badge.tsx
create mode 100644 rag-web-ui/frontend/src/components/ui/breadcrumb.tsx
create mode 100644 rag-web-ui/frontend/src/components/ui/button.tsx
create mode 100644 rag-web-ui/frontend/src/components/ui/card.tsx
create mode 100644 rag-web-ui/frontend/src/components/ui/dialog.tsx
create mode 100644 rag-web-ui/frontend/src/components/ui/divider.tsx
create mode 100644 rag-web-ui/frontend/src/components/ui/input.tsx
create mode 100644 rag-web-ui/frontend/src/components/ui/label.tsx
create mode 100644 rag-web-ui/frontend/src/components/ui/popover.tsx
create mode 100644 rag-web-ui/frontend/src/components/ui/progress.tsx
create mode 100644 rag-web-ui/frontend/src/components/ui/select.tsx
create mode 100644 rag-web-ui/frontend/src/components/ui/skeleton.tsx
create mode 100644 rag-web-ui/frontend/src/components/ui/switch.tsx
create mode 100644 rag-web-ui/frontend/src/components/ui/table.tsx
create mode 100644 rag-web-ui/frontend/src/components/ui/tabs.tsx
create mode 100644 rag-web-ui/frontend/src/components/ui/toast.tsx
create mode 100644 rag-web-ui/frontend/src/components/ui/toaster.tsx
create mode 100644 rag-web-ui/frontend/src/components/ui/use-toast.ts
create mode 100644 rag-web-ui/frontend/src/lib/api.ts
create mode 100644 rag-web-ui/frontend/src/lib/document-mock.ts
create mode 100644 rag-web-ui/frontend/src/lib/srs-tools-api.ts
create mode 100644 rag-web-ui/frontend/src/lib/utils.ts
create mode 100644 rag-web-ui/frontend/src/styles/globals.css
create mode 100644 rag-web-ui/frontend/tailwind.config.ts
create mode 100644 rag-web-ui/frontend/tsconfig.json
create mode 100644 rag-web-ui/nginx.conf
create mode 100644 rag-web-ui/nginx.dev.conf
diff --git a/.github/AGENTS.md b/.github/AGENTS.md
new file mode 100644
index 0000000..c433ad2
--- /dev/null
+++ b/.github/AGENTS.md
@@ -0,0 +1,24 @@
+# 测试生成智能体约定
+
+## 适用范围
+- 本工作区包含基于 Tool Calling 与 Skill Calling 的测试内容生成链路。
+- 当用户提出测试项分解、测试用例生成或预期成果生成需求时,必须触发 testing-orchestrator。
+
+## 已注册技能
+- identify-requirement-type:将用户需求文本识别为明确的测试需求类型,为后续测试项分解与测试用例生成提供分类依据。
+- decompose-test-items:按需求类型规则生成正常测试与异常测试测试项。
+- generate-test-cases:按测试项生成可执行测试用例,至少 1 条/测试项。
+- testing-orchestrator:按标准顺序编排工具调用并输出结构化结果。
+
+## 强制调用链
+1. identify-requirement-type
+2. decompose-test-items
+3. generate-test-cases
+4. build_expected_results
+5. format_output
+
+## 约束规则
+- 除非用户明确要求只看中间步骤,否则禁止跳步。
+- 每一步都必须显式接收上一步输出作为上下文输入。
+- 若无法识别类型,必须输出未知类型及候选类型,并继续执行通用分解。
+- 最终输出必须严格包含测试项、测试用例、预期成果三段,并按正常测试/异常测试分组。
diff --git a/.github/skills/decompose-test-items/SKILL.md b/.github/skills/decompose-test-items/SKILL.md
new file mode 100644
index 0000000..16f8e04
--- /dev/null
+++ b/.github/skills/decompose-test-items/SKILL.md
@@ -0,0 +1,46 @@
+---
+name: decompose-test-items
+description: "当需要基于需求类型把需求文本分解为可执行的正常/异常测试项时使用。"
+---
+
+# decompose-test-items
+
+## 目标
+基于用户需求文本和已识别需求类型,生成测试项列表。
+
+## 输入
+- user_requirement_text
+- requirement_type
+
+## 输出
+- normal_test_items:完整、可执行的正常测试项列表。
+- abnormal_test_items:完整、可执行的异常测试项列表。
+
+## 强制规则
+1. 每个软件功能至少应被正常测试与被认可的异常场景覆盖;复杂功能需继续细分。
+2. 每个测试项必须语义完整、可直接执行。
+3. 覆盖必须包含:正常流程、边界条件(适用时)、异常条件。
+4. 粒度需适中,避免过粗或过细。
+5. 对未知类型必须执行通用分解,并保持正常/异常分组。
+6. 对需求说明未显式给出但在用户手册或操作手册体现的功能,也应补充测试项覆盖。
+
+## 14类最小分解检查点
+- 功能测试:正常覆盖功能主路径、基本数据类型、合法边界值与状态转换;异常覆盖非法输入、不规则输入、非法边界值与最坏情况。
+- 性能测试:正常覆盖处理精度、响应时间、处理数据量与模块协调性;异常覆盖超负荷、软硬件限制、负载潜力上限与资源占用异常。
+- 外部接口测试:正常覆盖全部外部接口格式与内容正确性;异常覆盖每个输入输出接口的错误格式、错误内容与异常交互。
+- 人机交互界面测试:正常覆盖界面风格一致性与标准操作流程;异常覆盖误操作、快速操作、非法输入、错误命令与错误流程提示。
+- 强度测试:正常覆盖设计极限下系统功能和性能表现;异常覆盖超出极限时的降级行为、健壮性与饱和表现。
+- 余量测试:正常覆盖存储、通道、处理时间余量是否满足要求;异常覆盖余量不足或耗尽时系统告警与受控行为。
+- 可靠性测试:正常覆盖典型环境、运行剖面与输入变量组合;异常覆盖失效等级场景、边界环境变化、不合法输入域及失效记录。
+- 安全性测试:正常覆盖安全关键部件、安全结构与合法操作路径;异常覆盖危险状态、故障模式、边界接合部、非法进入与数据完整性保护。
+- 恢复性测试:正常覆盖故障探测、备用切换、恢复后继续执行;异常覆盖故障中作业保护、状态保护与恢复失败路径。
+- 边界测试:正常覆盖输入输出域边界、状态转换端点与功能界限;异常覆盖性能界限、容量界限和越界端点。
+- 安装性测试:正常覆盖标准及不同配置下安装卸载流程;异常覆盖安装规程错误、依赖异常与中断后的处理。
+- 互操作性测试:正常覆盖两个或多个软件同时运行与互操作过程;异常覆盖互操作失败、并行冲突与协同异常。
+- 敏感性测试:正常覆盖有效输入类中典型数据组合;异常覆盖引发不稳定或不正常处理的特殊数据组合。
+- 测试充分性要求:正常覆盖需求覆盖率、配置项覆盖与代码覆盖达标;异常覆盖未覆盖部分逐项分析、确认与报告输出。
+
+## 未知类型容错
+- 当 requirement_type 无法确定时,仍需输出正常/异常两组测试项。
+- 通用正常项至少包含:主流程正确性、合法边界值、标准输入输出。
+- 通用异常项至少包含:非法输入、越界输入、资源异常或状态冲突。
diff --git a/.github/skills/generate-test-cases/SKILL.md b/.github/skills/generate-test-cases/SKILL.md
new file mode 100644
index 0000000..5e83e99
--- /dev/null
+++ b/.github/skills/generate-test-cases/SKILL.md
@@ -0,0 +1,45 @@
+---
+name: generate-test-cases
+description: "当需要根据已分解测试项生成包含操作步骤与测试内容的具体测试用例时使用。"
+---
+
+# generate-test-cases
+
+## 目标
+按测试项生成测试用例,每个测试项至少对应 1 条用例。
+
+## 输入
+- normal_test_items
+- abnormal_test_items
+
+## 输出
+- normal_test_cases
+- abnormal_test_cases
+
+每条测试用例必须包含:
+- operation_steps
+- test_content
+- expected_result_placeholder
+
+## 规则
+1. 测试项与测试用例应保持一一对应关系。
+2. 每个测试项必须至少生成 1 条测试用例。
+3. 必须区分正常测试用例与异常测试用例。
+4. 操作步骤应可顺序执行,避免歧义。
+5. 操作步骤必须包含明确动作、对象和输入条件,禁止笼统动作词。
+6. test_content 必须包含可验证条件,便于后续生成可度量预期成果。
+
+## expected_result_placeholder 映射
+- {{return_value}}:接口或函数返回值验证。
+- {{state_change}}:系统状态变化验证。
+- {{error_message}}:异常场景错误信息验证。
+- {{data_persistence}}:数据库或存储落库结果验证。
+- {{ui_display}}:界面显示反馈验证。
+
+## 禁止模糊描述
+- 错误示例:"检查功能正常";正确示例:"验证返回状态码为200且响应体包含status=success"。
+- 错误示例:"输入合法数据";正确示例:"在用户名输入框输入长度为8的字母数字字符串并提交"。
+- 错误示例:"系统提示错误";正确示例:"触发非法输入后显示错误码E400和字段级提示文案"。
+
+## 预期结果耦合
+- 每条用例必须可在下一步绑定一条明确、可验证的预期成果。
diff --git a/.github/skills/identify-requirement-type/SKILL.md b/.github/skills/identify-requirement-type/SKILL.md
new file mode 100644
index 0000000..d6e45b1
--- /dev/null
+++ b/.github/skills/identify-requirement-type/SKILL.md
@@ -0,0 +1,62 @@
+---
+name: identify-requirement-type
+description: "当需要在测试项分解与测试用例生成之前识别需求类型时使用。"
+---
+
+# identify-requirement-type
+
+## 目标
+将用户需求文本识别为明确的测试需求类型,为后续测试项分解与测试用例生成提供分类依据。
+
+## 输入
+- user_requirement_text:用户原始需求文本。
+
+## 输出
+- requirement_type:以下之一
+ - 功能测试
+ - 性能测试
+ - 外部接口测试
+ - 人机交互界面测试
+ - 强度测试
+ - 余量测试
+ - 可靠性测试
+ - 安全性测试
+ - 恢复性测试
+ - 边界测试
+ - 安装性测试
+ - 互操作性测试
+ - 敏感性测试
+ - 测试充分性要求
+ - 未知类型
+- reason:简要判断依据。
+- candidates:当 requirement_type 为未知类型时,给出 1-3 个最接近候选类型。
+
+## 类型识别信号
+- 功能测试:关注功能需求逐项验证、业务流程正确性、输入输出行为、状态转换与边界值处理。
+- 性能测试:关注处理精度、响应时间、处理数据量、系统协调性、负载潜力与运行占用空间。
+- 外部接口测试:关注外部输入输出接口的格式、内容、协议与正常/异常交互表现。
+- 人机交互界面测试:关注界面一致性、界面风格、操作流程、误操作健壮性与错误提示能力。
+- 强度测试:关注系统在极限、超负荷、饱和和降级条件下的稳定性与承受能力。
+- 余量测试:关注存储余量、输入输出通道余量、功能处理时间余量等资源裕度。
+- 可靠性测试:关注真实或仿真环境下的失效等级、运行剖面、输入覆盖和长期稳定运行能力。
+- 安全性测试:关注危险状态响应、安全关键部件、异常输入防护、非法访问阻断和数据完整性保护。
+- 恢复性测试:关注故障探测、备用切换、系统状态保护与从无错误状态继续执行能力。
+- 边界测试:关注输入输出域边界、状态转换端点、功能界限、性能界限与容量界限。
+- 安装性测试:关注不同配置下安装卸载流程和安装规程执行正确性。
+- 互操作性测试:关注多个软件并行运行时的互操作能力与协同正确性。
+- 敏感性测试:关注有效输入类中可能引发不稳定或不正常处理的数据组合。
+- 测试充分性要求:关注需求覆盖率、配置项覆盖、语句覆盖、分支覆盖及未覆盖分析确认。
+
+## 规则
+1. 优先依据需求文本中的显式表述进行分类。
+2. 分类应以语义意图为主,不能只做关键词机械匹配。
+3. 置信度不足时输出未知类型,并提供候选类型。
+4. 判断依据需简洁、可追溯到文本证据。
+
+## 容错
+- 当需求描述过于笼统或跨多类型混合时,输出未知类型,并在 candidates 给出最接近类型。
+- 当识别不稳定时,优先保守分类,不强行归入单一类型。
+- 未知类型不阻断后续流程,应继续执行通用测试项分解。
+
+## 调试
+- debug 模式下返回每个类型的分类分数 classification_scores。
diff --git a/.github/skills/testing-orchestrator/SKILL.md b/.github/skills/testing-orchestrator/SKILL.md
new file mode 100644
index 0000000..d9b5be5
--- /dev/null
+++ b/.github/skills/testing-orchestrator/SKILL.md
@@ -0,0 +1,57 @@
+---
+name: testing-orchestrator
+description: "当用户要求测试项分解或测试用例生成且需要完整工具调用链时使用。"
+---
+
+# testing-orchestrator
+
+## 目标
+严格执行测试生成调用链,并显式传递每一步上下文。
+
+## 标准调用链
+1. identify-requirement-type
+2. decompose-test-items
+3. generate-test-cases
+4. build_expected_results
+5. format_output
+
+## 编排规则
+1. 优先使用 Skill 与 Tool,不使用临时硬编码逻辑替代。
+2. 除非用户明确要求,否则不得跳过任何步骤。
+3. 每一步必须显式接收上一步输出。
+4. 分类失败时输出未知类型并继续执行通用分解。
+
+## 输出模板
+最终输出必须严格遵循以下分组结构:
+
+**测试项**
+
+**正常测试**:
+1. [测试项 N1]:...
+
+**异常测试**:
+1. [测试项 E1]:...
+
+**测试用例**
+
+**正常测试**:
+1. [用例 N1](对应测试项 N1):...
+
+**异常测试**:
+1. [用例 E1](对应测试项 E1):...
+
+**预期成果**
+
+**正常测试**:
+1. [预期 N1](对应用例 N1):...
+
+**异常测试**:
+1. [预期 E1](对应用例 E1):...
+
+## 调试模式
+当 debug=true 时,输出步骤日志并包含:
+- step_name
+- input_summary
+- output_summary
+- success
+- fallback_used
diff --git a/.github/测试项分解要求.md b/.github/测试项分解要求.md
new file mode 100644
index 0000000..001526d
--- /dev/null
+++ b/.github/测试项分解要求.md
@@ -0,0 +1,108 @@
+5.4.5.1功能测试
+功能测试是对软件需求规格说明中的功能需求逐项进行的测试,以验证其功能是否满足要求。功能测试一般需进行:
+1. 每一个软件功能应至少被一个测试用例和一个被认可的异常所覆盖,对大的功能应进一步分解为更细的功能,使测试用例可以直接和功能对应;
+2. 用基本数据类型和数据值测试;
+3. 用一系列合理的数据类型和数据值运行,测试超负荷、饱和及其它“最坏情况”的结果;
+4. 用假想的数据类型和数据值运行,测试排斥不规则输入的能力;
+5. 每个功能的合法边界值和非法边界值都应被作为测试用例;
+6. 应考虑软件功能对操作模式、运行环境、运行状态、状态转换、运行时间等的覆盖要求;
+7. 对于在需求规格说明中没有指明,而在用户使用手册、操作手册中表明出来的每一功能及操作,都应有相应测试用例覆盖。
+
+5.4.5.2性能测试
+性能测试是对软件需求规格说明中的性能需求逐项进行的测试,以验证其性能是否满足要求。性能测试一般需进行:
+1. 测试程序在获得定量结果时程序计算的精确性(处理精度);
+2. 测试程序在有速度要求时完成功能的时间(响应时间);
+3. 测试程序完成功能所能处理的数据量;
+4. 测试程序各部分的协调性,如高速、低速操作的协调;
+5. 测试软/硬件中因素是否限制了程序的性能;
+6. 测试程序的负载潜力;
+7. 测试程序运行占用的空间。
+
+5.4.5.3外部接口测试
+外部接口测试是对软件需求规格说明中的外部接口需求逐项进行的测试。外部接口测试一般需进行:
+1. 测试所有外部接口,检查接口信息的格式及内容;
+2. 对每一个外部的输入/输出接口做正常和异常情况的测试。
+
+5.4.5.4人机交互界面测试
+人机交互界面测试是对所有人机交互界面提供的操作和显示界面进行的测试,以检验是否满足用户的要求。人机交互界面测试一般需进行:
+1. 测试操作和显示界面及界面风格与软件需求规格说明中要求的一致性和符合性;
+2. 以非常规操作、误操作、快速操作来检验界面的健壮性;
+3. 测试对错误命令或非法数据输入的检测能力与提示情况;
+4. 测试对错误操作流程的检测与提示;
+5. 如果有用户手册或操作手册,应对照手册逐条进行操作和观察。
+
+5.4.5.5强度测试
+强度测试是强制软件运行在不正常到发生故障的情况下(设计的极限状态到超出极限),检验软件可以运行到何种程度的测试。强度测试一般需进行:
+1. 性能的强度测试;
+2. 降级能力的强度测试;
+3. 系统健壮性测试;
+4. 系统饱和测试。
+强度测试在某种程度上可看作性能测试的延伸,测出软件功能、性能的实际极限。其详细要求可参见附录B“强度测试”。
+
+5.4.5.6余量测试
+余量测试是对软件是否达到需求规格说明中要求的余量的测试。若无明确要求时,一般至少留有20%的余量。根据测试要求,余量测试一般需提供:
+1. 全部存储量的余量;
+2. 输入、输出及通道的余量;
+3. 功能处理时间的余量。
+
+5.4.5.7可靠性测试
+可靠性测试是在真实的和仿真的环境中,为做出软件可靠性估计而对软件进行的功能测试(其输入覆盖和环境覆盖一般大于普通的功能测试),可靠性测试中必须按照运行剖面和使用的概率分布随机地选择测试用例。可靠性测试一般需:
+1. 测试环境应与典型使用环境的统计特性相一致,必要时使用测试平台;
+2. 定义软件失效等级;
+3. 建立软件运行剖面/操作剖面;
+4. 测试记录更为详细、准确,应记录失效现象和时间;
+5. 必须保证输入覆盖,应覆盖重要的输入变量值、各种使用功能、相关输入变量可能组合以及不合法输入域等;
+6. 对于可能导致软件运行方式改变的一些边界条件和环境条件,必须进行针对性测试。
+有关可靠性测试的详细要求参见附录C“可靠性测试”。
+
+5.4.5.8安全性测试
+A、B、C级软件需要进行安全性测试。安全性测试是检验软件中已存在的安全性、安
+全保密性措施是否有效的测试。安全性测试一般:
+1. 应进行软件安全性分析,并且在软件需求中明确每一个危险状态及导致危险的可能原因,在测试中全面检验软件在这些危险状态下的反应;
+2. 对安全性关键的软件部件,应单独测试,以确认该软件部件满足安全性需求;
+3. 对软件设计中用于提高安全性的结构、算法、容错、冗余、中断处理等方案应进行针对性测试;
+4. 测试应尽可能在符合实际使用的条件下进行;
+5. 除在正常条件下测试外,应在异常条件下测试软件,以表明不会因可能的单个或多个输入错误而导致不安全状态;
+6. 应包含硬件及软件输入故障模式测试;
+7. 应包含边界、界外及边界接合部的测试;
+8. 应包括“0”、穿越“0”以及从两个方向趋近于“0”的输入值;
+9. 应包含在最坏情况配置下的最小和最大输入数据率,以确定系统的固有能力及对这些环境的反应;
+10. 操作员接口测试应包括在安全性关键操作中的操作员错误,以验证安全系统对这些错误的响应;
+11. 应测试双工切换、多机替换的正确性和连续性;
+12. 应测试防止非法进入系统并保护系统数据完整性的能力。
+
+5.4.5.9恢复性测试
+恢复性测试是对有恢复或重置(reset)功能的软件的每一类导致恢复或重置的情况,逐一进行的测试,以验证其恢复或重置功能。恢复性测试是要证实在克服硬件故障后,系统能否正常地继续进行工作,且不对系统造成任何损害。恢复性测试一般需进行:
+1. 探测错误功能的测试;
+2. 能否切换或自动启动备用硬件的测试;
+3. 在故障发生时能否保护正在运行的作业和系统状态的测试;
+4. 在系统恢复后,能否从最后记录下来的无错误状态开始继续执行作业的测试。
+
+5.4.5.10边界测试
+边界测试是对软件处在边界或端点情况下运行状态的测试。边界测试一般需进行:
+1. 软件的输入域或输出域的边界或端点的测试;
+2. 状态转换的边界或端点的测试;
+3. 功能界限的边界或端点的测试;
+4. 性能界限的边界或端点的测试;
+5. 容量界限的边界或端点的测试。
+
+5.4.5.11安装性测试
+安装性测试是对安装过程是否符合安装规程的测试,以发现安装过程中的错误。安装性测试一般需进行:
+1. 不同配置下的安装和卸载测试;
+2. 安装规程的正确性的测试。
+
+5.4.5.12互操作性测试
+互操作性测试是为验证不同软件之间的互操作能力而进行的测试。互操作性测试一般:
+1. 必须同时运行两个或多个不同的软件;
+2. 软件之间发生互操作。
+
+5.4.5.13敏感性测试
+敏感性测试是为发现在有效输入类中可能引起某种不稳定性或不正常处理的某些数据的组合而进行的测试。敏感性测试一般需进行:
+1. 发现有效输入类中可能引起某种不稳定性的数据组合的测试;
+2. 发现有效输入类中可能引起某种不正常处理的数据组合的测试。
+
+5.4.5.14测试充分性要求
+1. 对软件需求规格说明中明确和隐含的需求(包括功能、性能、接口、质量要求等)的覆盖率应达到100%;
+2. 配置项测试应使用与软件开发相同的编译器,全面覆盖软件需求说明文档中的所有要求;
+3. 对于A、B级嵌入式软件,对配置项源程序测试的语句、分支覆盖率均应达到100%。对用高级语言编制的A、B级嵌入式软件,应对配置项目标码进行结构分析和测试,测试的目标码语句、分支覆盖率均应达到100%。对覆盖率达不到要求的软件,应对未覆盖的部分逐一进行分析和确认,并提供分析报告。
+
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..66112f6
--- /dev/null
+++ b/README.md
@@ -0,0 +1,164 @@
+
+
+## 简介
+RAG Web UI 是一个面向企业知识库场景的全栈项目,提供从文档接入、向量化检索、对话问答到文档工程化处理的完整链路。
+
+项目采用前后端分离架构:
+- 后端基于 FastAPI,负责认证、知识库、向量检索、文档处理任务与工具调用。
+- 前端基于 Next.js,提供知识库管理、聊天、文档处理等可视化页面。
+- 基础设施使用 MySQL、ChromaDB、MinIO,可通过 Docker Compose 一键启动。
+
+## 核心能力
+- 知识库管理:支持 PDF、DOCX、Markdown、TXT 上传,预览、异步处理、状态追踪。
+- RAG 对话:支持多轮上下文问答,结合知识库检索结果生成回答。
+- 文档处理中心:提供需求提取、测试内容生成等工程化能力。
+- 工具中心:后端内置工具注册与调用机制,支持后续扩展更多可被模型调用的工具。
+- 多模型支持:支持 OpenAI、DashScope、DeepSeek、Ollama 等配置方式。
+
+## 项目结构
+```text
+rag-web-ui/
+ backend/ # FastAPI 后端
+ app/
+ api/api_v1/ # 业务 API 路由(auth/chat/knowledge-base/testing/tools)
+ services/ # 文档处理、检索、向量存储等服务层
+ tools/ # 工具中心(包含 SRS 需求提取工具)
+ models/ # SQLAlchemy 模型
+ schemas/ # Pydantic 数据模型
+ alembic/ # 数据库迁移
+ frontend/ # Next.js 前端
+ src/app/ # 页面与路由
+ src/components/ # 组件
+ src/lib/ # API 封装、工具调用封装
+ docs/ # 文档与教程
+ docker-compose.yml # 标准部署编排
+ docker-compose.dev.yml # 开发调试编排
+```
+
+## 快速开始
+### 环境要求
+- Docker 与 Docker Compose v2+
+- Node.js 18+
+- Python 3.9+
+- 推荐 8GB 及以上内存
+
+### 1. 准备环境变量
+```bash
+cp .env.example .env
+```
+
+### 2. 启动服务
+```bash
+docker compose up -d --build
+```
+
+### 3. 访问服务
+- 前端页面: http://127.0.0.1.nip.io
+- API 文档: http://127.0.0.1.nip.io/redoc
+- MinIO 控制台: http://127.0.0.1.nip.io:9001
+
+## 配置说明
+### 核心配置
+| 配置项 | 说明 | 示例 |
+| --- | --- | --- |
+| MYSQL_SERVER | MySQL 主机 | localhost 或 db |
+| MYSQL_PORT | MySQL 端口 | 3306 |
+| MYSQL_USER | MySQL 用户名 | ragagent |
+| MYSQL_PASSWORD | MySQL 密码 | ragagent |
+| MYSQL_DATABASE | MySQL 库名 | ragagent |
+| SECRET_KEY | JWT 密钥 | 自定义随机字符串 |
+| ACCESS_TOKEN_EXPIRE_MINUTES | Token 过期时间(分钟) | 10080 |
+
+### 模型与向量配置
+| 配置项 | 说明 | 示例 |
+| --- | --- | --- |
+| CHAT_PROVIDER | 对话模型提供商 | dashscope/openai/deepseek/ollama |
+| EMBEDDINGS_PROVIDER | 向量模型提供商 | dashscope/openai/ollama |
+| DASH_SCOPE_API_KEY | DashScope Key | sk-xxx |
+| DASH_SCOPE_CHAT_MODEL | DashScope 对话模型 | qwen3.5-plus |
+| DASH_SCOPE_EMBEDDINGS_MODEL | DashScope 向量模型 | text-embedding-v4 |
+| VECTOR_STORE_TYPE | 向量库类型 | chroma 或 qdrant |
+
+说明:
+- 当使用 DashScope 兼容模式时,建议使用文本向量模型(例如 text-embedding-v4)。
+- 项目支持通过 API_KEY 作为统一兜底密钥,再由各 provider 配置覆盖。
+
+### 存储配置
+| 配置项 | 说明 | 示例 |
+| --- | --- | --- |
+| MINIO_ENDPOINT | MinIO 地址 | localhost:9000 |
+| MINIO_ACCESS_KEY | MinIO 用户名 | minioadmin |
+| MINIO_SECRET_KEY | MinIO 密码 | minioadmin |
+| MINIO_BUCKET_NAME | 桶名 | documents |
+
+## API 概览
+后端统一前缀为 /api。
+
+主要路由:
+- /api/auth: 登录、注册、令牌。
+- /api/knowledge-base: 知识库与文档上传/处理/任务状态。
+- /api/chat: 会话与消息。
+- /api/testing: 测试内容生成流水线。
+- /api/tools: 工具中心(包含 SRS 需求提取任务接口)。
+
+## 开发与测试
+### 前端类型检查
+```bash
+cd frontend
+pnpm exec tsc --noEmit
+```
+
+### 后端测试
+```bash
+cd backend
+python -m pytest tests/test_testing_pipeline.py
+```
+
+### 数据库迁移
+```bash
+cd backend
+alembic upgrade head
+```
+
+## 常见问题
+### 1) 提示某张表不存在(例如 tool_jobs)
+原因:数据库迁移未执行到最新版本。
+
+处理:执行 alembic upgrade head,或重启后端让启动迁移自动执行。
+
+### 2) 知识库文档处理失败并出现向量模型错误
+原因:模型配置与 provider 兼容性不匹配,或账号配额不足。
+
+处理:
+- 检查 EMBEDDINGS_PROVIDER 与模型名是否匹配。
+- DashScope 兼容模式优先使用 text-embedding-v4。
+- 若报配额不足(AllocationQuota.FreeTierOnly),请切换可用 API Key 或开通付费资源。
+
+### 3) 重复点击“开始处理”后状态异常
+后端已增加幂等处理与重试兜底,仍建议单次提交后等待任务轮询完成再重复操作。
+
+## 许可证
+本项目基于 LICENSE 文件中定义的条款发布。
\ No newline at end of file
diff --git a/rag-web-ui/.env.example b/rag-web-ui/.env.example
new file mode 100644
index 0000000..ebb9e42
--- /dev/null
+++ b/rag-web-ui/.env.example
@@ -0,0 +1,55 @@
+PROJECT_NAME=RAG Agent
+VERSION=0.1.0
+API_V1_STR=/api
+
+MYSQL_SERVER=localhost
+MYSQL_PORT=3306
+MYSQL_USER=ragagent
+MYSQL_PASSWORD=ragagent
+MYSQL_DATABASE=ragagent
+
+API_KEY= # API Key,服务商官网获取
+
+OPENAI_API_KEY= # API Key,和上面一致
+OPENAI_API_BASE= # base-url,服务商官网获取
+OPENAI_MODEL= # 文本生成模型名称,例如 qwen3.5-plus
+OPENAI_EMBEDDINGS_MODEL= # 向量模型名称,例如 text-embedding-v4
+
+SECRET_KEY=dev-secret-key-change-me
+ACCESS_TOKEN_EXPIRE_MINUTES=10080
+
+CHAT_PROVIDER=dashscope
+EMBEDDINGS_PROVIDER=dashscope
+
+DASH_SCOPE_API_KEY= # API Key,和上面一致
+DASH_SCOPE_API_BASE= # base-url,和上面一致
+DASH_SCOPE_CHAT_MODEL= # 文本生成模型名称
+DASH_SCOPE_EMBEDDINGS_MODEL= # 向量模型名称
+
+VECTOR_STORE_TYPE=chroma
+CHROMA_DB_HOST=localhost
+CHROMA_DB_PORT=8001
+
+MINIO_ENDPOINT=localhost:9000
+MINIO_ACCESS_KEY=minioadmin
+MINIO_SECRET_KEY=minioadmin
+MINIO_BUCKET_NAME=documents
+
+RERANKER_API_URL= # reranker 模型的 base-url,服务商官网获取
+RERANKER_API_KEY= # API Key,和上面一致
+RERANKER_MODEL= # reranker 模型名称,例如 qwen3-vl-rerank
+RERANKER_TIMEOUT_SECONDS=10
+RERANKER_WEIGHT=0.75
+
+GRAPHRAG_ENABLED=true
+GRAPHRAG_WORKING_DIR=./graphrag_cache
+GRAPHRAG_GRAPH_STORAGE=neo4j
+GRAPHRAG_QUERY_LEVEL=2
+GRAPHRAG_LOCAL_TOP_K=20
+GRAPHRAG_ENTITY_EXTRACT_MAX_GLEANING=1
+GRAPHRAG_EMBEDDING_DIM=1024
+GRAPHRAG_EMBEDDING_MAX_TOKEN_SIZE=8192
+
+NEO4J_URL=bolt://localhost:7687
+NEO4J_USERNAME=neo4j
+NEO4J_PASSWORD=12345678
diff --git a/rag-web-ui/.gitattributes b/rag-web-ui/.gitattributes
new file mode 100644
index 0000000..2680858
--- /dev/null
+++ b/rag-web-ui/.gitattributes
@@ -0,0 +1,27 @@
+# Set default behavior to automatically normalize line endings
+* text=auto
+
+# Unix/Linux/macOS style files (using LF)
+*.sh text eol=lf
+*.bash text eol=lf
+Dockerfile text eol=lf
+.dockerignore text eol=lf
+docker-compose*.yml text eol=lf
+*.py text eol=lf
+*.json text eol=lf
+*.yml text eol=lf
+*.yaml text eol=lf
+*.md text eol=lf
+
+# Windows style files (using CRLF)
+*.{cmd,[cC][mM][dD]} text eol=crlf
+*.{bat,[bB][aA][tT]} text eol=crlf
+*.ps1 text eol=crlf
+
+# Binary files (no conversion)
+*.png binary
+*.jpg binary
+*.gif binary
+*.ico binary
+*.zip binary
+*.pdf binary
diff --git a/rag-web-ui/.gitignore b/rag-web-ui/.gitignore
new file mode 100644
index 0000000..3cf77da
--- /dev/null
+++ b/rag-web-ui/.gitignore
@@ -0,0 +1,60 @@
+# Python
+__pycache__/
+*.py[cod]
+*$py.class
+*.so
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+.pytest_cache/
+.coverage
+htmlcov/
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+chroma_db/
+
+# Node/Next.js
+node_modules/
+.next/
+out/
+.DS_Store
+*.pem
+.env.local
+.env.development.local
+.env.test.local
+.env.production.local
+npm-debug.log*
+yarn-debug.log*
+yarn-error.log*
+.vercel
+.turbo
+
+# IDE
+.idea/
+.vscode/
+*.swp
+*.swo
+*~
+
+# Project specific
+backend/static/
+backend/media/
+frontend/.env
+backend/.env
diff --git a/rag-web-ui/LICENSE b/rag-web-ui/LICENSE
new file mode 100644
index 0000000..74c86af
--- /dev/null
+++ b/rag-web-ui/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+Copyright 2024 Marcus Schiesser
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
\ No newline at end of file
diff --git a/rag-web-ui/backend/Dockerfile b/rag-web-ui/backend/Dockerfile
new file mode 100644
index 0000000..9b127b2
--- /dev/null
+++ b/rag-web-ui/backend/Dockerfile
@@ -0,0 +1,35 @@
+FROM python:3.11-slim
+
+WORKDIR /app
+
+# Install system dependencies
+RUN apt-get update && apt-get install -y \
+ build-essential \
+ default-libmysqlclient-dev \
+ pkg-config \
+ netcat-traditional \
+ curl \
+ && rm -rf /var/lib/apt/lists/*
+
+# Copy requirements file
+COPY requirements.txt .
+
+# Install Python packages
+RUN pip install --no-cache-dir -r requirements.txt
+
+# Copy entrypoint script first
+COPY entrypoint.sh .
+RUN chmod +x entrypoint.sh
+
+# Copy application files
+COPY . .
+
+# Create uploads directory
+RUN mkdir -p uploads
+
+# Set Python path and environment
+ENV PYTHONPATH=/app
+ENV ENVIRONMENT=production
+
+# Run the application
+CMD ["uvicorn", "app.main:app", "--host", "0.0.0.0", "--port", "8000"]
diff --git a/rag-web-ui/backend/Dockerfile.dev b/rag-web-ui/backend/Dockerfile.dev
new file mode 100644
index 0000000..8e6c5b5
--- /dev/null
+++ b/rag-web-ui/backend/Dockerfile.dev
@@ -0,0 +1,37 @@
+FROM python:3.11-slim
+
+WORKDIR /app
+
+# Install system dependencies
+RUN apt-get update && apt-get install -y \
+ build-essential \
+ default-libmysqlclient-dev \
+ pkg-config \
+ netcat-traditional \
+ curl \
+ && rm -rf /var/lib/apt/lists/*
+
+# Copy requirements file
+COPY requirements.txt .
+
+# Install Python packages with retry mechanism
+RUN pip install --no-cache-dir -r requirements.txt || \
+ (echo "Retrying in 5 seconds..." && sleep 5 && pip install --no-cache-dir -r requirements.txt) || \
+ (echo "Retrying in 10 seconds..." && sleep 10 && pip install --no-cache-dir -r requirements.txt)
+
+# Copy entrypoint script first
+COPY entrypoint.sh .
+RUN chmod +x entrypoint.sh
+
+# Copy the rest of the application
+COPY . .
+
+# Create uploads directory
+RUN mkdir -p uploads
+
+# Set Python path and environment
+ENV PYTHONPATH=/app
+ENV ENVIRONMENT=development
+
+# Run the application
+ENTRYPOINT ["./entrypoint.sh"]
diff --git a/rag-web-ui/backend/__init__.py b/rag-web-ui/backend/__init__.py
new file mode 100644
index 0000000..0519ecb
--- /dev/null
+++ b/rag-web-ui/backend/__init__.py
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/rag-web-ui/backend/alembic.ini b/rag-web-ui/backend/alembic.ini
new file mode 100644
index 0000000..7d79c78
--- /dev/null
+++ b/rag-web-ui/backend/alembic.ini
@@ -0,0 +1,42 @@
+[alembic]
+script_location = alembic
+sqlalchemy.url = mysql+mysqlconnector://ragagent:ragagent@db/ragagent
+
+[loggers]
+keys = root,sqlalchemy,alembic,uvicorn
+
+[handlers]
+keys = console
+
+[formatters]
+keys = generic
+
+[logger_root]
+level = WARN
+handlers = console
+qualname =
+
+[logger_sqlalchemy]
+level = WARN
+handlers =
+qualname = sqlalchemy.engine
+
+[logger_alembic]
+level = INFO
+handlers =
+qualname = alembic
+
+[logger_uvicorn]
+level = INFO
+handlers =
+qualname = uvicorn
+
+[handler_console]
+class = StreamHandler
+args = (sys.stderr,)
+level = NOTSET
+formatter = generic
+
+[formatter_generic]
+format = %(levelname)-5.5s [%(name)s] %(message)s
+datefmt = %H:%M:%S
\ No newline at end of file
diff --git a/rag-web-ui/backend/alembic/env.py b/rag-web-ui/backend/alembic/env.py
new file mode 100644
index 0000000..4f4dd4a
--- /dev/null
+++ b/rag-web-ui/backend/alembic/env.py
@@ -0,0 +1,90 @@
+import os
+import sys
+from logging.config import fileConfig
+from sqlalchemy import engine_from_config
+from sqlalchemy import pool
+from alembic import context
+
+# Add the parent directory to Python path
+sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+
+from app.models.base import Base
+from app.models.user import User
+from app.models.knowledge import KnowledgeBase, Document
+from app.models.chat import Chat, Message
+from app.models.tooling import ToolJob, SRSExtraction, SRSRequirement
+from app.core.config import settings
+
+# this is the Alembic Config object, which provides
+# access to the values within the .ini file in use.
+config = context.config
+
+# Interpret the config file for Python logging.
+# This line sets up loggers basically.
+if config.config_file_name is not None:
+    fileConfig(config.config_file_name)
+
+# add your model's MetaData object here
+# for 'autogenerate' support
+# All model modules are imported above so their tables are registered on
+# Base.metadata before autogenerate inspects it.
+target_metadata = Base.metadata
+
+# other values from the config, defined by the needs of env.py,
+# can be acquired:
+# my_important_option = config.get_main_option("my_important_option")
+# ... etc.
+
+def get_url():
+    """Return the database URL for migrations.
+
+    Taken from application settings so migrations use the same env-driven
+    configuration as the app; it replaces the static ``sqlalchemy.url``
+    from alembic.ini at runtime (see run_migrations_online/offline).
+    """
+    return settings.get_database_url
+
+def run_migrations_offline() -> None:
+    """Run migrations in 'offline' mode.
+
+    This configures the context with just a URL
+    and not an Engine, though an Engine is acceptable
+    here as well. By skipping the Engine creation
+    we don't even need a DBAPI to be available.
+
+    Calls to context.execute() here emit the given string to the
+    script output.
+
+    """
+    # Env-driven URL (see get_url) rather than the static alembic.ini value.
+    url = get_url()
+    context.configure(
+        url=url,
+        target_metadata=target_metadata,
+        # Render bound parameters inline so the emitted SQL script is
+        # executable as-is, without a live DBAPI connection.
+        literal_binds=True,
+        dialect_opts={"paramstyle": "named"},
+    )
+
+    with context.begin_transaction():
+        context.run_migrations()
+
+
+def run_migrations_online() -> None:
+    """Run migrations in 'online' mode.
+
+    In this scenario we need to create an Engine
+    and associate a connection with the context.
+
+    """
+    # NOTE(review): config.get_section() can return None if the [alembic]
+    # section is missing from the ini file -- assumed present here; confirm
+    # if alembic.ini is ever restructured.
+    configuration = config.get_section(config.config_ini_section)
+    # Replace the static ini URL with the env-driven one.
+    configuration["sqlalchemy.url"] = get_url()
+    connectable = engine_from_config(
+        configuration,
+        prefix="sqlalchemy.",
+        # Migrations are a one-shot process; no need to pool connections.
+        poolclass=pool.NullPool,
+    )
+
+    with connectable.connect() as connection:
+        context.configure(
+            connection=connection, target_metadata=target_metadata
+        )
+
+        with context.begin_transaction():
+            context.run_migrations()
+
+
+# Entry point: Alembic selects offline mode for `--sql` invocations;
+# otherwise migrations run against a live connection.
+if context.is_offline_mode():
+    run_migrations_offline()
+else:
+    run_migrations_online()
\ No newline at end of file
diff --git a/rag-web-ui/backend/alembic/script.py.mako b/rag-web-ui/backend/alembic/script.py.mako
new file mode 100644
index 0000000..fbc4b07
--- /dev/null
+++ b/rag-web-ui/backend/alembic/script.py.mako
@@ -0,0 +1,26 @@
+"""${message}
+
+Revision ID: ${up_revision}
+Revises: ${down_revision | comma,n}
+Create Date: ${create_date}
+
+"""
+from typing import Sequence, Union
+
+from alembic import op
+import sqlalchemy as sa
+${imports if imports else ""}
+
+# revision identifiers, used by Alembic.
+revision: str = ${repr(up_revision)}
+down_revision: Union[str, None] = ${repr(down_revision)}
+branch_labels: Union[str, Sequence[str], None] = ${repr(branch_labels)}
+depends_on: Union[str, Sequence[str], None] = ${repr(depends_on)}
+
+
+def upgrade() -> None:
+ ${upgrades if upgrades else "pass"}
+
+
+def downgrade() -> None:
+ ${downgrades if downgrades else "pass"}
diff --git a/rag-web-ui/backend/alembic/versions/3580c0dcd005_increase_api_key_length.py b/rag-web-ui/backend/alembic/versions/3580c0dcd005_increase_api_key_length.py
new file mode 100644
index 0000000..5c3caa5
--- /dev/null
+++ b/rag-web-ui/backend/alembic/versions/3580c0dcd005_increase_api_key_length.py
@@ -0,0 +1,36 @@
+"""increase_api_key_length
+
+Revision ID: 3580c0dcd005
+Revises: e214adf7fb66
+Create Date: 2024-01-20 14:25:00.000000
+
+"""
+from typing import Sequence, Union
+
+from alembic import op
+import sqlalchemy as sa
+
+
+# revision identifiers, used by Alembic.
+revision: str = '3580c0dcd005'
+down_revision: Union[str, None] = 'e214adf7fb66'
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade() -> None:
+    """Widen api_keys.key from 64 to 128 characters."""
+    # ### commands auto generated by Alembic - please adjust! ###
+    op.alter_column('api_keys', 'key',
+        existing_type=sa.String(length=64),
+        type_=sa.String(length=128),
+        existing_nullable=False)
+    # ### end Alembic commands ###
+
+
+def downgrade() -> None:
+    """Revert api_keys.key back to 64 characters.
+
+    NOTE(review): keys longer than 64 chars written after the upgrade may be
+    truncated or rejected here -- confirm before downgrading a populated table.
+    """
+    # ### commands auto generated by Alembic - please adjust! ###
+    op.alter_column('api_keys', 'key',
+        existing_type=sa.String(length=128),
+        type_=sa.String(length=64),
+        existing_nullable=False)
+    # ### end Alembic commands ###
diff --git a/rag-web-ui/backend/alembic/versions/59cfa0f1361d_rename_metadata_to_chunk_metadata.py b/rag-web-ui/backend/alembic/versions/59cfa0f1361d_rename_metadata_to_chunk_metadata.py
new file mode 100644
index 0000000..a63a72c
--- /dev/null
+++ b/rag-web-ui/backend/alembic/versions/59cfa0f1361d_rename_metadata_to_chunk_metadata.py
@@ -0,0 +1,116 @@
+"""rename_metadata_to_chunk_metadata
+
+Revision ID: 59cfa0f1361d
+Revises: initial_schema
+Create Date: 2025-01-13 23:26:38.232326
+
+"""
+from typing import Sequence, Union
+
+from alembic import op
+import sqlalchemy as sa
+from sqlalchemy.dialects import mysql
+
+# revision identifiers, used by Alembic.
+revision: str = '59cfa0f1361d'
+down_revision: Union[str, None] = 'initial_schema'
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade() -> None:
+    """Rename document_chunks.metadata to chunk_metadata and normalize indexes.
+
+    Also adds document_chunks.document_id with FKs, relaxes NOT NULL on
+    processing_tasks columns, and replaces hand-named indexes with Alembic's
+    ix_* naming.
+    """
+    # ### commands auto generated by Alembic - please adjust! ###
+    op.create_index(op.f('ix_chats_id'), 'chats', ['id'], unique=False)
+    # NOTE(review): adding a NOT NULL column without a server_default will
+    # fail on MySQL if document_chunks already contains rows -- confirm this
+    # migration only runs on empty/fresh chunk tables.
+    op.add_column('document_chunks', sa.Column('document_id', sa.Integer(), nullable=False))
+    op.add_column('document_chunks', sa.Column('chunk_metadata', sa.JSON(), nullable=True))
+    op.alter_column('document_chunks', 'created_at',
+        existing_type=mysql.TIMESTAMP(),
+        type_=sa.DateTime(),
+        nullable=False,
+        existing_server_default=sa.text('CURRENT_TIMESTAMP'))
+    op.alter_column('document_chunks', 'updated_at',
+        existing_type=mysql.TIMESTAMP(),
+        type_=sa.DateTime(),
+        nullable=False,
+        existing_server_default=sa.text('CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP'))
+    op.drop_index('idx_hash', table_name='document_chunks')
+    op.create_index(op.f('ix_document_chunks_hash'), 'document_chunks', ['hash'], unique=False)
+    # FK names are generated by the database (name=None).
+    op.create_foreign_key(None, 'document_chunks', 'knowledge_bases', ['kb_id'], ['id'])
+    op.create_foreign_key(None, 'document_chunks', 'documents', ['document_id'], ['id'])
+    op.drop_column('document_chunks', 'metadata')
+    op.drop_index('idx_file_hash', table_name='documents')
+    op.create_index(op.f('ix_documents_file_hash'), 'documents', ['file_hash'], unique=False)
+    op.create_index(op.f('ix_documents_id'), 'documents', ['id'], unique=False)
+    op.create_index(op.f('ix_knowledge_bases_id'), 'knowledge_bases', ['id'], unique=False)
+    op.create_index(op.f('ix_messages_id'), 'messages', ['id'], unique=False)
+    op.alter_column('processing_tasks', 'knowledge_base_id',
+        existing_type=mysql.INTEGER(),
+        nullable=True)
+    op.alter_column('processing_tasks', 'document_id',
+        existing_type=mysql.INTEGER(),
+        nullable=True)
+    op.alter_column('processing_tasks', 'status',
+        existing_type=mysql.VARCHAR(length=50),
+        nullable=True)
+    op.alter_column('processing_tasks', 'created_at',
+        existing_type=mysql.DATETIME(),
+        nullable=True)
+    op.alter_column('processing_tasks', 'updated_at',
+        existing_type=mysql.DATETIME(),
+        nullable=True)
+    op.create_index(op.f('ix_processing_tasks_id'), 'processing_tasks', ['id'], unique=False)
+    op.drop_index('email', table_name='users')
+    op.drop_index('username', table_name='users')
+    op.create_index(op.f('ix_users_email'), 'users', ['email'], unique=True)
+    op.create_index(op.f('ix_users_id'), 'users', ['id'], unique=False)
+    op.create_index(op.f('ix_users_username'), 'users', ['username'], unique=True)
+    # ### end Alembic commands ###
+
+
+def downgrade() -> None:
+    """Reverse of upgrade: restore metadata column, old index names, NOT NULLs.
+
+    NOTE(review): op.drop_constraint(None, ...) requires a naming convention
+    so Alembic can resolve the constraint name; with name=None as generated
+    here this is expected to fail on MySQL -- verify before relying on
+    downgrade.
+    """
+    # ### commands auto generated by Alembic - please adjust! ###
+    op.drop_index(op.f('ix_users_username'), table_name='users')
+    op.drop_index(op.f('ix_users_id'), table_name='users')
+    op.drop_index(op.f('ix_users_email'), table_name='users')
+    op.create_index('username', 'users', ['username'], unique=True)
+    op.create_index('email', 'users', ['email'], unique=True)
+    op.drop_index(op.f('ix_processing_tasks_id'), table_name='processing_tasks')
+    op.alter_column('processing_tasks', 'updated_at',
+        existing_type=mysql.DATETIME(),
+        nullable=False)
+    op.alter_column('processing_tasks', 'created_at',
+        existing_type=mysql.DATETIME(),
+        nullable=False)
+    op.alter_column('processing_tasks', 'status',
+        existing_type=mysql.VARCHAR(length=50),
+        nullable=False)
+    op.alter_column('processing_tasks', 'document_id',
+        existing_type=mysql.INTEGER(),
+        nullable=False)
+    op.alter_column('processing_tasks', 'knowledge_base_id',
+        existing_type=mysql.INTEGER(),
+        nullable=False)
+    op.drop_index(op.f('ix_messages_id'), table_name='messages')
+    op.drop_index(op.f('ix_knowledge_bases_id'), table_name='knowledge_bases')
+    op.drop_index(op.f('ix_documents_id'), table_name='documents')
+    op.drop_index(op.f('ix_documents_file_hash'), table_name='documents')
+    op.create_index('idx_file_hash', 'documents', ['file_hash'], unique=False)
+    op.add_column('document_chunks', sa.Column('metadata', mysql.JSON(), nullable=True))
+    # Two anonymous FKs were created in upgrade(); both are dropped here.
+    op.drop_constraint(None, 'document_chunks', type_='foreignkey')
+    op.drop_constraint(None, 'document_chunks', type_='foreignkey')
+    op.drop_index(op.f('ix_document_chunks_hash'), table_name='document_chunks')
+    op.create_index('idx_hash', 'document_chunks', ['hash'], unique=False)
+    op.alter_column('document_chunks', 'updated_at',
+        existing_type=sa.DateTime(),
+        type_=mysql.TIMESTAMP(),
+        nullable=True,
+        existing_server_default=sa.text('CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP'))
+    op.alter_column('document_chunks', 'created_at',
+        existing_type=sa.DateTime(),
+        type_=mysql.TIMESTAMP(),
+        nullable=True,
+        existing_server_default=sa.text('CURRENT_TIMESTAMP'))
+    op.drop_column('document_chunks', 'chunk_metadata')
+    op.drop_column('document_chunks', 'document_id')
+    op.drop_index(op.f('ix_chats_id'), table_name='chats')
+    # ### end Alembic commands ###
diff --git a/rag-web-ui/backend/alembic/versions/5be054bd6587_add_document_upload_id_to_processing_.py b/rag-web-ui/backend/alembic/versions/5be054bd6587_add_document_upload_id_to_processing_.py
new file mode 100644
index 0000000..d8f660c
--- /dev/null
+++ b/rag-web-ui/backend/alembic/versions/5be054bd6587_add_document_upload_id_to_processing_.py
@@ -0,0 +1,37 @@
+"""add_document_upload_id_to_processing_tasks
+
+Revision ID: 5be054bd6587
+Revises: fd73eebc87c1
+Create Date: 2025-01-14 01:17:24.164593
+
+"""
+from typing import Sequence, Union
+
+from alembic import op
+import sqlalchemy as sa
+
+
+# revision identifiers, used by Alembic.
+revision: str = '5be054bd6587'
+down_revision: Union[str, None] = 'fd73eebc87c1'
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade() -> None:
+    """Add processing_tasks.document_upload_id with FK to document_uploads.id."""
+    # 1. Add the document_upload_id column and its foreign key in one
+    #    statement (MySQL-specific multi-clause ALTER TABLE).
+    op.execute("""
+        ALTER TABLE processing_tasks
+        ADD COLUMN document_upload_id INT,
+        ADD CONSTRAINT processing_tasks_document_upload_id_fkey
+        FOREIGN KEY (document_upload_id) REFERENCES document_uploads(id)
+    """)
+
+
+def downgrade() -> None:
+    """Drop the document_upload_id foreign key and column."""
+    # 1. Drop the FK constraint and column in one statement
+    #    (DROP FOREIGN KEY is MySQL-specific syntax).
+    op.execute("""
+        ALTER TABLE processing_tasks
+        DROP FOREIGN KEY processing_tasks_document_upload_id_fkey,
+        DROP COLUMN document_upload_id
+    """)
diff --git a/rag-web-ui/backend/alembic/versions/a4f9c89b7d11_add_tool_jobs_and_srs_tables.py b/rag-web-ui/backend/alembic/versions/a4f9c89b7d11_add_tool_jobs_and_srs_tables.py
new file mode 100644
index 0000000..3ed3668
--- /dev/null
+++ b/rag-web-ui/backend/alembic/versions/a4f9c89b7d11_add_tool_jobs_and_srs_tables.py
@@ -0,0 +1,102 @@
+"""add tool jobs and srs tables
+
+Revision ID: a4f9c89b7d11
+Revises: 3580c0dcd005
+Create Date: 2026-04-12 18:00:00.000000
+
+"""
+from typing import Sequence, Union
+
+from alembic import op
+import sqlalchemy as sa
+from sqlalchemy.dialects import mysql
+
+
+# revision identifiers, used by Alembic.
+revision: str = "a4f9c89b7d11"
+down_revision: Union[str, None] = "3580c0dcd005"
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade() -> None:
+    op.create_table(  # tool_jobs: one row per background tool run, owned by a user
+        "tool_jobs",
+        sa.Column("id", sa.Integer(), nullable=False),
+        sa.Column("user_id", sa.Integer(), nullable=False),
+        sa.Column("tool_name", sa.String(length=128), nullable=False),
+        sa.Column("status", sa.String(length=32), nullable=False, server_default="pending"),  # server-side default: raw SQL inserts also start as pending
+        sa.Column("input_file_name", sa.String(length=255), nullable=False),
+        sa.Column("input_file_path", sa.String(length=512), nullable=False),
+        sa.Column("error_message", sa.Text(), nullable=True),
+        sa.Column("started_at", sa.DateTime(), nullable=True),
+        sa.Column("completed_at", sa.DateTime(), nullable=True),
+        sa.Column("output_summary", sa.JSON(), nullable=True),
+        sa.Column("created_at", sa.DateTime(), nullable=False),
+        sa.Column("updated_at", sa.DateTime(), nullable=False),
+        sa.ForeignKeyConstraint(["user_id"], ["users.id"]),
+        sa.PrimaryKeyConstraint("id"),
+    )
+    op.create_index(op.f("ix_tool_jobs_id"), "tool_jobs", ["id"], unique=False)
+    op.create_index(op.f("ix_tool_jobs_tool_name"), "tool_jobs", ["tool_name"], unique=False)
+    op.create_index(op.f("ix_tool_jobs_user_id"), "tool_jobs", ["user_id"], unique=False)
+
+    op.create_table(  # srs_extractions: extraction result summary, 1:1 with a tool job
+        "srs_extractions",
+        sa.Column("id", sa.Integer(), nullable=False),
+        sa.Column("job_id", sa.Integer(), nullable=False),
+        sa.Column("document_name", sa.String(length=255), nullable=False),
+        sa.Column("document_title", sa.String(length=255), nullable=False),
+        sa.Column("generated_at", sa.DateTime(), nullable=False),
+        sa.Column("total_requirements", sa.Integer(), nullable=False),
+        sa.Column("statistics", sa.JSON(), nullable=True),
+        sa.Column("raw_output", sa.JSON(), nullable=True),
+        sa.Column("created_at", sa.DateTime(), nullable=False),
+        sa.Column("updated_at", sa.DateTime(), nullable=False),
+        sa.ForeignKeyConstraint(["job_id"], ["tool_jobs.id"], ondelete="CASCADE"),  # deleting a job removes its extraction
+        sa.PrimaryKeyConstraint("id"),
+        sa.UniqueConstraint("job_id"),  # enforces the 1:1 job <-> extraction relationship
+    )
+    op.create_index(op.f("ix_srs_extractions_id"), "srs_extractions", ["id"], unique=False)
+
+    op.create_table(  # srs_requirements: individual requirements belonging to an extraction
+        "srs_requirements",
+        sa.Column("id", sa.Integer(), nullable=False),
+        sa.Column("extraction_id", sa.Integer(), nullable=False),
+        sa.Column("requirement_uid", sa.String(length=64), nullable=False),
+        sa.Column("title", sa.String(length=255), nullable=False),
+        sa.Column("description", mysql.LONGTEXT(), nullable=False),
+        sa.Column("priority", sa.String(length=16), nullable=False),
+        sa.Column("acceptance_criteria", sa.JSON(), nullable=False),
+        sa.Column("source_field", sa.String(length=255), nullable=False),
+        sa.Column("section_number", sa.String(length=64), nullable=True),
+        sa.Column("section_title", sa.String(length=255), nullable=True),
+        sa.Column("requirement_type", sa.String(length=64), nullable=True),
+        sa.Column("sort_order", sa.Integer(), nullable=False),
+        sa.Column("created_at", sa.DateTime(), nullable=False),
+        sa.Column("updated_at", sa.DateTime(), nullable=False),
+        sa.ForeignKeyConstraint(["extraction_id"], ["srs_extractions.id"], ondelete="CASCADE"),
+        sa.PrimaryKeyConstraint("id"),
+        sa.UniqueConstraint("extraction_id", "requirement_uid", name="uq_srs_extraction_requirement_uid"),  # UIDs unique within one extraction
+    )
+    op.create_index(op.f("ix_srs_requirements_id"), "srs_requirements", ["id"], unique=False)
+    op.create_index(  # composite index serves the "requirements of an extraction, in order" query
+        "idx_srs_requirements_extraction_sort",
+        "srs_requirements",
+        ["extraction_id", "sort_order"],
+        unique=False,
+    )
+
+
+def downgrade() -> None:
+    op.drop_index("idx_srs_requirements_extraction_sort", table_name="srs_requirements")  # reverse order: children before parents
+    op.drop_index(op.f("ix_srs_requirements_id"), table_name="srs_requirements")
+    op.drop_table("srs_requirements")
+
+    op.drop_index(op.f("ix_srs_extractions_id"), table_name="srs_extractions")
+    op.drop_table("srs_extractions")
+
+    op.drop_index(op.f("ix_tool_jobs_user_id"), table_name="tool_jobs")
+    op.drop_index(op.f("ix_tool_jobs_tool_name"), table_name="tool_jobs")
+    op.drop_index(op.f("ix_tool_jobs_id"), table_name="tool_jobs")
+    op.drop_table("tool_jobs")
diff --git a/rag-web-ui/backend/alembic/versions/e214adf7fb66_add_api_keys_table.py b/rag-web-ui/backend/alembic/versions/e214adf7fb66_add_api_keys_table.py
new file mode 100644
index 0000000..2216347
--- /dev/null
+++ b/rag-web-ui/backend/alembic/versions/e214adf7fb66_add_api_keys_table.py
@@ -0,0 +1,49 @@
+"""add_api_keys_table
+
+Revision ID: e214adf7fb66
+Revises: 5be054bd6587
+Create Date: 2024-01-20 13:24:00.000000
+
+"""
+from typing import Sequence, Union
+
+from alembic import op
+import sqlalchemy as sa
+
+
+# revision identifiers, used by Alembic.
+revision: str = 'e214adf7fb66'
+down_revision: Union[str, None] = '5be054bd6587'
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade() -> None:
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.create_table(
+ 'api_keys',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('key', sa.String(length=64), nullable=False),
+ sa.Column('name', sa.String(length=255), nullable=False),
+ sa.Column('user_id', sa.Integer(), nullable=False),
+ sa.Column('is_active', sa.Boolean(), nullable=False, default=True),
+ sa.Column('last_used_at', sa.DateTime(), nullable=True),
+ sa.Column('created_at', sa.DateTime(), nullable=False, server_default=sa.text('CURRENT_TIMESTAMP')),
+ sa.Column('updated_at', sa.DateTime(), nullable=False, server_default=sa.text('CURRENT_TIMESTAMP'), onupdate=sa.text('CURRENT_TIMESTAMP')),
+ sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
+ sa.PrimaryKeyConstraint('id'),
+ sa.UniqueConstraint('key')
+ )
+ op.create_index(op.f('ix_api_keys_id'), 'api_keys', ['id'], unique=False)
+ op.create_index(op.f('ix_api_keys_key'), 'api_keys', ['key'], unique=True)
+ op.create_index(op.f('ix_api_keys_name'), 'api_keys', ['name'], unique=False)
+ # ### end Alembic commands ###
+
+
+def downgrade() -> None:
+    # ### commands auto generated by Alembic - please adjust! ###
+    op.drop_index(op.f('ix_api_keys_name'), table_name='api_keys')  # drop indexes before the table they belong to
+    op.drop_index(op.f('ix_api_keys_key'), table_name='api_keys')
+    op.drop_index(op.f('ix_api_keys_id'), table_name='api_keys')
+    op.drop_table('api_keys')
+    # ### end Alembic commands ###
diff --git a/rag-web-ui/backend/alembic/versions/fd73eebc87c1_add_document_uploads_table.py b/rag-web-ui/backend/alembic/versions/fd73eebc87c1_add_document_uploads_table.py
new file mode 100644
index 0000000..5237957
--- /dev/null
+++ b/rag-web-ui/backend/alembic/versions/fd73eebc87c1_add_document_uploads_table.py
@@ -0,0 +1,44 @@
+"""add document uploads table
+
+Revision ID: fd73eebc87c1
+Revises: 59cfa0f1361d
+Create Date: 2024-01-13 16:24:07.182834
+
+"""
+from alembic import op
+import sqlalchemy as sa
+
+
+# revision identifiers, used by Alembic.
+revision = 'fd73eebc87c1'
+down_revision = '59cfa0f1361d'
+branch_labels = None
+depends_on = None
+
+
+def upgrade() -> None:
+ op.create_table(
+ 'document_uploads',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('knowledge_base_id', sa.Integer(), nullable=False),
+ sa.Column('file_name', sa.String(255), nullable=False),
+ sa.Column('file_hash', sa.String(64), nullable=False),
+ sa.Column('file_size', sa.BigInteger(), nullable=False),
+ sa.Column('content_type', sa.String(100), nullable=False),
+ sa.Column('temp_path', sa.String(255), nullable=False),
+ sa.Column('created_at', sa.TIMESTAMP(), server_default=sa.text('now()'), nullable=False),
+ sa.Column('status', sa.String(50), nullable=False, server_default='pending'),
+ sa.Column('error_message', sa.Text(), nullable=True),
+ sa.PrimaryKeyConstraint('id'),
+ sa.ForeignKeyConstraint(['knowledge_base_id'], ['knowledge_bases.id'], ondelete='CASCADE')
+ )
+
+ # 添加索引以加速查询
+ op.create_index('ix_document_uploads_created_at', 'document_uploads', ['created_at'])
+ op.create_index('ix_document_uploads_status', 'document_uploads', ['status'])
+
+
+def downgrade() -> None:
+ op.drop_index('ix_document_uploads_status')
+ op.drop_index('ix_document_uploads_created_at')
+ op.drop_table('document_uploads')
diff --git a/rag-web-ui/backend/alembic/versions/initial_schema.py b/rag-web-ui/backend/alembic/versions/initial_schema.py
new file mode 100644
index 0000000..eba31d3
--- /dev/null
+++ b/rag-web-ui/backend/alembic/versions/initial_schema.py
@@ -0,0 +1,148 @@
+"""initial schema
+
+Revision ID: initial_schema
+Revises:
+Create Date: 2024-01-13 15:00:00.000000
+
+"""
+from typing import Sequence, Union
+from alembic import op
+import sqlalchemy as sa
+from sqlalchemy.dialects import mysql
+
+# revision identifiers, used by Alembic.
+revision: str = 'initial_schema'
+down_revision: Union[str, None] = None
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+def upgrade() -> None:
+ # Create users table
+ op.create_table(
+ 'users',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('email', sa.String(255), nullable=False),
+ sa.Column('username', sa.String(255), nullable=False),
+ sa.Column('hashed_password', sa.String(255), nullable=False),
+ sa.Column('is_active', sa.Boolean(), nullable=True, default=True),
+ sa.Column('is_superuser', sa.Boolean(), nullable=True, default=False),
+ sa.Column('created_at', sa.DateTime(), nullable=False),
+ sa.Column('updated_at', sa.DateTime(), nullable=False),
+ sa.PrimaryKeyConstraint('id'),
+ sa.UniqueConstraint('email'),
+ sa.UniqueConstraint('username')
+ )
+
+ # Create knowledge_bases table
+ op.create_table(
+ 'knowledge_bases',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('name', sa.String(255), nullable=False),
+ sa.Column('description', mysql.LONGTEXT(), nullable=True),
+ sa.Column('user_id', sa.Integer(), nullable=False),
+ sa.Column('created_at', sa.DateTime(), nullable=False),
+ sa.Column('updated_at', sa.DateTime(), nullable=False),
+ sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
+ sa.PrimaryKeyConstraint('id')
+ )
+
+ # Create documents table
+ op.create_table(
+ 'documents',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('file_path', sa.String(255), nullable=False),
+ sa.Column('file_name', sa.String(255), nullable=False),
+ sa.Column('file_size', sa.Integer(), nullable=True),
+ sa.Column('content_type', sa.String(100), nullable=True),
+ sa.Column('file_hash', sa.String(64), nullable=True),
+ sa.Column('knowledge_base_id', sa.Integer(), nullable=False),
+ sa.Column('created_at', sa.DateTime(), nullable=False),
+ sa.Column('updated_at', sa.DateTime(), nullable=False),
+ sa.ForeignKeyConstraint(['knowledge_base_id'], ['knowledge_bases.id'], ),
+ sa.PrimaryKeyConstraint('id'),
+ sa.UniqueConstraint('knowledge_base_id', 'file_name', name='uq_kb_file_name')
+ )
+
+ # Create document_chunks table
+ op.create_table(
+ 'document_chunks',
+ sa.Column('id', sa.String(64), nullable=False),
+ sa.Column('kb_id', sa.Integer(), nullable=False),
+ sa.Column('file_name', sa.String(255), nullable=False),
+ sa.Column('metadata', sa.JSON(), nullable=True),
+ sa.Column('hash', sa.String(64), nullable=False),
+ sa.Column('created_at', sa.TIMESTAMP(), server_default=sa.text('CURRENT_TIMESTAMP')),
+ sa.Column('updated_at', sa.TIMESTAMP(), server_default=sa.text('CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP')),
+ sa.PrimaryKeyConstraint('id')
+ )
+
+ # Create chats table
+ op.create_table(
+ 'chats',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('title', sa.String(255), nullable=False),
+ sa.Column('user_id', sa.Integer(), nullable=False),
+ sa.Column('created_at', sa.DateTime(), nullable=False),
+ sa.Column('updated_at', sa.DateTime(), nullable=False),
+ sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
+ sa.PrimaryKeyConstraint('id')
+ )
+
+ # Create chat_knowledge_bases table (association table)
+ op.create_table(
+ 'chat_knowledge_bases',
+ sa.Column('chat_id', sa.Integer(), nullable=False),
+ sa.Column('knowledge_base_id', sa.Integer(), nullable=False),
+ sa.ForeignKeyConstraint(['chat_id'], ['chats.id'], ),
+ sa.ForeignKeyConstraint(['knowledge_base_id'], ['knowledge_bases.id'], ),
+ sa.PrimaryKeyConstraint('chat_id', 'knowledge_base_id')
+ )
+
+ # Create messages table
+ op.create_table(
+ 'messages',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('chat_id', sa.Integer(), nullable=False),
+ sa.Column('role', sa.String(50), nullable=False),
+ sa.Column('content', mysql.LONGTEXT(), nullable=False),
+ sa.Column('created_at', sa.DateTime(), nullable=False),
+ sa.Column('updated_at', sa.DateTime(), nullable=False),
+ sa.ForeignKeyConstraint(['chat_id'], ['chats.id'], ),
+ sa.PrimaryKeyConstraint('id')
+ )
+
+ # Create processing_tasks table
+ op.create_table(
+ 'processing_tasks',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('knowledge_base_id', sa.Integer(), nullable=False),
+ sa.Column('document_id', sa.Integer(), nullable=False),
+ sa.Column('status', sa.String(50), nullable=False, default='pending'),
+ sa.Column('error_message', sa.Text(), nullable=True),
+ sa.Column('created_at', sa.DateTime(), nullable=False),
+ sa.Column('updated_at', sa.DateTime(), nullable=False),
+ sa.ForeignKeyConstraint(['document_id'], ['documents.id'], ),
+ sa.ForeignKeyConstraint(['knowledge_base_id'], ['knowledge_bases.id'], ),
+ sa.PrimaryKeyConstraint('id')
+ )
+
+ # Create indexes
+ op.create_index('idx_kb_file_name', 'document_chunks', ['kb_id', 'file_name'])
+ op.create_index('idx_hash', 'document_chunks', ['hash'])
+ op.create_index('idx_file_hash', 'documents', ['file_hash'])
+
+def downgrade() -> None:
+    # Drop indexes first (they belong to the tables dropped below)
+    op.drop_index('idx_hash', table_name='document_chunks')
+    op.drop_index('idx_kb_file_name', table_name='document_chunks')
+    op.drop_index('idx_file_hash', table_name='documents')
+
+    # Drop tables child-first so no FK constraint is violated
+    op.drop_table('processing_tasks')
+    op.drop_table('messages')
+    op.drop_table('chat_knowledge_bases')
+    op.drop_table('chats')
+    op.drop_table('document_chunks')
+    op.drop_table('documents')
+    op.drop_table('knowledge_bases')
+    op.drop_table('users')
\ No newline at end of file
diff --git a/rag-web-ui/backend/app/__init__.py b/rag-web-ui/backend/app/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/rag-web-ui/backend/app/api/__init__.py b/rag-web-ui/backend/app/api/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/rag-web-ui/backend/app/api/api_v1/__init__.py b/rag-web-ui/backend/app/api/api_v1/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/rag-web-ui/backend/app/api/api_v1/api.py b/rag-web-ui/backend/app/api/api_v1/api.py
new file mode 100644
index 0000000..b05e85b
--- /dev/null
+++ b/rag-web-ui/backend/app/api/api_v1/api.py
@@ -0,0 +1,11 @@
+from fastapi import APIRouter
+from app.api.api_v1 import api_keys, auth, chat, knowledge_base, testing, tools
+
+api_router = APIRouter()  # aggregate v1 router; the app mounts this under its API prefix
+
+api_router.include_router(auth.router, prefix="/auth", tags=["auth"])  # register / login / token check
+api_router.include_router(knowledge_base.router, prefix="/knowledge-base", tags=["knowledge-base"])  # KB CRUD + document upload
+api_router.include_router(chat.router, prefix="/chat", tags=["chat"])  # chats and streamed messages
+api_router.include_router(api_keys.router, prefix="/api-keys", tags=["api-keys"])  # per-user API key management
+api_router.include_router(testing.router, prefix="/testing", tags=["testing"])
+api_router.include_router(tools.router, prefix="/tools", tags=["tools"])
\ No newline at end of file
diff --git a/rag-web-ui/backend/app/api/api_v1/api_keys.py b/rag-web-ui/backend/app/api/api_v1/api_keys.py
new file mode 100644
index 0000000..e07576a
--- /dev/null
+++ b/rag-web-ui/backend/app/api/api_v1/api_keys.py
@@ -0,0 +1,84 @@
+from typing import Any, List
+from fastapi import APIRouter, Depends, HTTPException
+from sqlalchemy.orm import Session
+import logging
+
+from app import models, schemas
+from app.db.session import get_db
+from app.services.api_key import APIKeyService
+from app.core.security import get_current_user
+
+router = APIRouter()
+logger = logging.getLogger(__name__)
+
+@router.get("/", response_model=List[schemas.APIKey])
+def read_api_keys(
+ db: Session = Depends(get_db),
+ skip: int = 0,
+ limit: int = 100,
+ current_user: models.User = Depends(get_current_user),
+) -> Any:
+ """
+ Retrieve API keys.
+ """
+ api_keys = APIKeyService.get_api_keys(
+ db=db, user_id=current_user.id, skip=skip, limit=limit
+ )
+ return api_keys
+
+@router.post("/", response_model=schemas.APIKey)
+def create_api_key(
+ *,
+ db: Session = Depends(get_db),
+ api_key_in: schemas.APIKeyCreate,
+ current_user: models.User = Depends(get_current_user),
+) -> Any:
+ """
+ Create new API key.
+ """
+ api_key = APIKeyService.create_api_key(
+ db=db, user_id=current_user.id, name=api_key_in.name
+ )
+ logger.info(f"API key created: {api_key.key} for user {current_user.id}")
+ return api_key
+
+@router.put("/{id}", response_model=schemas.APIKey)
+def update_api_key(
+ *,
+ db: Session = Depends(get_db),
+ id: int,
+ api_key_in: schemas.APIKeyUpdate,
+ current_user: models.User = Depends(get_current_user),
+) -> Any:
+ """
+ Update API key.
+ """
+ api_key = APIKeyService.get_api_key(db=db, api_key_id=id)
+ if not api_key:
+ raise HTTPException(status_code=404, detail="API key not found")
+ if api_key.user_id != current_user.id:
+ raise HTTPException(status_code=403, detail="Not enough permissions")
+
+ api_key = APIKeyService.update_api_key(db=db, api_key=api_key, update_data=api_key_in)
+ logger.info(f"API key updated: {api_key.key} for user {current_user.id}")
+ return api_key
+
+@router.delete("/{id}", response_model=schemas.APIKey)
+def delete_api_key(
+ *,
+ db: Session = Depends(get_db),
+ id: int,
+ current_user: models.User = Depends(get_current_user),
+) -> Any:
+ """
+ Delete API key.
+ """
+ api_key = APIKeyService.get_api_key(db=db, api_key_id=id)
+ if not api_key:
+ raise HTTPException(status_code=404, detail="API key not found")
+ if api_key.user_id != current_user.id:
+ raise HTTPException(status_code=403, detail="Not enough permissions")
+
+ APIKeyService.delete_api_key(db=db, api_key=api_key)
+ logger.info(f"API key deleted: {api_key.key} for user {current_user.id}")
+ return api_key
diff --git a/rag-web-ui/backend/app/api/api_v1/auth.py b/rag-web-ui/backend/app/api/api_v1/auth.py
new file mode 100644
index 0000000..4024504
--- /dev/null
+++ b/rag-web-ui/backend/app/api/api_v1/auth.py
@@ -0,0 +1,88 @@
+from datetime import timedelta
+from typing import Any
+from fastapi import APIRouter, Depends, HTTPException, status
+from fastapi.security import OAuth2PasswordRequestForm
+from sqlalchemy.orm import Session
+from requests.exceptions import RequestException
+
+from app.core import security
+from app.core.security import get_current_user
+from app.core.config import settings
+from app.db.session import get_db
+from app.models.user import User
+from app.schemas.token import Token
+from app.schemas.user import UserCreate, UserResponse
+
+router = APIRouter()
+
+@router.post("/register", response_model=UserResponse)
+def register(*, db: Session = Depends(get_db), user_in: UserCreate) -> Any:
+ """
+ Register a new user.
+ """
+ try:
+ # Check if user with this email exists
+ user = db.query(User).filter(User.email == user_in.email).first()
+ if user:
+ raise HTTPException(
+ status_code=400,
+ detail="A user with this email already exists.",
+ )
+
+ # Check if user with this username exists
+ user = db.query(User).filter(User.username == user_in.username).first()
+ if user:
+ raise HTTPException(
+ status_code=400,
+ detail="A user with this username already exists.",
+ )
+
+ # Create new user
+ user = User(
+ email=user_in.email,
+ username=user_in.username,
+ hashed_password=security.get_password_hash(user_in.password),
+ )
+ db.add(user)
+ db.commit()
+ db.refresh(user)
+ return user
+ except RequestException as e:
+ raise HTTPException(
+ status_code=503,
+ detail="Network error or server is unreachable. Please try again later.",
+ ) from e
+
+@router.post("/token", response_model=Token)
+def login_access_token(
+ db: Session = Depends(get_db), form_data: OAuth2PasswordRequestForm = Depends()
+) -> Any:
+ """
+ OAuth2 compatible token login, get an access token for future requests.
+ """
+ user = db.query(User).filter(User.username == form_data.username).first()
+ if not user or not security.verify_password(form_data.password, user.hashed_password):
+ raise HTTPException(
+ status_code=status.HTTP_401_UNAUTHORIZED,
+ detail="Incorrect username or password",
+ headers={"WWW-Authenticate": "Bearer"},
+ )
+ elif not user.is_active:
+ raise HTTPException(
+ status_code=status.HTTP_401_UNAUTHORIZED,
+ detail="Inactive user",
+ headers={"WWW-Authenticate": "Bearer"},
+ )
+
+ access_token_expires = timedelta(minutes=settings.ACCESS_TOKEN_EXPIRE_MINUTES)
+ access_token = security.create_access_token(
+ data={"sub": user.username}, expires_delta=access_token_expires
+ )
+ return {"access_token": access_token, "token_type": "bearer"}
+
+@router.post("/test-token", response_model=UserResponse)
+def test_token(current_user: User = Depends(get_current_user)) -> Any:
+ """
+ Test access token by getting current user.
+ """
+ return current_user
diff --git a/rag-web-ui/backend/app/api/api_v1/chat.py b/rag-web-ui/backend/app/api/api_v1/chat.py
new file mode 100644
index 0000000..a2241fa
--- /dev/null
+++ b/rag-web-ui/backend/app/api/api_v1/chat.py
@@ -0,0 +1,155 @@
+from typing import List, Any
+from fastapi import APIRouter, Depends, HTTPException
+from fastapi.responses import StreamingResponse
+from sqlalchemy.orm import Session, joinedload
+from app.db.session import get_db
+from app.models.user import User
+from app.models.chat import Chat, Message
+from app.models.knowledge import KnowledgeBase
+from app.schemas.chat import (
+ ChatCreate,
+ ChatResponse,
+ ChatUpdate,
+ MessageCreate,
+ MessageResponse
+)
+from app.core.security import get_current_user
+from app.services.chat_service import generate_response
+
+router = APIRouter()
+
+@router.post("/", response_model=ChatResponse)
+def create_chat(
+ *,
+ db: Session = Depends(get_db),
+ chat_in: ChatCreate,
+ current_user: User = Depends(get_current_user)
+) -> Any:
+ # Verify knowledge bases exist and belong to user
+ knowledge_bases = (
+ db.query(KnowledgeBase)
+ .filter(
+ KnowledgeBase.id.in_(chat_in.knowledge_base_ids),
+ KnowledgeBase.user_id == current_user.id
+ )
+ .all()
+ )
+ if len(knowledge_bases) != len(chat_in.knowledge_base_ids):
+ raise HTTPException(
+ status_code=400,
+ detail="One or more knowledge bases not found"
+ )
+
+ chat = Chat(
+ title=chat_in.title,
+ user_id=current_user.id,
+ )
+ chat.knowledge_bases = knowledge_bases
+
+ db.add(chat)
+ db.commit()
+ db.refresh(chat)
+ return chat
+
+@router.get("/", response_model=List[ChatResponse])
+def get_chats(
+ db: Session = Depends(get_db),
+ current_user: User = Depends(get_current_user),
+ skip: int = 0,
+ limit: int = 100
+) -> Any:
+ chats = (
+ db.query(Chat)
+ .filter(Chat.user_id == current_user.id)
+ .offset(skip)
+ .limit(limit)
+ .all()
+ )
+ return chats
+
+@router.get("/{chat_id}", response_model=ChatResponse)
+def get_chat(
+ *,
+ db: Session = Depends(get_db),
+ chat_id: int,
+ current_user: User = Depends(get_current_user)
+) -> Any:
+ chat = (
+ db.query(Chat)
+ .filter(
+ Chat.id == chat_id,
+ Chat.user_id == current_user.id
+ )
+ .first()
+ )
+ if not chat:
+ raise HTTPException(status_code=404, detail="Chat not found")
+ return chat
+
+@router.post("/{chat_id}/messages")
+async def create_message(
+ *,
+ db: Session = Depends(get_db),
+ chat_id: int,
+ messages: dict,
+ current_user: User = Depends(get_current_user)
+) -> StreamingResponse:
+ chat = (
+ db.query(Chat)
+ .options(joinedload(Chat.knowledge_bases))
+ .filter(
+ Chat.id == chat_id,
+ Chat.user_id == current_user.id
+ )
+ .first()
+ )
+ if not chat:
+ raise HTTPException(status_code=404, detail="Chat not found")
+
+ # Get the last user message
+ last_message = messages["messages"][-1]
+ if last_message["role"] != "user":
+ raise HTTPException(status_code=400, detail="Last message must be from user")
+
+ # Get knowledge base IDs
+ knowledge_base_ids = [kb.id for kb in chat.knowledge_bases]
+
+ async def response_stream():
+ async for chunk in generate_response(
+ query=last_message["content"],
+ messages=messages,
+ knowledge_base_ids=knowledge_base_ids,
+ chat_id=chat_id,
+ db=db
+ ):
+ yield chunk
+
+ return StreamingResponse(
+ response_stream(),
+ media_type="text/event-stream",
+ headers={
+ "x-vercel-ai-data-stream": "v1"
+ }
+ )
+
+@router.delete("/{chat_id}")
+def delete_chat(
+ *,
+ db: Session = Depends(get_db),
+ chat_id: int,
+ current_user: User = Depends(get_current_user)
+) -> Any:
+ chat = (
+ db.query(Chat)
+ .filter(
+ Chat.id == chat_id,
+ Chat.user_id == current_user.id
+ )
+ .first()
+ )
+ if not chat:
+ raise HTTPException(status_code=404, detail="Chat not found")
+
+ db.delete(chat)
+ db.commit()
+ return {"status": "success"}
\ No newline at end of file
diff --git a/rag-web-ui/backend/app/api/api_v1/knowledge_base.py b/rag-web-ui/backend/app/api/api_v1/knowledge_base.py
new file mode 100644
index 0000000..cf90b2e
--- /dev/null
+++ b/rag-web-ui/backend/app/api/api_v1/knowledge_base.py
@@ -0,0 +1,575 @@
+import hashlib
+from typing import List, Any, Dict
+from fastapi import APIRouter, Depends, HTTPException, UploadFile, File, BackgroundTasks, Query
+from sqlalchemy.orm import Session
+from langchain_chroma import Chroma
+from sqlalchemy import text
+import logging
+from datetime import datetime, timedelta
+from pydantic import BaseModel
+from sqlalchemy.orm import selectinload
+import time
+import asyncio
+
+from app.db.session import get_db
+from app.models.user import User
+from app.core.security import get_current_user
+from app.models.knowledge import KnowledgeBase, Document, ProcessingTask, DocumentChunk, DocumentUpload
+from app.schemas.knowledge import (
+ KnowledgeBaseCreate,
+ KnowledgeBaseResponse,
+ KnowledgeBaseUpdate,
+ DocumentResponse,
+ PreviewRequest
+)
+from app.services.document_processor import process_document_background, upload_document, preview_document, PreviewResult
+from app.core.config import settings
+from app.core.minio import get_minio_client
+from minio.error import MinioException
+from app.services.vector_store import VectorStoreFactory
+from app.services.embedding.embedding_factory import EmbeddingsFactory
+
+router = APIRouter()
+
+logger = logging.getLogger(__name__)
+
+class TestRetrievalRequest(BaseModel):  # request body for the retrieval-testing endpoint
+    query: str  # free-text query to run against the knowledge base
+    kb_id: int  # target knowledge base id
+    top_k: int  # number of chunks to retrieve
+
+@router.post("", response_model=KnowledgeBaseResponse)
+def create_knowledge_base(
+ *,
+ db: Session = Depends(get_db),
+ kb_in: KnowledgeBaseCreate,
+ current_user: User = Depends(get_current_user)
+) -> Any:
+ """
+ Create new knowledge base.
+ """
+ kb = KnowledgeBase(
+ name=kb_in.name,
+ description=kb_in.description,
+ user_id=current_user.id
+ )
+ db.add(kb)
+ db.commit()
+ db.refresh(kb)
+ logger.info(f"Knowledge base created: {kb.name} for user {current_user.id}")
+ return kb
+
+@router.get("", response_model=List[KnowledgeBaseResponse])
+def get_knowledge_bases(
+ db: Session = Depends(get_db),
+ current_user: User = Depends(get_current_user),
+ skip: int = 0,
+ limit: int = 100
+) -> Any:
+ """
+ Retrieve knowledge bases.
+ """
+ knowledge_bases = (
+ db.query(KnowledgeBase)
+ .filter(KnowledgeBase.user_id == current_user.id)
+ .offset(skip)
+ .limit(limit)
+ .all()
+ )
+ return knowledge_bases
+
+@router.get("/{kb_id}", response_model=KnowledgeBaseResponse)
+def get_knowledge_base(
+ *,
+ db: Session = Depends(get_db),
+ kb_id: int,
+ current_user: User = Depends(get_current_user)
+) -> Any:
+ """
+ Get knowledge base by ID.
+ """
+ from sqlalchemy.orm import joinedload
+
+ kb = (
+ db.query(KnowledgeBase)
+ .options(
+ joinedload(KnowledgeBase.documents)
+ .joinedload(Document.processing_tasks)
+ )
+ .filter(
+ KnowledgeBase.id == kb_id,
+ KnowledgeBase.user_id == current_user.id
+ )
+ .first()
+ )
+
+ if not kb:
+ raise HTTPException(status_code=404, detail="Knowledge base not found")
+
+ return kb
+
+@router.put("/{kb_id}", response_model=KnowledgeBaseResponse)
+def update_knowledge_base(
+ *,
+ db: Session = Depends(get_db),
+ kb_id: int,
+ kb_in: KnowledgeBaseUpdate,
+ current_user: User = Depends(get_current_user)
+) -> Any:
+ """
+ Update knowledge base.
+ """
+ kb = db.query(KnowledgeBase).filter(
+ KnowledgeBase.id == kb_id,
+ KnowledgeBase.user_id == current_user.id
+ ).first()
+
+ if not kb:
+ raise HTTPException(status_code=404, detail="Knowledge base not found")
+
+ for field, value in kb_in.dict(exclude_unset=True).items():
+ setattr(kb, field, value)
+
+ db.add(kb)
+ db.commit()
+ db.refresh(kb)
+ logger.info(f"Knowledge base updated: {kb.name} for user {current_user.id}")
+ return kb
+
+@router.delete("/{kb_id}")
+async def delete_knowledge_base(
+ *,
+ db: Session = Depends(get_db),
+ kb_id: int,
+ current_user: User = Depends(get_current_user)
+) -> Any:
+ """
+ Delete knowledge base and all associated resources.
+ """
+ logger = logging.getLogger(__name__)
+
+ kb = (
+ db.query(KnowledgeBase)
+ .filter(
+ KnowledgeBase.id == kb_id,
+ KnowledgeBase.user_id == current_user.id
+ )
+ .first()
+ )
+ if not kb:
+ raise HTTPException(status_code=404, detail="Knowledge base not found")
+
+ try:
+ # Get all document file paths before deletion
+ document_paths = [doc.file_path for doc in kb.documents]
+
+ # Initialize services
+ minio_client = get_minio_client()
+ embeddings = EmbeddingsFactory.create()
+
+ vector_store = VectorStoreFactory.create(
+ store_type=settings.VECTOR_STORE_TYPE,
+ collection_name=f"kb_{kb_id}",
+ embedding_function=embeddings,
+ )
+
+ # Clean up external resources first
+ cleanup_errors = []
+
+ # 1. Clean up MinIO files
+ try:
+ # Delete all objects with prefix kb_{kb_id}/
+ objects = minio_client.list_objects(settings.MINIO_BUCKET_NAME, prefix=f"kb_{kb_id}/")
+ for obj in objects:
+ minio_client.remove_object(settings.MINIO_BUCKET_NAME, obj.object_name)
+ logger.info(f"Cleaned up MinIO files for knowledge base {kb_id}")
+ except MinioException as e:
+ cleanup_errors.append(f"Failed to clean up MinIO files: {str(e)}")
+ logger.error(f"MinIO cleanup error for kb {kb_id}: {str(e)}")
+
+ # 2. Clean up vector store
+ try:
+ vector_store._store.delete_collection(f"kb_{kb_id}")
+ logger.info(f"Cleaned up vector store for knowledge base {kb_id}")
+ except Exception as e:
+ cleanup_errors.append(f"Failed to clean up vector store: {str(e)}")
+ logger.error(f"Vector store cleanup error for kb {kb_id}: {str(e)}")
+
+ # Finally, delete database records in a single transaction
+ db.delete(kb)
+ db.commit()
+
+ # Report any cleanup errors in the response
+ if cleanup_errors:
+ return {
+ "message": "Knowledge base deleted with cleanup warnings",
+ "warnings": cleanup_errors
+ }
+
+ return {"message": "Knowledge base and all associated resources deleted successfully"}
+ except Exception as e:
+ db.rollback()
+ logger.error(f"Failed to delete knowledge base {kb_id}: {str(e)}")
+ raise HTTPException(status_code=500, detail=f"Failed to delete knowledge base: {str(e)}")
+
# Batch upload documents
@router.post("/{kb_id}/documents/upload")
async def upload_kb_documents(
    kb_id: int,
    files: List[UploadFile],
    db: Session = Depends(get_db),
    current_user: User = Depends(get_current_user)
):
    """
    Upload multiple documents to MinIO.

    For each file:
      1. Compute a SHA-256 hash of the full content.
      2. Skip files that already exist in this knowledge base with the same
         name AND hash (reported with status "exists").
      3. Stage new files under ``kb_{kb_id}/temp/`` in MinIO.
      4. Record a DocumentUpload row for later processing.

    Returns a list of per-file result dicts. Raises 404 when the knowledge
    base is not owned by the caller, 500 when a MinIO upload fails.
    """
    from io import BytesIO

    kb = db.query(KnowledgeBase).filter(
        KnowledgeBase.id == kb_id,
        KnowledgeBase.user_id == current_user.id
    ).first()
    if not kb:
        raise HTTPException(status_code=404, detail="Knowledge base not found")

    results = []
    for file in files:
        # 1. Read the whole file once; the bytes are reused both for the
        #    hash and for the MinIO upload body below.
        file_content = await file.read()
        file_hash = hashlib.sha256(file_content).hexdigest()

        # 2. Exact duplicate (same name and hash) in this KB: skip re-upload.
        existing_document = db.query(Document).filter(
            Document.file_name == file.filename,
            Document.file_hash == file_hash,
            Document.knowledge_base_id == kb_id
        ).first()

        if existing_document:
            results.append({
                "document_id": existing_document.id,
                "file_name": existing_document.file_name,
                "status": "exists",
                "message": "文件已存在且已处理完成",
                "skip_processing": True
            })
            continue

        # 3. Stage the file in a temp prefix. Upload from an in-memory
        #    buffer of the bytes already read instead of re-seeking the
        #    underlying SpooledTemporaryFile — guarantees the declared
        #    length and the streamed data always agree.
        temp_path = f"kb_{kb_id}/temp/{file.filename}"
        try:
            minio_client = get_minio_client()
            file_size = len(file_content)
            minio_client.put_object(
                bucket_name=settings.MINIO_BUCKET_NAME,
                object_name=temp_path,
                data=BytesIO(file_content),
                length=file_size,
                content_type=file.content_type
            )
        except MinioException as e:
            logger.error(f"Failed to upload file to MinIO: {str(e)}")
            raise HTTPException(status_code=500, detail="Failed to upload file")

        # 4. Persist the staging record so the processing step can find it.
        upload = DocumentUpload(
            knowledge_base_id=kb_id,
            file_name=file.filename,
            file_hash=file_hash,
            file_size=len(file_content),
            content_type=file.content_type,
            temp_path=temp_path
        )
        db.add(upload)
        db.commit()
        db.refresh(upload)

        results.append({
            "upload_id": upload.id,
            "file_name": file.filename,
            "temp_path": temp_path,
            "status": "pending",
            "skip_processing": False
        })

    return results
+
@router.post("/{kb_id}/documents/preview")
async def preview_kb_documents(
    kb_id: int,
    preview_request: PreviewRequest,
    db: Session = Depends(get_db),
    current_user: User = Depends(get_current_user)
) -> Dict[int, PreviewResult]:
    """
    Preview chunking for a batch of documents.

    Each requested id may name either a processed Document or a pending
    DocumentUpload; the matching file path is chunked with the requested
    size/overlap. Raises 404 for an id that matches neither.
    """
    def _resolve_path(identifier: int) -> str:
        # Processed documents take precedence over pending uploads.
        doc = db.query(Document).join(KnowledgeBase).filter(
            Document.id == identifier,
            Document.knowledge_base_id == kb_id,
            KnowledgeBase.user_id == current_user.id
        ).first()
        if doc is not None:
            return doc.file_path

        pending = db.query(DocumentUpload).join(KnowledgeBase).filter(
            DocumentUpload.id == identifier,
            DocumentUpload.knowledge_base_id == kb_id,
            KnowledgeBase.user_id == current_user.id
        ).first()
        if pending is None:
            raise HTTPException(status_code=404, detail=f"Document {identifier} not found")
        return pending.temp_path

    previews: Dict[int, PreviewResult] = {}
    for doc_id in preview_request.document_ids:
        previews[doc_id] = await preview_document(
            _resolve_path(doc_id),
            chunk_size=preview_request.chunk_size,
            chunk_overlap=preview_request.chunk_overlap
        )
    return previews
+
@router.post("/{kb_id}/documents/process")
async def process_kb_documents(
    kb_id: int,
    upload_results: List[dict],
    background_tasks: BackgroundTasks,
    db: Session = Depends(get_db),
    current_user: User = Depends(get_current_user)
):
    """
    Process multiple documents asynchronously.

    Creates one ProcessingTask per pending upload and schedules the actual
    document processing in the background. Returns {"tasks": [...]} mapping
    each upload_id to its task_id. Raises 404 when the knowledge base is
    not owned by the caller.
    """
    kb = db.query(KnowledgeBase).filter(
        KnowledgeBase.id == kb_id,
        KnowledgeBase.user_id == current_user.id
    ).first()

    if not kb:
        raise HTTPException(status_code=404, detail="Knowledge base not found")

    upload_ids = [
        result["upload_id"]
        for result in upload_results
        if not result.get("skip_processing")
    ]
    if not upload_ids:
        return {"tasks": []}

    uploads = db.query(DocumentUpload).filter(DocumentUpload.id.in_(upload_ids)).all()
    uploads_dict = {upload.id: upload for upload in uploads}

    # Pair each created task with its upload id explicitly. The previous
    # positional pairing (enumerate over upload_ids against a task list
    # that silently skipped ids missing from the database) could associate
    # tasks with the wrong uploads whenever an id was not found.
    created = []
    for upload_id in upload_ids:
        upload = uploads_dict.get(upload_id)
        if not upload:
            continue
        task = ProcessingTask(
            document_upload_id=upload_id,
            knowledge_base_id=kb_id,
            status="pending"
        )
        created.append((upload_id, task))

    db.add_all([task for _, task in created])
    db.commit()
    for _, task in created:
        db.refresh(task)

    task_info = [
        {"upload_id": upload_id, "task_id": task.id}
        for upload_id, task in created
    ]
    task_data = [
        {
            "task_id": task.id,
            "upload_id": upload_id,
            "temp_path": uploads_dict[upload_id].temp_path,
            "file_name": uploads_dict[upload_id].file_name
        }
        for upload_id, task in created
    ]

    background_tasks.add_task(
        add_processing_tasks_to_queue,
        task_data,
        kb_id
    )

    return {"tasks": task_info}
+
async def add_processing_tasks_to_queue(task_data, kb_id):
    """Run the document-processing coroutines for *task_data* concurrently.

    Uses ``asyncio.gather(..., return_exceptions=True)`` instead of bare
    ``asyncio.create_task``: fire-and-forget tasks held by no reference
    may be garbage-collected mid-flight and their exceptions would go
    unobserved. Failures are logged per task and never abort the batch.
    """
    if not task_data:
        return

    outcomes = await asyncio.gather(
        *(
            process_document_background(
                data["temp_path"],
                data["file_name"],
                kb_id,
                data["task_id"],
                None
            )
            for data in task_data
        ),
        return_exceptions=True,
    )
    for data, outcome in zip(task_data, outcomes):
        if isinstance(outcome, Exception):
            logger.error(f"Processing task {data['task_id']} failed: {outcome}")

    logger.info(f"Added {len(task_data)} document processing tasks to queue")
+
@router.post("/cleanup")
async def cleanup_temp_files(
    db: Session = Depends(get_db),
    current_user: User = Depends(get_current_user)
):
    """
    Clean up expired temporary files.

    Removes DocumentUpload rows older than 24 hours together with their
    staged MinIO objects. When a MinIO delete fails the DB row is kept so
    the next cleanup run retries it (the previous code deleted the row
    anyway, permanently orphaning the object in MinIO).
    """
    expired_time = datetime.utcnow() - timedelta(hours=24)
    expired_uploads = db.query(DocumentUpload).filter(
        DocumentUpload.created_at < expired_time
    ).all()

    minio_client = get_minio_client()
    removed = 0
    for upload in expired_uploads:
        try:
            minio_client.remove_object(
                bucket_name=settings.MINIO_BUCKET_NAME,
                object_name=upload.temp_path
            )
        except MinioException as e:
            logger.error(f"Failed to delete temp file {upload.temp_path}: {str(e)}")
            # Keep the DB record so a later cleanup run can retry.
            continue

        db.delete(upload)
        removed += 1

    db.commit()

    return {"message": f"Cleaned up {removed} expired uploads"}
+
@router.get("/{kb_id}/documents/tasks")
async def get_processing_tasks(
    kb_id: int,
    task_ids: str = Query(..., description="Comma-separated list of task IDs to check status for"),
    db: Session = Depends(get_db),
    current_user: User = Depends(get_current_user)
):
    """
    Get status of multiple processing tasks.

    Raises 400 on a malformed ``task_ids`` value (the previous code let
    ``int()`` raise ValueError, surfacing as a 500), 404 when the
    knowledge base is not owned by the caller. Returns a mapping of
    task id -> status details.
    """
    try:
        # Tolerate stray whitespace and empty segments such as "1,,2".
        task_id_list = [int(raw.strip()) for raw in task_ids.split(",") if raw.strip()]
    except ValueError:
        raise HTTPException(
            status_code=400,
            detail="task_ids must be a comma-separated list of integers"
        )

    kb = db.query(KnowledgeBase).filter(
        KnowledgeBase.id == kb_id,
        KnowledgeBase.user_id == current_user.id
    ).first()

    if not kb:
        raise HTTPException(status_code=404, detail="Knowledge base not found")

    tasks = (
        db.query(ProcessingTask)
        .options(
            selectinload(ProcessingTask.document_upload)
        )
        .filter(
            ProcessingTask.id.in_(task_id_list),
            ProcessingTask.knowledge_base_id == kb_id
        )
        .all()
    )

    return {
        task.id: {
            "document_id": task.document_id,
            "status": task.status,
            "error_message": task.error_message,
            "upload_id": task.document_upload_id,
            "file_name": task.document_upload.file_name if task.document_upload else None
        }
        for task in tasks
    }
+
@router.get("/{kb_id}/documents/{doc_id}", response_model=DocumentResponse)
async def get_document(
    *,
    db: Session = Depends(get_db),
    kb_id: int,
    doc_id: int,
    current_user: User = Depends(get_current_user)
) -> Any:
    """Return a single document scoped to the caller's knowledge base.

    Raises 404 when the document does not exist in this knowledge base or
    the knowledge base is not owned by the current user.
    """
    matched = (
        db.query(Document)
        .join(KnowledgeBase)
        .filter(
            Document.id == doc_id,
            Document.knowledge_base_id == kb_id,
            KnowledgeBase.user_id == current_user.id,
        )
        .first()
    )
    if matched is None:
        raise HTTPException(status_code=404, detail="Document not found")
    return matched
+
@router.post("/test-retrieval")
async def test_retrieval(
    request: TestRetrievalRequest,
    background_tasks: BackgroundTasks,
    db: Session = Depends(get_db),
    current_user: User = Depends(get_current_user)
) -> Any:
    """
    Test retrieval quality for a given query against a knowledge base.

    Runs a similarity search over the knowledge base's vector collection
    and returns the scored chunks. Raises 404 when the knowledge base is
    not owned by the caller, 500 on unexpected retrieval failures.
    """
    try:
        kb = db.query(KnowledgeBase).filter(
            KnowledgeBase.id == request.kb_id,
            KnowledgeBase.user_id == current_user.id
        ).first()

        if not kb:
            raise HTTPException(
                status_code=404,
                detail=f"Knowledge base {request.kb_id} not found",
            )

        embeddings = EmbeddingsFactory.create()

        vector_store = VectorStoreFactory.create(
            store_type=settings.VECTOR_STORE_TYPE,
            collection_name=f"kb_{request.kb_id}",
            embedding_function=embeddings,
        )

        results = vector_store.similarity_search_with_score(request.query, k=request.top_k)

        response = []
        for doc, score in results:
            response.append({
                "content": doc.page_content,
                "metadata": doc.metadata,
                "score": float(score)
            })

        return {"results": response}

    except HTTPException:
        # Bug fix: the blanket handler below used to swallow the 404 above
        # and re-raise it as a generic 500. Propagate HTTP errors as-is.
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
diff --git a/rag-web-ui/backend/app/api/api_v1/testing.py b/rag-web-ui/backend/app/api/api_v1/testing.py
new file mode 100644
index 0000000..db5e58d
--- /dev/null
+++ b/rag-web-ui/backend/app/api/api_v1/testing.py
@@ -0,0 +1,84 @@
+from typing import Any, Dict, List
+
+from fastapi import APIRouter, Depends
+from sqlalchemy.orm import Session
+
+from app.core.config import settings
+from app.core.security import get_current_user
+from app.db.session import get_db
+from app.models.knowledge import Document, KnowledgeBase
+from app.models.user import User
+from app.schemas.testing import TestingPipelineRequest, TestingPipelineResponse
+from app.services.embedding.embedding_factory import EmbeddingsFactory
+from app.services.retrieval.multi_kb_retriever import MultiKBRetriever, format_retrieval_context
+from app.services.testing_pipeline import run_testing_pipeline
+from app.services.vector_store import VectorStoreFactory
+
+router = APIRouter()
+
+
async def _build_kb_vector_stores(db: Session, knowledge_bases: List[KnowledgeBase]) -> List[Dict[str, Any]]:
    """Build one vector-store handle per knowledge base that has documents.

    Knowledge bases without any documents are skipped so the retriever
    never queries an empty collection.
    """
    embeddings = EmbeddingsFactory.create()
    stores: List[Dict[str, Any]] = []

    for kb in knowledge_bases:
        kb_documents = db.query(Document).filter(Document.knowledge_base_id == kb.id).all()
        if not kb_documents:
            continue

        stores.append(
            {
                "kb_id": kb.id,
                "store": VectorStoreFactory.create(
                    store_type=settings.VECTOR_STORE_TYPE,
                    collection_name=f"kb_{kb.id}",
                    embedding_function=embeddings,
                ),
            }
        )

    return stores
+
+
@router.post("/generate", response_model=TestingPipelineResponse)
async def generate_testing_content(
    *,
    payload: TestingPipelineRequest,
    current_user: User = Depends(get_current_user),
    db: Session = Depends(get_db),
) -> Any:
    """Run the testing-content pipeline for a requirement text.

    When knowledge base ids are supplied, chunks are retrieved from the
    caller's matching knowledge bases and the formatted retrieval context
    replaces any caller-provided ``knowledge_context``; otherwise the
    provided context (possibly empty) is used as-is.
    """
    # Removed the dead `_ = current_user` statement: current_user is used
    # below to scope the knowledge-base query.
    knowledge_context = (payload.knowledge_context or "").strip()
    if payload.knowledge_base_ids:
        knowledge_bases = (
            db.query(KnowledgeBase)
            .filter(
                KnowledgeBase.id.in_(payload.knowledge_base_ids),
                KnowledgeBase.user_id == current_user.id,
            )
            .all()
        )

        kb_vector_stores = await _build_kb_vector_stores(db, knowledge_bases)
        if kb_vector_stores:
            retriever = MultiKBRetriever(
                reranker_weight=settings.RERANKER_WEIGHT,
            )
            # Over-fetch per KB so the reranker has candidates to reorder.
            retrieval_rows = await retriever.retrieve(
                query=payload.requirement_text,
                kb_vector_stores=kb_vector_stores,
                fetch_k_per_kb=max(12, payload.retrieval_top_k * 2),
                top_k=payload.retrieval_top_k,
            )
            if retrieval_rows:
                knowledge_context = format_retrieval_context(retrieval_rows)

    return run_testing_pipeline(
        user_requirement_text=payload.requirement_text,
        requirement_type_input=payload.requirement_type,
        debug=payload.debug,
        knowledge_context=knowledge_context,
        use_model_generation=payload.use_model_generation,
        max_items_per_group=payload.max_items_per_group,
        cases_per_item=payload.cases_per_item,
        max_focus_points=payload.max_focus_points,
        max_llm_calls=payload.max_llm_calls,
    )
diff --git a/rag-web-ui/backend/app/api/api_v1/tools.py b/rag-web-ui/backend/app/api/api_v1/tools.py
new file mode 100644
index 0000000..eeb37e6
--- /dev/null
+++ b/rag-web-ui/backend/app/api/api_v1/tools.py
@@ -0,0 +1,175 @@
+from pathlib import Path
+from typing import Any, List
+
+from fastapi import APIRouter, BackgroundTasks, Depends, File, HTTPException, UploadFile
+from sqlalchemy.orm import Session
+
+from app.core.security import get_current_user
+from app.db.session import get_db
+from app.models.tooling import SRSExtraction, ToolJob
+from app.models.user import User
+from app.schemas.tooling import (
+ SRSToolCreateJobResponse,
+ SRSToolJobStatusResponse,
+ SRSToolRequirementsSaveRequest,
+ SRSToolResultResponse,
+ ToolDefinitionResponse,
+)
+from app.services.srs_job_service import (
+ build_result_response,
+ ensure_upload_path,
+ replace_requirements,
+ run_srs_job,
+)
+from app.tools.registry import ToolRegistry
+from app.tools.srs_reqs_qwen import get_srs_tool
+
router = APIRouter()

# Register SRS tool when the router is imported.
# NOTE: get_srs_tool() is called for its side effect of registering the
# tool with ToolRegistry; the returned instance is intentionally discarded.
get_srs_tool()

# Upload file types accepted by the SRS extraction endpoint.
ALLOWED_EXTENSIONS = {".pdf", ".docx"}
+
+
@router.get("", response_model=List[ToolDefinitionResponse])
async def list_tools(
    current_user: User = Depends(get_current_user),
) -> Any:
    """Return the definitions of every registered tool.

    Authentication is required, but the user is not otherwise used.
    """
    del current_user  # only present to enforce authentication
    return ToolRegistry.list()
+
+
@router.post("/srs/jobs", response_model=SRSToolCreateJobResponse)
async def create_srs_job(
    background_tasks: BackgroundTasks,
    file: UploadFile = File(...),
    db: Session = Depends(get_db),
    current_user: User = Depends(get_current_user),
) -> Any:
    """Create an SRS requirement-extraction job from an uploaded file.

    Validates the file extension, persists the upload to disk, records a
    ToolJob row and schedules extraction in the background. Raises 400 for
    unsupported file types, 500 when the upload cannot be written.
    """
    # Path(...).name strips any directory components from the client name.
    safe_name = Path(file.filename or "").name
    ext = Path(safe_name).suffix.lower()
    if ext not in ALLOWED_EXTENSIONS:
        raise HTTPException(status_code=400, detail="仅支持 .pdf/.docx 文件")

    job = ToolJob(
        user_id=current_user.id,
        tool_name="srs.requirement_extractor",
        status="pending",
        input_file_name=safe_name,
        input_file_path="",
    )
    db.add(job)
    db.commit()
    db.refresh(job)

    target_path = ensure_upload_path(job.id, safe_name)
    try:
        content = await file.read()
        target_path.write_bytes(content)
    except Exception as exc:
        job.status = "failed"
        job.error_message = f"保存上传文件失败: {exc}"
        db.add(job)
        db.commit()
        # Remove any partially written file so failed jobs leave no residue.
        try:
            target_path.unlink(missing_ok=True)
        except OSError:
            pass
        # Chain the original error so logs retain the real cause.
        raise HTTPException(status_code=500, detail="上传文件保存失败") from exc

    job.input_file_path = str(target_path.resolve())
    db.add(job)
    db.commit()

    background_tasks.add_task(run_srs_job, job.id)

    return {
        "job_id": job.id,
        "status": job.status,
    }
+
+
@router.get("/srs/jobs/{job_id}", response_model=SRSToolJobStatusResponse)
async def get_srs_job_status(
    job_id: int,
    db: Session = Depends(get_db),
    current_user: User = Depends(get_current_user),
) -> Any:
    """Return the current status of one of the caller's SRS jobs.

    Raises 404 when the job does not exist or belongs to another user.
    """
    job = (
        db.query(ToolJob)
        .filter(ToolJob.id == job_id, ToolJob.user_id == current_user.id)
        .first()
    )
    if job is None:
        raise HTTPException(status_code=404, detail="任务不存在")

    extraction = (
        db.query(SRSExtraction)
        .filter(SRSExtraction.job_id == job.id)
        .first()
    )
    extraction_id = extraction.id if extraction is not None else None

    return {
        "job_id": job.id,
        "tool_name": job.tool_name,
        "status": job.status,
        "error_message": job.error_message,
        "extraction_id": extraction_id,
        "started_at": job.started_at,
        "completed_at": job.completed_at,
    }
+
+
@router.get("/srs/jobs/{job_id}/result", response_model=SRSToolResultResponse)
async def get_srs_job_result(
    job_id: int,
    db: Session = Depends(get_db),
    current_user: User = Depends(get_current_user),
) -> Any:
    """Return the extraction result for a completed SRS job.

    Raises 404 for unknown jobs or missing results, 409 while the job is
    not yet in the "completed" state.
    """
    job = (
        db.query(ToolJob)
        .filter(ToolJob.id == job_id, ToolJob.user_id == current_user.id)
        .first()
    )
    if job is None:
        raise HTTPException(status_code=404, detail="任务不存在")
    if job.status != "completed":
        raise HTTPException(status_code=409, detail="任务尚未完成")

    extraction = (
        db.query(SRSExtraction)
        .filter(SRSExtraction.job_id == job.id)
        .first()
    )
    if extraction is None:
        raise HTTPException(status_code=404, detail="任务结果不存在")

    return build_result_response(job, extraction)
+
+
@router.put("/srs/jobs/{job_id}/requirements", response_model=SRSToolResultResponse)
async def save_srs_requirements(
    job_id: int,
    payload: SRSToolRequirementsSaveRequest,
    db: Session = Depends(get_db),
    current_user: User = Depends(get_current_user),
) -> Any:
    """Replace the stored requirement list of an SRS extraction.

    Raises 404 when the job or its extraction result cannot be found for
    the current user.
    """
    job = (
        db.query(ToolJob)
        .filter(ToolJob.id == job_id, ToolJob.user_id == current_user.id)
        .first()
    )
    if job is None:
        raise HTTPException(status_code=404, detail="任务不存在")

    extraction = (
        db.query(SRSExtraction)
        .filter(SRSExtraction.job_id == job.id)
        .first()
    )
    if extraction is None:
        raise HTTPException(status_code=404, detail="任务结果不存在")

    new_items = [item.dict() for item in payload.requirements]
    replace_requirements(db, extraction, new_items)
    db.add(extraction)
    db.commit()
    db.refresh(extraction)

    return build_result_response(job, extraction)
diff --git a/rag-web-ui/backend/app/api/openapi/api.py b/rag-web-ui/backend/app/api/openapi/api.py
new file mode 100644
index 0000000..6556f4a
--- /dev/null
+++ b/rag-web-ui/backend/app/api/openapi/api.py
@@ -0,0 +1,6 @@
from fastapi import APIRouter

from app.api.openapi import knowledge

# Aggregate router for the API-key-authenticated (external) endpoints.
# Mounted by app.main under the /openapi prefix.
router = APIRouter()
router.include_router(knowledge.router, prefix="/knowledge", tags=["knowledge"])
\ No newline at end of file
diff --git a/rag-web-ui/backend/app/api/openapi/knowledge.py b/rag-web-ui/backend/app/api/openapi/knowledge.py
new file mode 100644
index 0000000..37e394f
--- /dev/null
+++ b/rag-web-ui/backend/app/api/openapi/knowledge.py
@@ -0,0 +1,60 @@
+from typing import Any, List
+from fastapi import APIRouter, Depends, HTTPException
+from sqlalchemy.orm import Session
+from langchain_chroma import Chroma
+from app.services.vector_store import VectorStoreFactory
+
+from app import models
+from app.db.session import get_db
+from app.core.security import get_api_key_user
+from app.core.config import settings
+from app.services.embedding.embedding_factory import EmbeddingsFactory
+
+router = APIRouter()
+
@router.get("/{knowledge_base_id}/query")
def query_knowledge_base(
    *,
    db: Session = Depends(get_db),
    knowledge_base_id: int,
    query: str,
    top_k: int = 3,
    current_user: models.User = Depends(get_api_key_user),
) -> Any:
    """
    Query a specific knowledge base using API key authentication.

    Performs a similarity search over the knowledge base's vector
    collection and returns the top_k chunks with their scores. Raises 404
    when the knowledge base is not owned by the API key's user, 500 on
    unexpected retrieval failures.
    """
    try:
        kb = db.query(models.KnowledgeBase).filter(
            models.KnowledgeBase.id == knowledge_base_id,
            models.KnowledgeBase.user_id == current_user.id
        ).first()

        if not kb:
            raise HTTPException(
                status_code=404,
                detail=f"Knowledge base {knowledge_base_id} not found",
            )

        embeddings = EmbeddingsFactory.create()

        vector_store = VectorStoreFactory.create(
            store_type=settings.VECTOR_STORE_TYPE,
            collection_name=f"kb_{knowledge_base_id}",
            embedding_function=embeddings,
        )

        results = vector_store.similarity_search_with_score(query, k=top_k)

        response = []
        for doc, score in results:
            response.append({
                "content": doc.page_content,
                "metadata": doc.metadata,
                "score": float(score)
            })

        return {"results": response}

    except HTTPException:
        # Bug fix: without this clause the blanket handler below converted
        # the explicit 404 above into a 500 with a generic message.
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
\ No newline at end of file
diff --git a/rag-web-ui/backend/app/core/__init__.py b/rag-web-ui/backend/app/core/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/rag-web-ui/backend/app/core/config.py b/rag-web-ui/backend/app/core/config.py
new file mode 100644
index 0000000..991a8b1
--- /dev/null
+++ b/rag-web-ui/backend/app/core/config.py
@@ -0,0 +1,123 @@
+import os
+from typing import List, Optional
+
+from pydantic_settings import BaseSettings
+
+
class Settings(BaseSettings):
    """Centralised application configuration.

    Every field can be overridden via an environment variable of the same
    name; a local ``.env`` file is also read (see Config below). Defaults
    are development-friendly and must be overridden in production
    (notably SECRET_KEY and the MySQL/MinIO credentials).
    """

    PROJECT_NAME: str = "RAG Web UI" # Project name
    VERSION: str = "0.1.0" # Project version
    API_V1_STR: str = "/api" # API version string

    # MySQL settings
    MYSQL_SERVER: str = os.getenv("MYSQL_SERVER", "localhost")
    MYSQL_PORT: int = int(os.getenv("MYSQL_PORT", "3306"))
    MYSQL_USER: str = os.getenv("MYSQL_USER", "ragagent")
    MYSQL_PASSWORD: str = os.getenv("MYSQL_PASSWORD", "ragagent")
    MYSQL_DATABASE: str = os.getenv("MYSQL_DATABASE", "ragagent")
    # Optional full DSN; when set it takes precedence over the MySQL
    # fields above (see get_database_url).
    SQLALCHEMY_DATABASE_URI: Optional[str] = None

    @property
    def get_database_url(self) -> str:
        # An explicit SQLALCHEMY_DATABASE_URI wins over the assembled DSN.
        if self.SQLALCHEMY_DATABASE_URI:
            return self.SQLALCHEMY_DATABASE_URI
        return (
            f"mysql+mysqlconnector://{self.MYSQL_USER}:{self.MYSQL_PASSWORD}"
            f"@{self.MYSQL_SERVER}:{self.MYSQL_PORT}/{self.MYSQL_DATABASE}"
        )

    # JWT settings
    SECRET_KEY: str = os.getenv("SECRET_KEY", "your-secret-key-here")
    ALGORITHM: str = "HS256"
    # Default token lifetime: 10080 minutes = 7 days.
    ACCESS_TOKEN_EXPIRE_MINUTES: int = int(os.getenv("ACCESS_TOKEN_EXPIRE_MINUTES", "10080"))

    # Chat Provider settings
    CHAT_PROVIDER: str = os.getenv("CHAT_PROVIDER", "openai")

    # Embeddings settings
    EMBEDDINGS_PROVIDER: str = os.getenv("EMBEDDINGS_PROVIDER", "openai")

    # MinIO settings
    MINIO_ENDPOINT: str = os.getenv("MINIO_ENDPOINT", "localhost:9000")
    MINIO_ACCESS_KEY: str = os.getenv("MINIO_ACCESS_KEY", "minioadmin")
    MINIO_SECRET_KEY: str = os.getenv("MINIO_SECRET_KEY", "minioadmin")
    MINIO_BUCKET_NAME: str = os.getenv("MINIO_BUCKET_NAME", "documents")

    # Shared model API key fallback — used when a provider-specific key
    # environment variable is not set.
    API_KEY: str = os.getenv("API_KEY", "")

    # OpenAI settings
    OPENAI_API_BASE: str = os.getenv("OPENAI_API_BASE", "https://api.openai.com/v1")
    OPENAI_API_KEY: str = os.getenv(
        "OPENAI_API_KEY", os.getenv("API_KEY", "your-openai-api-key-here")
    )
    OPENAI_MODEL: str = os.getenv("OPENAI_MODEL", "gpt-4")
    OPENAI_EMBEDDINGS_MODEL: str = os.getenv("OPENAI_EMBEDDINGS_MODEL", "text-embedding-ada-002")

    # DashScope settings
    # Falls back DASH_SCOPE_API_KEY -> DASHSCOPE_API_KEY -> API_KEY.
    DASH_SCOPE_API_KEY: str = os.getenv(
        "DASH_SCOPE_API_KEY",
        os.getenv("DASHSCOPE_API_KEY", os.getenv("API_KEY", "")),
    )
    DASH_SCOPE_API_BASE: str = os.getenv(
        "DASH_SCOPE_API_BASE", "https://dashscope.aliyuncs.com/compatible-mode/v1"
    )
    DASH_SCOPE_CHAT_MODEL: str = os.getenv("DASH_SCOPE_CHAT_MODEL", "qwen3-max")
    DASH_SCOPE_EMBEDDINGS_MODEL: str = os.getenv("DASH_SCOPE_EMBEDDINGS_MODEL", "")

    # Vector Store settings
    VECTOR_STORE_TYPE: str = os.getenv("VECTOR_STORE_TYPE", "chroma")

    # External reranker settings
    RERANKER_API_URL: str = os.getenv("RERANKER_API_URL", "")
    RERANKER_API_KEY: str = os.getenv(
        "RERANKER_API_KEY",
        os.getenv(
            "DASH_SCOPE_API_KEY",
            os.getenv("DASHSCOPE_API_KEY", os.getenv("API_KEY", "")),
        ),
    )
    RERANKER_MODEL: str = os.getenv("RERANKER_MODEL", "")
    RERANKER_TIMEOUT_SECONDS: float = float(os.getenv("RERANKER_TIMEOUT_SECONDS", "8"))
    RERANKER_WEIGHT: float = float(os.getenv("RERANKER_WEIGHT", "0.75"))

    # GraphRAG settings
    GRAPHRAG_ENABLED: bool = os.getenv("GRAPHRAG_ENABLED", "false").lower() == "true"
    GRAPHRAG_WORKING_DIR: str = os.getenv("GRAPHRAG_WORKING_DIR", "./graphrag_cache")
    GRAPHRAG_GRAPH_STORAGE: str = os.getenv("GRAPHRAG_GRAPH_STORAGE", "neo4j")
    GRAPHRAG_QUERY_LEVEL: int = int(os.getenv("GRAPHRAG_QUERY_LEVEL", "2"))
    GRAPHRAG_LOCAL_TOP_K: int = int(os.getenv("GRAPHRAG_LOCAL_TOP_K", "20"))
    GRAPHRAG_ENTITY_EXTRACT_MAX_GLEANING: int = int(os.getenv("GRAPHRAG_ENTITY_EXTRACT_MAX_GLEANING", "1"))
    GRAPHRAG_EMBEDDING_DIM: int = int(os.getenv("GRAPHRAG_EMBEDDING_DIM", "1024"))
    GRAPHRAG_EMBEDDING_MAX_TOKEN_SIZE: int = int(os.getenv("GRAPHRAG_EMBEDDING_MAX_TOKEN_SIZE", "8192"))

    # Neo4j settings
    NEO4J_URL: str = os.getenv("NEO4J_URL", "bolt://localhost:7687")
    NEO4J_USERNAME: str = os.getenv("NEO4J_USERNAME", "neo4j")
    NEO4J_PASSWORD: str = os.getenv("NEO4J_PASSWORD", "neo4j")

    # Chroma DB settings
    CHROMA_DB_HOST: str = os.getenv("CHROMA_DB_HOST", "chromadb")
    CHROMA_DB_PORT: int = int(os.getenv("CHROMA_DB_PORT", "8000"))

    # Qdrant DB settings
    QDRANT_URL: str = os.getenv("QDRANT_URL", "http://localhost:6333")
    QDRANT_PREFER_GRPC: bool = os.getenv("QDRANT_PREFER_GRPC", "true").lower() == "true"

    # Deepseek settings
    DEEPSEEK_API_KEY: str = ""
    DEEPSEEK_API_BASE: str = "https://api.deepseek.com/v1" # default API base URL
    DEEPSEEK_MODEL: str = "deepseek-chat" # default model name

    # Ollama settings
    OLLAMA_API_BASE: str = "http://localhost:11434"
    OLLAMA_MODEL: str = "deepseek-r1:7b"
    OLLAMA_EMBEDDINGS_MODEL: str = os.getenv(
        "OLLAMA_EMBEDDINGS_MODEL", "nomic-embed-text"
    )

    class Config:
        env_file = ".env"


# Singleton settings instance imported across the application.
settings = Settings()
diff --git a/rag-web-ui/backend/app/core/minio.py b/rag-web-ui/backend/app/core/minio.py
new file mode 100644
index 0000000..73b01f2
--- /dev/null
+++ b/rag-web-ui/backend/app/core/minio.py
@@ -0,0 +1,29 @@
+import logging
+from minio import Minio
+from app.core.config import settings
+
+logger = logging.getLogger(__name__)
+
def get_minio_client() -> Minio:
    """Create and return a new MinIO client built from application settings."""
    logger.info("Creating MinIO client instance.")
    client = Minio(
        settings.MINIO_ENDPOINT,
        access_key=settings.MINIO_ACCESS_KEY,
        secret_key=settings.MINIO_SECRET_KEY,
        secure=False,  # Set to True if using HTTPS
    )
    return client
+
def init_minio():
    """Ensure the configured MinIO bucket exists, creating it when absent."""
    client = get_minio_client()
    bucket = settings.MINIO_BUCKET_NAME
    logger.info(f"Checking if bucket {bucket} exists.")
    if client.bucket_exists(bucket):
        logger.info(f"Bucket {bucket} already exists.")
    else:
        logger.info(f"Bucket {bucket} does not exist. Creating bucket.")
        client.make_bucket(bucket)
diff --git a/rag-web-ui/backend/app/core/runtime_checks.py b/rag-web-ui/backend/app/core/runtime_checks.py
new file mode 100644
index 0000000..cc50d76
--- /dev/null
+++ b/rag-web-ui/backend/app/core/runtime_checks.py
@@ -0,0 +1,27 @@
+import logging
+
+from app.core.config import Settings
+
+logger = logging.getLogger(__name__)
+
+
def validate_runtime_settings(settings: Settings) -> None:
    """Fail fast on inconsistent runtime configuration.

    Raises ValueError listing every problem found; emits a warning for
    suspicious-but-survivable reranker settings.
    """
    problems = []

    if settings.GRAPHRAG_ENABLED:
        storage = settings.GRAPHRAG_GRAPH_STORAGE.lower()
        if storage not in {"neo4j", "networkx"}:
            problems.append("GRAPHRAG_GRAPH_STORAGE must be either 'neo4j' or 'networkx'.")

        if storage == "neo4j":
            for value, message in (
                (settings.NEO4J_URL, "NEO4J_URL is required when GraphRAG Neo4j storage is enabled."),
                (settings.NEO4J_USERNAME, "NEO4J_USERNAME is required when GraphRAG Neo4j storage is enabled."),
                (settings.NEO4J_PASSWORD, "NEO4J_PASSWORD is required when GraphRAG Neo4j storage is enabled."),
            ):
                if not value:
                    problems.append(message)

    if settings.RERANKER_API_URL and not settings.RERANKER_MODEL:
        logger.warning("RERANKER_API_URL is configured but RERANKER_MODEL is empty. The API may reject requests.")

    if problems:
        raise ValueError("Runtime configuration validation failed: " + " | ".join(problems))
diff --git a/rag-web-ui/backend/app/core/security.py b/rag-web-ui/backend/app/core/security.py
new file mode 100644
index 0000000..546b787
--- /dev/null
+++ b/rag-web-ui/backend/app/core/security.py
@@ -0,0 +1,84 @@
from datetime import datetime, timedelta, timezone
from typing import Optional

import bcrypt
from fastapi import Depends, HTTPException, Security, status
from fastapi.security import APIKeyHeader, OAuth2PasswordBearer
from jose import JWTError, jwt
from sqlalchemy.orm import Session

from app.core.config import settings
from app.db.session import get_db
from app.models.user import User
from app.services.api_key import APIKeyService
+
+oauth2_scheme = OAuth2PasswordBearer(tokenUrl="/api/v1/auth/login/access-token")
+api_key_header = APIKeyHeader(name="X-API-Key", auto_error=False)
+
def verify_password(plain_password: str, hashed_password: str) -> bool:
    """Return True when *plain_password* matches the bcrypt *hashed_password*."""
    candidate = plain_password.encode("utf-8")
    stored = hashed_password.encode("utf-8")
    return bcrypt.checkpw(candidate, stored)
+
def get_password_hash(password: str) -> str:
    """Hash *password* with bcrypt using a freshly generated salt."""
    salt = bcrypt.gensalt()
    return bcrypt.hashpw(password.encode("utf-8"), salt).decode("utf-8")
+
def create_access_token(data: dict, expires_delta: Optional[timedelta] = None) -> str:
    """Create a signed JWT access token.

    Args:
        data: Claims to embed in the token (e.g. ``{"sub": username}``).
        expires_delta: Optional custom lifetime; defaults to
            ``ACCESS_TOKEN_EXPIRE_MINUTES`` from settings.

    Returns:
        The encoded JWT string.
    """
    lifetime = expires_delta or timedelta(minutes=settings.ACCESS_TOKEN_EXPIRE_MINUTES)
    to_encode = data.copy()
    # Timezone-aware now() replaces the deprecated naive datetime.utcnow().
    to_encode["exp"] = datetime.now(timezone.utc) + lifetime
    return jwt.encode(to_encode, settings.SECRET_KEY, algorithm=settings.ALGORITHM)
+
def get_current_user(
    db: Session = Depends(get_db),
    token: str = Depends(oauth2_scheme)
) -> User:
    """Resolve the current active user from a bearer JWT.

    Raises 401 when the token is invalid, the user is unknown, or the
    account is inactive.
    """
    def _unauthorized(message: str) -> HTTPException:
        return HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED,
            detail=message,
            headers={"WWW-Authenticate": "Bearer"},
        )

    try:
        claims = jwt.decode(token, settings.SECRET_KEY, algorithms=[settings.ALGORITHM])
    except JWTError:
        raise _unauthorized("Could not validate credentials")

    username = claims.get("sub")
    if username is None:
        raise _unauthorized("Could not validate credentials")

    user = db.query(User).filter(User.username == username).first()
    if user is None:
        raise _unauthorized("Could not validate credentials")
    if not user.is_active:
        raise _unauthorized("Inactive user")
    return user
+
def get_api_key_user(
    db: Session = Depends(get_db),
    api_key: str = Security(api_key_header),
) -> User:
    """Resolve the user owning an ``X-API-Key`` header value.

    Rejects missing, unknown, and deactivated keys with 401; records the
    key's last-used timestamp on success.
    """
    if not api_key:
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED,
            detail="API key header missing",
        )

    record = APIKeyService.get_api_key_by_key(db=db, key=api_key)
    if not record:
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED,
            detail="Invalid API key",
        )
    if not record.is_active:
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED,
            detail="Inactive API key",
        )

    APIKeyService.update_last_used(db=db, api_key=record)
    return record.user
\ No newline at end of file
diff --git a/rag-web-ui/backend/app/db/__init__.py b/rag-web-ui/backend/app/db/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/rag-web-ui/backend/app/db/session.py b/rag-web-ui/backend/app/db/session.py
new file mode 100644
index 0000000..cccc624
--- /dev/null
+++ b/rag-web-ui/backend/app/db/session.py
@@ -0,0 +1,13 @@
+from sqlalchemy import create_engine
+from sqlalchemy.orm import sessionmaker
+from app.core.config import settings
+
# pool_pre_ping revalidates pooled connections before each checkout,
# preventing "MySQL server has gone away" errors after idle periods.
engine = create_engine(settings.get_database_url, pool_pre_ping=True)
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)

def get_db():
    """FastAPI dependency yielding a DB session, always closed afterwards."""
    db = SessionLocal()
    try:
        yield db
    finally:
        db.close()
\ No newline at end of file
diff --git a/rag-web-ui/backend/app/main.py b/rag-web-ui/backend/app/main.py
new file mode 100644
index 0000000..ec1efaf
--- /dev/null
+++ b/rag-web-ui/backend/app/main.py
@@ -0,0 +1,47 @@
+import logging
+
+from app.api.api_v1.api import api_router
+from app.api.openapi.api import router as openapi_router
+from app.core.config import settings
+from app.core.minio import init_minio
+from app.core.runtime_checks import validate_runtime_settings
+from app.startup.migarate import DatabaseMigrator
+from fastapi import FastAPI
+
# Root logging configuration for the whole backend process.
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
)

app = FastAPI(
    title=settings.PROJECT_NAME,
    version=settings.VERSION,
    openapi_url=f"{settings.API_V1_STR}/openapi.json",
)

# Include routers
# JWT-authenticated app API under /api; API-key endpoints under /openapi.
app.include_router(api_router, prefix=settings.API_V1_STR)
app.include_router(openapi_router, prefix="/openapi")
+
+
@app.on_event("startup")
async def startup_event():
    """Validate configuration, then prepare external services.

    Order matters: fail fast on bad settings before touching MinIO or
    running database migrations.
    """
    validate_runtime_settings(settings)
    # Initialize MinIO
    init_minio()
    # Run database migrations
    migrator = DatabaseMigrator(settings.get_database_url)
    migrator.run_migrations()
+
+
@app.get("/")
def root():
    """Landing endpoint confirming the API is reachable."""
    greeting = {"message": "Welcome to RAG Web UI API"}
    return greeting
+
+
@app.get("/api/health")
async def health_check():
    """Liveness probe reporting service status and running version."""
    return {"status": "healthy", "version": settings.VERSION}
diff --git a/rag-web-ui/backend/app/models/__init__.py b/rag-web-ui/backend/app/models/__init__.py
new file mode 100644
index 0000000..0e8caac
--- /dev/null
+++ b/rag-web-ui/backend/app/models/__init__.py
@@ -0,0 +1,18 @@
+from .user import User
+from .knowledge import KnowledgeBase, Document, DocumentChunk
+from .chat import Chat, Message
+from .api_key import APIKey
+from .tooling import ToolJob, SRSExtraction, SRSRequirement
+
+__all__ = [
+ "User",
+ "KnowledgeBase",
+ "Document",
+ "DocumentChunk",
+ "Chat",
+ "Message",
+ "APIKey",
+ "ToolJob",
+ "SRSExtraction",
+ "SRSRequirement",
+]
diff --git a/rag-web-ui/backend/app/models/api_key.py b/rag-web-ui/backend/app/models/api_key.py
new file mode 100644
index 0000000..989f215
--- /dev/null
+++ b/rag-web-ui/backend/app/models/api_key.py
@@ -0,0 +1,18 @@
+from sqlalchemy import Column, Integer, String, Boolean, DateTime, ForeignKey, VARCHAR
+from sqlalchemy.orm import relationship
+from sqlalchemy.sql import func
+
+from app.models.base import Base, TimestampMixin
+
class APIKey(Base, TimestampMixin):
    """Per-user API key granting programmatic access to the public API."""

    __tablename__ = "api_keys"

    id = Column(Integer, primary_key=True, index=True)
    # String(128) instead of raw VARCHAR for consistency with every other
    # model in this package (String renders as VARCHAR anyway). 128 chars
    # fits the "sk-" + 64-hex-digit tokens minted by APIKeyService.
    key = Column(String(128), unique=True, index=True, nullable=False)
    name = Column(String(255), nullable=False)
    user_id = Column(Integer, ForeignKey("users.id"), nullable=False)
    is_active = Column(Boolean, default=True, nullable=False)
    last_used_at = Column(DateTime(timezone=True), nullable=True)

    # Relationships
    user = relationship("User", back_populates="api_keys")
\ No newline at end of file
diff --git a/rag-web-ui/backend/app/models/base.py b/rag-web-ui/backend/app/models/base.py
new file mode 100644
index 0000000..2d36066
--- /dev/null
+++ b/rag-web-ui/backend/app/models/base.py
@@ -0,0 +1,9 @@
from datetime import datetime

from sqlalchemy import Column, DateTime
# declarative_base moved to sqlalchemy.orm in SQLAlchemy 1.4; the
# ext.declarative location is deprecated and warns on 2.x.
from sqlalchemy.orm import declarative_base

# Single declarative base shared by every ORM model in the app.
Base = declarative_base()


class TimestampMixin:
    """Adds created_at/updated_at audit columns to a model.

    Values are naive UTC datetimes (datetime.utcnow); do not mix them with
    timezone-aware datetimes.
    """

    created_at = Column(DateTime, default=datetime.utcnow, nullable=False)
    updated_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow, nullable=False)
\ No newline at end of file
diff --git a/rag-web-ui/backend/app/models/chat.py b/rag-web-ui/backend/app/models/chat.py
new file mode 100644
index 0000000..310062e
--- /dev/null
+++ b/rag-web-ui/backend/app/models/chat.py
@@ -0,0 +1,39 @@
+from sqlalchemy import Column, Integer, String, ForeignKey, Boolean, Table
+from sqlalchemy.dialects.mysql import LONGTEXT
+from sqlalchemy.orm import relationship
+from app.models.base import Base, TimestampMixin
+
# Association table for many-to-many relationship between Chat and KnowledgeBase.
# The composite primary key prevents duplicate (chat, knowledge base) links.
chat_knowledge_bases = Table(
    "chat_knowledge_bases",
    Base.metadata,
    Column("chat_id", Integer, ForeignKey("chats.id"), primary_key=True),
    Column("knowledge_base_id", Integer, ForeignKey("knowledge_bases.id"), primary_key=True),
)
+
class Chat(Base, TimestampMixin):
    """A user's conversation, optionally linked to several knowledge bases."""

    __tablename__ = "chats"

    id = Column(Integer, primary_key=True, index=True)
    title = Column(String(255), nullable=False)
    user_id = Column(Integer, ForeignKey("users.id"), nullable=False)

    # Relationships
    # Messages die with the chat (delete-orphan cascade).
    messages = relationship("Message", back_populates="chat", cascade="all, delete-orphan")
    user = relationship("User", back_populates="chats")
    # Many-to-many via the chat_knowledge_bases association table; the
    # backref exposes `KnowledgeBase.chats`.
    knowledge_bases = relationship(
        "KnowledgeBase",
        secondary=chat_knowledge_bases,
        backref="chats"
    )
+
class Message(Base, TimestampMixin):
    """A single turn inside a chat.

    `role` holds the speaker tag — chat_service writes "user" and
    "assistant". LONGTEXT content because assistant replies embed a
    base64-encoded context payload before the visible text.
    """

    __tablename__ = "messages"

    id = Column(Integer, primary_key=True, index=True)
    content = Column(LONGTEXT, nullable=False)
    role = Column(String(50), nullable=False)
    chat_id = Column(Integer, ForeignKey("chats.id"), nullable=False)

    # Relationships
    chat = relationship("Chat", back_populates="messages")
\ No newline at end of file
diff --git a/rag-web-ui/backend/app/models/knowledge.py b/rag-web-ui/backend/app/models/knowledge.py
new file mode 100644
index 0000000..ebde77f
--- /dev/null
+++ b/rag-web-ui/backend/app/models/knowledge.py
@@ -0,0 +1,97 @@
+from sqlalchemy import Column, Integer, String, ForeignKey, Text, DateTime, JSON, BigInteger, TIMESTAMP, text
+from sqlalchemy.dialects.mysql import LONGTEXT
+from sqlalchemy.orm import relationship
+from app.models.base import Base, TimestampMixin
+from datetime import datetime
+import sqlalchemy as sa
+
class KnowledgeBase(Base, TimestampMixin):
    """A user-owned collection of documents that backs retrieval."""

    __tablename__ = "knowledge_bases"

    id = Column(Integer, primary_key=True, index=True)
    name = Column(String(255), nullable=False)
    description = Column(LONGTEXT)
    user_id = Column(Integer, ForeignKey("users.id"), nullable=False)
    # created_at/updated_at come from TimestampMixin. The previous local
    # redefinitions duplicated the mixin's columns (same utcnow defaults)
    # while silently dropping its nullable=False constraint.

    # Relationships
    documents = relationship("Document", back_populates="knowledge_base", cascade="all, delete-orphan")
    user = relationship("User", back_populates="knowledge_bases")
    processing_tasks = relationship("ProcessingTask", back_populates="knowledge_base")
    chunks = relationship("DocumentChunk", back_populates="knowledge_base", cascade="all, delete-orphan")
    document_uploads = relationship("DocumentUpload", back_populates="knowledge_base", cascade="all, delete-orphan")
+
class Document(Base, TimestampMixin):
    """A processed file stored in MinIO and attached to a knowledge base."""

    __tablename__ = "documents"

    id = Column(Integer, primary_key=True, index=True)
    file_path = Column(String(255), nullable=False)  # Path in MinIO
    file_name = Column(String(255), nullable=False)  # Actual file name
    file_size = Column(BigInteger, nullable=False)  # File size in bytes
    content_type = Column(String(100), nullable=False)  # MIME type
    file_hash = Column(String(64), index=True)  # SHA-256 hash of file content
    knowledge_base_id = Column(Integer, ForeignKey("knowledge_bases.id"), nullable=False)
    # created_at/updated_at come from TimestampMixin. The previous local
    # redefinitions duplicated the mixin's columns (same utcnow defaults)
    # while silently dropping its nullable=False constraint.

    # Relationships
    knowledge_base = relationship("KnowledgeBase", back_populates="documents")
    processing_tasks = relationship("ProcessingTask", back_populates="document")
    chunks = relationship("DocumentChunk", back_populates="document", cascade="all, delete-orphan")

    __table_args__ = (
        # Ensure file_name is unique within each knowledge base
        sa.UniqueConstraint('knowledge_base_id', 'file_name', name='uq_kb_file_name'),
    )
+
class DocumentUpload(Base):
    """A staged upload: file parked at temp_path before ingestion.

    NOTE(review): these ``String`` columns carry no length; fine for the ORM
    layer, but MySQL DDL generation via create_all would fail — schema
    creation presumably relies on the Alembic migrations instead (verify).
    """

    __tablename__ = "document_uploads"

    id = Column(Integer, primary_key=True, index=True)
    # Uploads are removed by the database when their knowledge base goes away.
    knowledge_base_id = Column(Integer, ForeignKey("knowledge_bases.id", ondelete="CASCADE"), nullable=False)
    file_name = Column(String, nullable=False)
    file_hash = Column(String, nullable=False)
    file_size = Column(BigInteger, nullable=False)
    content_type = Column(String, nullable=False)
    temp_path = Column(String, nullable=False)
    # Server-side timestamp/default, unlike the utcnow() client defaults
    # used elsewhere in this module.
    created_at = Column(TIMESTAMP, nullable=False, server_default=text("now()"))
    status = Column(String, nullable=False, server_default="pending")
    error_message = Column(Text)

    # Relationships
    knowledge_base = relationship("KnowledgeBase", back_populates="document_uploads")
+
class ProcessingTask(Base):
    """Tracks one ingestion run for a document (or a pending upload)."""

    __tablename__ = "processing_tasks"

    id = Column(Integer, primary_key=True, index=True)
    knowledge_base_id = Column(Integer, ForeignKey("knowledge_bases.id"))
    # Either a finished Document or a staged DocumentUpload may be linked;
    # both are nullable.
    document_id = Column(Integer, ForeignKey("documents.id"), nullable=True)
    document_upload_id = Column(Integer, ForeignKey("document_uploads.id"), nullable=True)
    status = Column(String(50), default="pending")  # pending, processing, completed, failed
    error_message = Column(Text, nullable=True)
    created_at = Column(DateTime, default=datetime.utcnow)
    updated_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow)

    knowledge_base = relationship("KnowledgeBase", back_populates="processing_tasks")
    document = relationship("Document", back_populates="processing_tasks")
    document_upload = relationship("DocumentUpload", backref="processing_tasks")
+
class DocumentChunk(Base, TimestampMixin):
    """One retrievable chunk of a document, keyed by a SHA-256 identifier."""

    __tablename__ = "document_chunks"

    id = Column(String(64), primary_key=True)  # SHA-256 hash as ID
    kb_id = Column(Integer, ForeignKey("knowledge_bases.id"), nullable=False)
    document_id = Column(Integer, ForeignKey("documents.id"), nullable=False)
    file_name = Column(String(255), nullable=False)
    chunk_metadata = Column(JSON, nullable=True)
    hash = Column(String(64), nullable=False, index=True)  # Content hash for change detection
    # NOTE: `hash` shadows the builtin name, but as a class attribute this is
    # harmless and matches the DB column name.

    # Relationships
    knowledge_base = relationship("KnowledgeBase", back_populates="chunks")
    document = relationship("Document", back_populates="chunks")

    __table_args__ = (
        # Speeds the common "chunks of file X in KB Y" lookup.
        sa.Index('idx_kb_file_name', 'kb_id', 'file_name'),
    )
\ No newline at end of file
diff --git a/rag-web-ui/backend/app/models/tooling.py b/rag-web-ui/backend/app/models/tooling.py
new file mode 100644
index 0000000..3b3f28a
--- /dev/null
+++ b/rag-web-ui/backend/app/models/tooling.py
@@ -0,0 +1,76 @@
+from datetime import datetime
+
+import sqlalchemy as sa
+from sqlalchemy import Column, DateTime, ForeignKey, Integer, JSON, String, Text
+from sqlalchemy.dialects.mysql import LONGTEXT
+from sqlalchemy.orm import relationship
+
+from app.models.base import Base, TimestampMixin
+
+
class ToolJob(Base, TimestampMixin):
    """One asynchronous run of a named tool over an uploaded input file."""

    __tablename__ = "tool_jobs"

    id = Column(Integer, primary_key=True, index=True)
    user_id = Column(Integer, ForeignKey("users.id"), nullable=False, index=True)
    tool_name = Column(String(128), nullable=False, index=True)
    status = Column(String(32), nullable=False, default="pending")
    input_file_name = Column(String(255), nullable=False)
    input_file_path = Column(String(512), nullable=False)
    error_message = Column(Text, nullable=True)
    started_at = Column(DateTime, nullable=True)
    completed_at = Column(DateTime, nullable=True)
    output_summary = Column(JSON, nullable=True)

    user = relationship("User")
    # At most one SRS extraction per job; removed together with the job.
    srs_extraction = relationship(
        "SRSExtraction",
        back_populates="job",
        uselist=False,
        cascade="all, delete-orphan",
    )
+
+
class SRSExtraction(Base, TimestampMixin):
    """Result set produced by an SRS-extraction ToolJob (one per job)."""

    __tablename__ = "srs_extractions"

    id = Column(Integer, primary_key=True, index=True)
    # unique=True enforces the one-extraction-per-job invariant at the DB level.
    job_id = Column(Integer, ForeignKey("tool_jobs.id", ondelete="CASCADE"), nullable=False, unique=True)
    document_name = Column(String(255), nullable=False)
    document_title = Column(String(255), nullable=False)
    generated_at = Column(DateTime, default=datetime.utcnow, nullable=False)
    total_requirements = Column(Integer, nullable=False, default=0)
    statistics = Column(JSON, nullable=True)
    raw_output = Column(JSON, nullable=True)

    job = relationship("ToolJob", back_populates="srs_extraction")
    # Requirements come back in their stable display order.
    requirements = relationship(
        "SRSRequirement",
        back_populates="extraction",
        cascade="all, delete-orphan",
        order_by="SRSRequirement.sort_order",
    )
+
+
class SRSRequirement(Base, TimestampMixin):
    """A single requirement extracted from an SRS document."""

    __tablename__ = "srs_requirements"

    id = Column(Integer, primary_key=True, index=True)
    extraction_id = Column(Integer, ForeignKey("srs_extractions.id", ondelete="CASCADE"), nullable=False)
    # Stable identifier within one extraction (unique per extraction, below).
    requirement_uid = Column(String(64), nullable=False)
    title = Column(String(255), nullable=False)
    description = Column(LONGTEXT, nullable=False)
    priority = Column(String(16), nullable=False, default="中")  # default: "medium"
    acceptance_criteria = Column(JSON, nullable=False)
    source_field = Column(String(255), nullable=False)
    section_number = Column(String(64), nullable=True)
    section_title = Column(String(255), nullable=True)
    requirement_type = Column(String(64), nullable=True)
    sort_order = Column(Integer, nullable=False, default=0)

    extraction = relationship("SRSExtraction", back_populates="requirements")

    __table_args__ = (
        sa.UniqueConstraint("extraction_id", "requirement_uid", name="uq_srs_extraction_requirement_uid"),
        # Supports ordered listing of an extraction's requirements.
        sa.Index("idx_srs_requirements_extraction_sort", "extraction_id", "sort_order"),
    )
diff --git a/rag-web-ui/backend/app/models/user.py b/rag-web-ui/backend/app/models/user.py
new file mode 100644
index 0000000..8817f14
--- /dev/null
+++ b/rag-web-ui/backend/app/models/user.py
@@ -0,0 +1,18 @@
+from sqlalchemy import Boolean, Column, Integer, String
+from sqlalchemy.orm import relationship
+from app.models.base import Base, TimestampMixin
+
class User(Base, TimestampMixin):
    """Application account; owns knowledge bases, chats, and API keys."""

    __tablename__ = "users"

    id = Column(Integer, primary_key=True, index=True)
    email = Column(String(255), unique=True, index=True, nullable=False)
    username = Column(String(255), unique=True, index=True, nullable=False)
    hashed_password = Column(String(255), nullable=False)  # never store plaintext
    is_active = Column(Boolean, default=True)
    is_superuser = Column(Boolean, default=False)

    # Relationships
    knowledge_bases = relationship("KnowledgeBase", back_populates="user")
    chats = relationship("Chat", back_populates="user")
    # API keys are deleted with their owner.
    api_keys = relationship("APIKey", back_populates="user", cascade="all, delete-orphan")
\ No newline at end of file
diff --git a/rag-web-ui/backend/app/schemas/__init__.py b/rag-web-ui/backend/app/schemas/__init__.py
new file mode 100644
index 0000000..327fcaa
--- /dev/null
+++ b/rag-web-ui/backend/app/schemas/__init__.py
@@ -0,0 +1,12 @@
+from .api_key import APIKey, APIKeyCreate, APIKeyUpdate, APIKeyInDB
+from .user import UserBase, UserCreate, UserUpdate, UserResponse
+from .token import Token, TokenPayload
+from .knowledge import KnowledgeBaseBase, KnowledgeBaseCreate, KnowledgeBaseUpdate, KnowledgeBaseResponse
+from .testing import (
+ ExpectedResultEntry,
+ StepLogEntry,
+ TestCaseEntry,
+ TestItemEntry,
+ TestingPipelineRequest,
+ TestingPipelineResponse,
+)
diff --git a/rag-web-ui/backend/app/schemas/api_key.py b/rag-web-ui/backend/app/schemas/api_key.py
new file mode 100644
index 0000000..25830eb
--- /dev/null
+++ b/rag-web-ui/backend/app/schemas/api_key.py
@@ -0,0 +1,28 @@
+from typing import Optional
+from datetime import datetime
+from pydantic import BaseModel
+
class APIKeyBase(BaseModel):
    """Fields shared by all API-key payloads."""
    name: str
    is_active: bool = True
+
class APIKeyCreate(APIKeyBase):
    """Creation payload; the secret key value is generated server-side."""
    pass
+
class APIKeyUpdate(BaseModel):
    """Partial update; only fields explicitly set by the caller are applied."""
    name: Optional[str] = None
    is_active: Optional[bool] = None
+
class APIKey(APIKeyBase):
    """Full API-key representation (includes the secret `key` value)."""
    id: int
    key: str
    user_id: int
    last_used_at: Optional[datetime] = None
    created_at: datetime
    updated_at: datetime

    class Config:
        # Allow construction from ORM objects (pydantic attribute mode).
        from_attributes = True
+
class APIKeyInDB(APIKey):
    """DB-side schema; currently identical to APIKey."""
    pass
\ No newline at end of file
diff --git a/rag-web-ui/backend/app/schemas/chat.py b/rag-web-ui/backend/app/schemas/chat.py
new file mode 100644
index 0000000..4394c50
--- /dev/null
+++ b/rag-web-ui/backend/app/schemas/chat.py
@@ -0,0 +1,39 @@
+from pydantic import BaseModel
+from typing import List, Optional
+from datetime import datetime
+
class MessageBase(BaseModel):
    """Common message fields; `role` mirrors Message.role in the ORM."""
    content: str
    role: str
+
class MessageCreate(MessageBase):
    """Creation payload; attaches the message to an existing chat."""
    chat_id: int
+
class MessageResponse(MessageBase):
    """Message as returned by the API, built from the ORM row."""
    id: int
    chat_id: int
    created_at: datetime
    updated_at: datetime

    class Config:
        from_attributes = True
+
class ChatBase(BaseModel):
    """Common chat fields."""
    title: str
+
class ChatCreate(ChatBase):
    """Creation payload; links the chat to one or more knowledge bases."""
    knowledge_base_ids: List[int]
+
class ChatUpdate(ChatBase):
    """Update payload; `title` stays required (inherited from ChatBase)."""
    knowledge_base_ids: Optional[List[int]] = None
+
class ChatResponse(ChatBase):
    """Chat with its messages and linked knowledge-base ids."""
    id: int
    user_id: int
    created_at: datetime
    updated_at: datetime
    messages: List[MessageResponse] = []
    knowledge_base_ids: List[int] = []

    class Config:
        from_attributes = True
\ No newline at end of file
diff --git a/rag-web-ui/backend/app/schemas/knowledge.py b/rag-web-ui/backend/app/schemas/knowledge.py
new file mode 100644
index 0000000..84ccd3c
--- /dev/null
+++ b/rag-web-ui/backend/app/schemas/knowledge.py
@@ -0,0 +1,85 @@
+from typing import Optional, List
+from datetime import datetime
+from pydantic import BaseModel
+
class KnowledgeBaseBase(BaseModel):
    """Common knowledge-base fields."""
    name: str
    description: Optional[str] = None
+
class KnowledgeBaseCreate(KnowledgeBaseBase):
    """Creation payload; same shape as the base fields."""
    pass
+
class KnowledgeBaseUpdate(KnowledgeBaseBase):
    """Update payload; `name` remains required (inherited)."""
    pass
+
class DocumentBase(BaseModel):
    """Common document fields, mirroring the Document ORM columns."""
    file_name: str
    file_path: str
    file_hash: str
    file_size: int
    content_type: str
+
class DocumentCreate(DocumentBase):
    """Creation payload; attaches the document to a knowledge base."""
    knowledge_base_id: int
+
class DocumentUploadBase(BaseModel):
    """Common fields for a staged (not yet processed) upload."""
    file_name: str
    file_hash: str
    file_size: int
    content_type: str
    temp_path: str
    status: str = "pending"
    error_message: Optional[str] = None
+
class DocumentUploadCreate(DocumentUploadBase):
    """Creation payload; attaches the upload to a knowledge base."""
    knowledge_base_id: int
+
class DocumentUploadResponse(DocumentUploadBase):
    """Upload record as returned by the API."""
    id: int
    created_at: datetime

    class Config:
        from_attributes = True
+
class ProcessingTaskBase(BaseModel):
    """Common processing-task fields."""
    status: str
    error_message: Optional[str] = None
+
class ProcessingTaskCreate(ProcessingTaskBase):
    """Creation payload linking the task to a document and knowledge base."""
    document_id: int
    knowledge_base_id: int
+
class ProcessingTask(ProcessingTaskBase):
    """Processing task as returned by the API."""
    id: int
    document_id: int
    knowledge_base_id: int
    created_at: datetime
    updated_at: datetime

    class Config:
        from_attributes = True
+
class DocumentResponse(DocumentBase):
    """Document with its processing-task history."""
    id: int
    knowledge_base_id: int
    created_at: datetime
    updated_at: datetime
    processing_tasks: List[ProcessingTask] = []

    class Config:
        from_attributes = True
+
class KnowledgeBaseResponse(KnowledgeBaseBase):
    """Knowledge base with its documents, built from ORM rows."""
    id: int
    user_id: int
    created_at: datetime
    updated_at: datetime
    documents: List[DocumentResponse] = []

    class Config:
        from_attributes = True
+
class PreviewRequest(BaseModel):
    """Request to preview chunking of selected documents."""
    document_ids: List[int]
    chunk_size: int = 1000     # characters per chunk
    chunk_overlap: int = 200   # overlap between consecutive chunks
\ No newline at end of file
diff --git a/rag-web-ui/backend/app/schemas/testing.py b/rag-web-ui/backend/app/schemas/testing.py
new file mode 100644
index 0000000..5cf06a6
--- /dev/null
+++ b/rag-web-ui/backend/app/schemas/testing.py
@@ -0,0 +1,59 @@
+from typing import Dict, List, Optional
+
+from pydantic import BaseModel, Field
+
+
class TestItemEntry(BaseModel):
    """One decomposed test item produced by the testing pipeline."""
    id: str
    content: str
+
+
class TestCaseEntry(BaseModel):
    """A concrete test case derived from a test item (`item_id`)."""
    id: str
    item_id: str
    operation_steps: List[str]
    test_content: str
    expected_result_placeholder: str
+
+
class ExpectedResultEntry(BaseModel):
    """Expected result filled in for a generated test case (`case_id`)."""
    id: str
    case_id: str
    result: str
+
+
class StepLogEntry(BaseModel):
    """Telemetry for one pipeline step (for the debug view)."""
    step_name: str
    input_summary: str
    output_summary: str
    success: bool
    fallback_used: bool
    duration_ms: float
+
+
class TestingPipelineRequest(BaseModel):
    """Input to the testing pipeline; Field(...) bounds cap generation size."""
    requirement_text: str = Field(..., min_length=1)
    requirement_type: Optional[str] = None
    knowledge_base_ids: List[int] = []
    retrieval_top_k: int = Field(default=8, ge=1, le=20)
    knowledge_context: Optional[str] = None
    use_model_generation: bool = True
    max_items_per_group: int = Field(default=12, ge=4, le=30)
    cases_per_item: int = Field(default=2, ge=1, le=5)
    max_focus_points: int = Field(default=6, ge=3, le=12)
    max_llm_calls: int = Field(default=10, ge=0, le=100)
    debug: bool = False
+
+
class TestingPipelineResponse(BaseModel):
    """Full pipeline output: items, cases, expected results, and telemetry."""
    trace_id: str
    requirement_type: str
    reason: str
    candidates: List[str]
    # Dicts are keyed by group name; each group holds ordered entries.
    test_items: Dict[str, List[TestItemEntry]]
    test_cases: Dict[str, List[TestCaseEntry]]
    expected_results: Dict[str, List[ExpectedResultEntry]]
    formatted_output: str
    pipeline_summary: str
    knowledge_used: bool = False
    step_logs: List[StepLogEntry] = []
diff --git a/rag-web-ui/backend/app/schemas/token.py b/rag-web-ui/backend/app/schemas/token.py
new file mode 100644
index 0000000..cbfeefa
--- /dev/null
+++ b/rag-web-ui/backend/app/schemas/token.py
@@ -0,0 +1,9 @@
+from pydantic import BaseModel
+from typing import Optional
+
class Token(BaseModel):
    """OAuth2-style bearer token response."""
    access_token: str
    token_type: str
+
class TokenPayload(BaseModel):
    """Decoded JWT payload; `sub` carries the user id."""
    sub: Optional[int] = None
\ No newline at end of file
diff --git a/rag-web-ui/backend/app/schemas/tooling.py b/rag-web-ui/backend/app/schemas/tooling.py
new file mode 100644
index 0000000..a11fea3
--- /dev/null
+++ b/rag-web-ui/backend/app/schemas/tooling.py
@@ -0,0 +1,52 @@
+from datetime import datetime
+from typing import Any, Dict, List, Optional
+
+from pydantic import BaseModel
+
+
class ToolDefinitionResponse(BaseModel):
    """Public description of an available tool and its I/O JSON schemas."""
    name: str
    version: str
    description: str
    input_schema: Dict[str, Any]
    output_schema: Dict[str, Any]
+
+
class SRSToolCreateJobResponse(BaseModel):
    """Acknowledgement returned when an SRS-extraction job is queued."""
    job_id: int
    status: str
+
+
class SRSToolJobStatusResponse(BaseModel):
    """Polling response for a job; `extraction_id` appears once finished."""
    job_id: int
    tool_name: str
    status: str
    error_message: Optional[str] = None
    extraction_id: Optional[int] = None
    started_at: Optional[datetime] = None
    completed_at: Optional[datetime] = None
+
+
class SRSToolRequirementItem(BaseModel):
    """One extracted requirement; camelCase field names match the frontend."""
    id: str
    title: str
    description: str
    priority: str
    acceptanceCriteria: List[str]
    sourceField: str
    sectionNumber: Optional[str] = None
    sectionTitle: Optional[str] = None
    requirementType: Optional[str] = None
    sortOrder: int
+
+
class SRSToolResultResponse(BaseModel):
    """Complete extraction result for one finished job."""
    jobId: int
    documentName: str
    generatedAt: str
    statistics: Dict[str, Any]
    requirements: List[SRSToolRequirementItem]
+
+
class SRSToolRequirementsSaveRequest(BaseModel):
    """Payload to persist user-edited requirements back to the server."""
    requirements: List[SRSToolRequirementItem]
diff --git a/rag-web-ui/backend/app/schemas/user.py b/rag-web-ui/backend/app/schemas/user.py
new file mode 100644
index 0000000..406e1b9
--- /dev/null
+++ b/rag-web-ui/backend/app/schemas/user.py
@@ -0,0 +1,23 @@
+from pydantic import BaseModel, EmailStr
+from typing import Optional
+from datetime import datetime
+
class UserBase(BaseModel):
    """Common user fields; email is validated by EmailStr."""
    email: EmailStr
    username: str
    is_active: bool = True
    is_superuser: bool = False
+
class UserCreate(UserBase):
    """Registration payload; the plaintext password is hashed server-side."""
    password: str
+
class UserUpdate(UserBase):
    """Update payload; password change is optional."""
    password: Optional[str] = None
+
class UserResponse(UserBase):
    """User as returned by the API (never includes password material)."""
    id: int
    created_at: datetime
    updated_at: datetime

    class Config:
        from_attributes = True
\ No newline at end of file
diff --git a/rag-web-ui/backend/app/services/__init__.py b/rag-web-ui/backend/app/services/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/rag-web-ui/backend/app/services/api_key.py b/rag-web-ui/backend/app/services/api_key.py
new file mode 100644
index 0000000..69b0ca4
--- /dev/null
+++ b/rag-web-ui/backend/app/services/api_key.py
@@ -0,0 +1,61 @@
+from typing import List, Optional
+from datetime import datetime
+import secrets
+from sqlalchemy.orm import Session
+
+from app.models.api_key import APIKey
+from app.schemas.api_key import APIKeyCreate, APIKeyUpdate
+
class APIKeyService:
    """Stateless CRUD helpers for APIKey rows.

    Every mutating method commits the session immediately and returns a
    refreshed instance, so callers never hold stale state.
    """

    @staticmethod
    def get_api_keys(db: Session, user_id: int, skip: int = 0, limit: int = 100) -> List[APIKey]:
        """Return one page (skip/limit) of the given user's keys."""
        return (
            db.query(APIKey)
            .filter(APIKey.user_id == user_id)
            .offset(skip)
            .limit(limit)
            .all()
        )

    @staticmethod
    def create_api_key(db: Session, user_id: int, name: str) -> APIKey:
        """Mint and persist a new key: 'sk-' + 64 hex chars from `secrets`."""
        api_key = APIKey(
            key=f"sk-{secrets.token_hex(32)}",
            name=name,
            user_id=user_id,
            is_active=True
        )
        db.add(api_key)
        db.commit()
        db.refresh(api_key)
        return api_key

    @staticmethod
    def get_api_key(db: Session, api_key_id: int) -> Optional[APIKey]:
        """Look up a key by primary id; None when absent."""
        return db.query(APIKey).filter(APIKey.id == api_key_id).first()

    @staticmethod
    def get_api_key_by_key(db: Session, key: str) -> Optional[APIKey]:
        """Look up a key by its secret value; None when absent."""
        return db.query(APIKey).filter(APIKey.key == key).first()

    @staticmethod
    def update_api_key(db: Session, api_key: APIKey, update_data: APIKeyUpdate) -> APIKey:
        """Apply only the fields explicitly set on the update payload."""
        for field, value in update_data.model_dump(exclude_unset=True).items():
            setattr(api_key, field, value)
        db.add(api_key)
        db.commit()
        db.refresh(api_key)
        return api_key

    @staticmethod
    def delete_api_key(db: Session, api_key: APIKey) -> None:
        """Hard-delete the key row."""
        db.delete(api_key)
        db.commit()

    @staticmethod
    def update_last_used(db: Session, api_key: APIKey) -> APIKey:
        """Stamp last_used_at with naive UTC now and persist."""
        api_key.last_used_at = datetime.utcnow()
        db.add(api_key)
        db.commit()
        db.refresh(api_key)
        return api_key
\ No newline at end of file
diff --git a/rag-web-ui/backend/app/services/chat_service.py b/rag-web-ui/backend/app/services/chat_service.py
new file mode 100644
index 0000000..01a2238
--- /dev/null
+++ b/rag-web-ui/backend/app/services/chat_service.py
@@ -0,0 +1,532 @@
+import base64
+import json
+import re
+from collections import defaultdict
+from typing import Any, AsyncGenerator, Dict, List, Optional
+
+from app.core.config import settings
+from app.models.chat import Message
+from app.models.knowledge import Document, KnowledgeBase
+from app.services.embedding.embedding_factory import EmbeddingsFactory
+from app.services.fusion_prompts import (
+ GENERAL_CHAT_PROMPT_TEMPLATE,
+ GRAPH_GLOBAL_PROMPT_TEMPLATE,
+ GRAPH_LOCAL_PROMPT_TEMPLATE,
+ HYBRID_RAG_PROMPT_TEMPLATE,
+)
+from app.services.graph.graphrag_adapter import GraphRAGAdapter
+from app.services.intent_router import route_intent
+from app.services.llm.llm_factory import LLMFactory
+from app.services.reranker.external_api import ExternalRerankerClient
+from app.services.retrieval.multi_kb_retriever import MultiKBRetriever, format_retrieval_context
+from app.services.testing_pipeline.pipeline import run_testing_pipeline
+from app.services.testing_pipeline.rules import REQUIREMENT_TYPES
+from app.services.vector_store import VectorStoreFactory
+
+
# Keywords naming a testing artifact the user may ask for (test items,
# test cases, expected results, ...). Used as the "target" half of the
# intent heuristic in _is_testing_generation_request.
TESTING_TARGET_KEYWORDS = [
    "测试项",
    "测试用例",
    "预期成果",
    "需求类型",
    "测试分解",
    "分解",
    "正常测试",
    "异常测试",
    "测试充分性",
]

# Imperative verbs (generate / write / design / ...) forming the "action"
# half of the same heuristic.
TESTING_ACTION_KEYWORDS = [
    "生成",
    "输出",
    "给出",
    "写",
    "编写",
    "设计",
    "整理",
    "列出",
    "提供",
    "制定",
]

# Maps colloquial or abbreviated requirement-type mentions to the canonical
# names checked against REQUIREMENT_TYPES in
# _extract_requirement_type_from_query.
TYPE_ALIAS_MAP = {
    "接口测试": "外部接口测试",
    "ui测试": "人机交互界面测试",
    "界面测试": "人机交互界面测试",
    "恢复测试": "恢复性测试",
    "可靠性": "可靠性测试",
    "安全性": "安全性测试",
    "边界": "边界测试",
    "安装": "安装性测试",
    "互操作": "互操作性测试",
    "敏感性": "敏感性测试",
    "充分性": "测试充分性要求",
}
+
+
+def _escape_stream_text(text: str) -> str:
+ return text.replace('"', '\\"').replace("\n", "\\n")
+
+
+def _extract_stream_text(chunk: Any) -> str:
+ content = getattr(chunk, "content", chunk)
+
+ if isinstance(content, str):
+ return content
+
+ if isinstance(content, list):
+ parts: List[str] = []
+ for item in content:
+ if isinstance(item, str):
+ parts.append(item)
+ elif isinstance(item, dict):
+ maybe_text = item.get("text")
+ if isinstance(maybe_text, str):
+ parts.append(maybe_text)
+ else:
+ parts.append(str(item))
+ return "".join(parts)
+
+ return str(content)
+
+
+def _preview_rows(rows: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
+ preview = []
+ for row in rows[:10]:
+ doc = row["document"]
+ metadata = doc.metadata or {}
+ preview.append(
+ {
+ "kb_id": row.get("kb_id"),
+ "source": metadata.get("source") or metadata.get("file_name") or "unknown",
+ "chunk_id": metadata.get("chunk_id") or "unknown",
+ "score": row.get("final_score", 0),
+ "reranker_score": row.get("reranker_score"),
+ }
+ )
+ return preview
+
+
+def _context_rows(rows: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
+ context_rows: List[Dict[str, Any]] = []
+ for row in rows:
+ doc = row["document"]
+ metadata = dict(doc.metadata or {})
+
+ if "kb_id" not in metadata and row.get("kb_id") is not None:
+ metadata["kb_id"] = row.get("kb_id")
+ metadata.setdefault("retrieval_score", row.get("final_score", 0))
+ if row.get("reranker_score") is not None:
+ metadata.setdefault("reranker_score", row.get("reranker_score"))
+
+ context_rows.append(
+ {
+ "page_content": doc.page_content.strip(),
+ "metadata": metadata,
+ }
+ )
+ return context_rows
+
+
def _build_local_graph_context_fallback(rows: List[Dict[str, Any]]) -> str:
    """Assemble a GraphRAG-style local context from plain retrieval rows.

    Collects entities/relations stashed in each chunk's metadata (keys
    "extracted_entities" / "extracted_relations") plus the raw chunk text as
    evidence, and renders the three Chinese-labelled sections the local
    graph prompt expects. Caps: 80 entities (sorted), 120 relations,
    8 evidence snippets. Each section falls back to a fixed notice string
    when empty.
    """
    entities = set()
    relations: List[Dict[str, Any]] = []
    evidences: List[str] = []

    for row in rows:
        doc = row["document"]
        metadata = doc.metadata or {}

        for ent in metadata.get("extracted_entities", []):
            entities.add(str(ent))

        for rel in metadata.get("extracted_relations", []):
            if isinstance(rel, dict):
                relations.append(rel)

        evidences.append(doc.page_content.strip())

    entity_block = "\n".join(f"- {name}" for name in sorted(entities)[:80]) or "- 暂无结构化实体,已使用向量检索回退。"

    relation_lines: List[str] = []
    for rel in relations[:120]:
        # Tolerate several producer field-name variants for endpoints/type.
        src = rel.get("source") or rel.get("src") or rel.get("src_id") or "UNKNOWN"
        tgt = rel.get("target") or rel.get("tgt") or rel.get("tgt_id") or "UNKNOWN"
        rel_type = rel.get("type") or rel.get("relation_type") or "其他"
        desc = rel.get("description") or ""
        relation_lines.append(f"- {src} -> {tgt} | 类型={rel_type} | 说明={desc}")

    relation_block = "\n".join(relation_lines) or "- 暂无结构化关系,已使用证据片段回答。"

    evidence_block = "\n\n".join(
        f"[证据{i}] {snippet}" for i, snippet in enumerate(evidences[:8], start=1)
    )
    if not evidence_block:
        evidence_block = "无可用证据。"

    return (
        "实体列表:\n"
        f"{entity_block}\n\n"
        "关系列表:\n"
        f"{relation_block}\n\n"
        "原文证据:\n"
        f"{evidence_block}"
    )
+
+
+def _build_global_community_context_fallback(rows: List[Dict[str, Any]]) -> str:
+ groups: Dict[str, List[str]] = defaultdict(list)
+
+ for row in rows:
+ doc = row["document"]
+ metadata = doc.metadata or {}
+ community_ids = metadata.get("community_ids") or []
+
+ if isinstance(community_ids, list) and community_ids:
+ keys = [str(item) for item in community_ids]
+ else:
+ source = metadata.get("source") or metadata.get("file_name") or "unknown"
+ keys = [f"source:{source}"]
+
+ for key in keys:
+ groups[key].append(doc.page_content.strip())
+
+ if not groups:
+ return "暂无社区摘要数据,已回退为基于证据片段的全局总结。"
+
+ lines: List[str] = []
+ for idx, (community_id, snippets) in enumerate(groups.items(), start=1):
+ merged = " ".join(snippets[:3])
+ lines.append(f"社区{idx} ({community_id}) 摘要: {merged}")
+
+ return "\n\n".join(lines)
+
+
async def _build_kb_vector_stores(db: Any, knowledge_bases: List[KnowledgeBase]) -> List[Dict[str, Any]]:
    """Create one vector-store handle per knowledge base that has documents.

    Knowledge bases without any Document rows are skipped, so the result may
    be shorter than the input. Each entry is ``{"kb_id": ..., "store": ...}``
    and the backing collection is named ``kb_<id>``.
    """
    embeddings = EmbeddingsFactory.create()
    kb_vector_stores: List[Dict[str, Any]] = []

    for kb in knowledge_bases:
        documents = db.query(Document).filter(Document.knowledge_base_id == kb.id).all()
        if not documents:
            # Nothing to search — don't open an empty collection.
            continue

        store = VectorStoreFactory.create(
            store_type=settings.VECTOR_STORE_TYPE,
            collection_name=f"kb_{kb.id}",
            embedding_function=embeddings,
        )
        kb_vector_stores.append({"kb_id": kb.id, "store": store})

    return kb_vector_stores
+
+
def _build_reranker_client() -> ExternalRerankerClient:
    """Construct the external reranker client from application settings."""
    return ExternalRerankerClient(
        api_url=settings.RERANKER_API_URL,
        api_key=settings.RERANKER_API_KEY,
        model=settings.RERANKER_MODEL,
        timeout_seconds=settings.RERANKER_TIMEOUT_SECONDS,
    )
+
+
def _is_testing_generation_request(query: str) -> bool:
    """Heuristically decide whether the user is asking for test generation.

    Triggers on the /testing slash command, explicit tool/skill mentions,
    a target keyword paired with an action keyword, or common Chinese
    imperative phrasings around test items / cases / expected results.
    """
    text = (query or "").strip()
    if not text:
        return False

    normalized = text.lower()
    if normalized.startswith("/testing"):
        return True

    tool_tokens = (
        "testing_orchestrator",
        "testing-orchestrator",
        "identify_requirement_type",
        "identify-requirement-type",
    )
    if any(token in normalized for token in tool_tokens):
        return True

    mentions_target = any(kw in text for kw in TESTING_TARGET_KEYWORDS)
    mentions_action = any(kw in text for kw in TESTING_ACTION_KEYWORDS)
    if mentions_target and mentions_action:
        return True

    if any(kw in text for kw in ("测试项", "测试用例", "预期成果")):
        if re.search(r"(请|帮|给|麻烦).{0,12}(写|生成|设计|整理|编写|列出|提供|制定)", text):
            return True
        if text.startswith(("生成", "编写", "设计", "整理", "输出", "列出", "提供", "制定")):
            return True

    return False
+
+
def _extract_requirement_type_from_query(query: str) -> Optional[str]:
    """Map free-form query text to a canonical requirement type, if any.

    Exact REQUIREMENT_TYPES names win over aliases; aliases are then checked
    against both the raw and the lowercased text. Returns None when nothing
    matches.
    """
    text = (query or "").strip()
    if not text:
        return None

    direct_hit = next((rt for rt in REQUIREMENT_TYPES if rt in text), None)
    if direct_hit is not None:
        return direct_hit

    lowered = text.lower()
    for alias, canonical in TYPE_ALIAS_MAP.items():
        if alias in text or alias in lowered:
            return canonical

    return None
+
+
async def generate_response(
    query: str,
    messages: dict,
    knowledge_base_ids: List[int],
    chat_id: int,
    db: Any,
) -> AsyncGenerator[str, None]:
    """Stream a chat answer in the data-stream wire format used by the frontend.

    Protocol (as emitted below): text chunks are yielded as ``0:"..."`` lines;
    the very first chunk is a base64-encoded JSON context payload followed by
    the ``__LLM_RESPONSE__`` separator; the stream ends with a ``d:{...}``
    finish event.  Errors are emitted as ``3:...`` lines.  Both the user
    message and the final assistant text are persisted through *db*.
    """
    try:
        # Persist the incoming user message and an empty assistant placeholder.
        user_message = Message(content=query, role="user", chat_id=chat_id)
        db.add(user_message)
        db.commit()

        bot_message = Message(content="", role="assistant", chat_id=chat_id)
        db.add(bot_message)
        db.commit()

        # Fast path: test-generation requests skip intent routing and run the
        # dedicated testing pipeline instead of the LLM chat chains.
        if _is_testing_generation_request(query):
            explicit_type = _extract_requirement_type_from_query(query)

            retrieval_rows: List[Dict[str, Any]] = []
            knowledge_context = ""
            kb_vector_stores = []
            if knowledge_base_ids:
                testing_kbs = (
                    db.query(KnowledgeBase)
                    .filter(KnowledgeBase.id.in_(knowledge_base_ids))
                    .all()
                )
                kb_vector_stores = await _build_kb_vector_stores(db, testing_kbs)

            if kb_vector_stores:
                # Retrieval only feeds the pipeline context; no reranker here.
                testing_retriever = MultiKBRetriever(
                    reranker_weight=settings.RERANKER_WEIGHT,
                )
                retrieval_rows = await testing_retriever.retrieve(
                    query=query,
                    kb_vector_stores=kb_vector_stores,
                    fetch_k_per_kb=16,
                    top_k=8,
                )
                if retrieval_rows:
                    knowledge_context = format_retrieval_context(retrieval_rows)

            pipeline_result = run_testing_pipeline(
                user_requirement_text=query,
                requirement_type_input=explicit_type,
                debug=True,
                knowledge_context=knowledge_context,
                use_model_generation=True,
                max_items_per_group=6,
                cases_per_item=1,
                max_focus_points=6,
                max_llm_calls=2,
            )

            # Context payload shown to the frontend before the answer text.
            context_payload = {
                "route": {
                    "intent": "TESTING",
                    "reason": "命中测试生成意图,已自动调用测试工具链。",
                },
                "intent": "TESTING",
                "skill_profile": "testing-orchestrator",
                "tool_chain": [
                    "identify-requirement-type",
                    "decompose-test-items",
                    "generate-test-cases",
                    "build_expected_results",
                    "format_output",
                ],
                "selected_chain": "TESTING_PIPELINE",
                "graph_used": False,
                "reranker_enabled": False,
                "retrieval_preview": _preview_rows(retrieval_rows),
                "context": _context_rows(retrieval_rows),
                "testing_pipeline": {
                    "trace_id": pipeline_result.get("trace_id"),
                    "requirement_type": pipeline_result.get("requirement_type"),
                    "candidates": pipeline_result.get("candidates", []),
                    "pipeline_summary": pipeline_result.get("pipeline_summary", ""),
                    "knowledge_used": pipeline_result.get("knowledge_used", False),
                    "step_logs": pipeline_result.get("step_logs", []),
                },
            }

            escaped_context = json.dumps(context_payload, ensure_ascii=False)
            base64_context = base64.b64encode(escaped_context.encode()).decode()
            separator = "__LLM_RESPONSE__"

            full_response = f"{base64_context}{separator}"
            yield f'0:"{base64_context}{separator}"\n'

            rendered_text = pipeline_result.get("formatted_output", "").strip()
            if not rendered_text:
                rendered_text = "未生成测试内容,请补充更明确的需求后重试。"

            full_response += rendered_text
            yield f'0:"{_escape_stream_text(rendered_text)}"\n'
            yield 'd:{"finishReason":"stop","usage":{"promptTokens":0,"completionTokens":0}}\n'

            bot_message.content = full_response
            db.commit()
            return

        # Normal chat path: route the request to one of the A/B/C/D chains.
        knowledge_bases = (
            db.query(KnowledgeBase)
            .filter(KnowledgeBase.id.in_(knowledge_base_ids))
            .all()
        )
        kb_ids = [kb.id for kb in knowledge_bases]

        llm = LLMFactory.create()
        decision = await route_intent(llm=llm, query=query, messages=messages)
        intent = decision["intent"]

        # Without any usable vector stores the KB-backed chains degrade to A.
        kb_vector_stores = await _build_kb_vector_stores(db, knowledge_bases)
        if intent in {"B", "C", "D"} and not kb_vector_stores:
            intent = "A"
            decision = {
                "intent": "A",
                "reason": "未发现可用知识库向量集合,已降级为通用对话路。",
            }

        reranker_client = _build_reranker_client()
        retriever = MultiKBRetriever(
            reranker_client=reranker_client,
            reranker_weight=settings.RERANKER_WEIGHT,
        )

        retrieval_rows: List[Dict[str, Any]] = []
        graph_used = False
        selected_chain = intent
        prompt_text = ""

        if intent == "A":
            # General chat: no retrieval.
            prompt_text = GENERAL_CHAT_PROMPT_TEMPLATE.format(query=query)

        elif intent == "B":
            # Hybrid RAG: vector retrieval feeds the evidence context.
            retrieval_rows = await retriever.retrieve(
                query=query,
                kb_vector_stores=kb_vector_stores,
                fetch_k_per_kb=16,
                top_k=12,
            )
            context = format_retrieval_context(retrieval_rows) or "无可用证据。"
            prompt_text = HYBRID_RAG_PROMPT_TEMPLATE.format(query=query, context=context)

        elif intent == "C":
            # Graph local search; falls back to vector retrieval on failure.
            graph_context = ""
            used_kb_ids: List[int] = []
            if settings.GRAPHRAG_ENABLED and kb_ids:
                try:
                    adapter = GraphRAGAdapter()
                    graph_context, used_kb_ids = await adapter.local_context_multi(
                        kb_ids,
                        query,
                        top_k=settings.GRAPHRAG_LOCAL_TOP_K,
                        level=settings.GRAPHRAG_QUERY_LEVEL,
                    )
                    graph_used = bool(graph_context)
                except Exception:
                    graph_context = ""

            if not graph_context:
                retrieval_rows = await retriever.retrieve(
                    query=query,
                    kb_vector_stores=kb_vector_stores,
                    fetch_k_per_kb=18,
                    top_k=14,
                )
                graph_context = _build_local_graph_context_fallback(retrieval_rows)
                selected_chain = "C_fallback_B"

            else:
                selected_chain = "C_graph"

            prompt_text = GRAPH_LOCAL_PROMPT_TEMPLATE.format(
                query=query,
                graph_context=graph_context,
            )

        else:
            # Intent D: graph global search; same fallback strategy as C.
            community_context = ""
            if settings.GRAPHRAG_ENABLED and kb_ids:
                try:
                    adapter = GraphRAGAdapter()
                    community_context, used_kb_ids = await adapter.global_context_multi(
                        kb_ids,
                        query,
                        level=settings.GRAPHRAG_QUERY_LEVEL,
                    )
                    graph_used = bool(community_context)
                except Exception:
                    community_context = ""

            if not community_context:
                retrieval_rows = await retriever.retrieve(
                    query=query,
                    kb_vector_stores=kb_vector_stores,
                    fetch_k_per_kb=20,
                    top_k=14,
                )
                community_context = _build_global_community_context_fallback(retrieval_rows)
                selected_chain = "D_fallback_B"
            else:
                selected_chain = "D_graph"

            prompt_text = GRAPH_GLOBAL_PROMPT_TEMPLATE.format(
                query=query,
                community_context=community_context,
            )

        context_payload = {
            "route": decision,
            "intent": intent,
            "selected_chain": selected_chain,
            "graph_used": graph_used,
            "reranker_enabled": reranker_client.enabled,
            "retrieval_preview": _preview_rows(retrieval_rows),
            "context": _context_rows(retrieval_rows),
        }
        escaped_context = json.dumps(context_payload, ensure_ascii=False)
        base64_context = base64.b64encode(escaped_context.encode()).decode()
        separator = "__LLM_RESPONSE__"

        full_response = f"{base64_context}{separator}"
        yield f'0:"{base64_context}{separator}"\n'

        # Stream the model answer chunk by chunk while accumulating it for
        # persistence.
        async for chunk in llm.astream(prompt_text):
            text = _extract_stream_text(chunk)
            if not text:
                continue
            full_response += text
            yield f'0:"{_escape_stream_text(text)}"\n'

        yield 'd:{"finishReason":"stop","usage":{"promptTokens":0,"completionTokens":0}}\n'

        bot_message.content = full_response
        db.commit()

    except Exception as e:
        # NOTE(review): error_message is interpolated unescaped into the
        # stream line — confirm the frontend tolerates quotes/newlines here.
        error_message = f"Error generating response: {str(e)}"
        print(error_message)
        yield "3:{text}\n".format(text=error_message)

        if "bot_message" in locals():
            bot_message.content = error_message
            db.commit()
    finally:
        db.close()
diff --git a/rag-web-ui/backend/app/services/chunk_record.py b/rag-web-ui/backend/app/services/chunk_record.py
new file mode 100644
index 0000000..2979245
--- /dev/null
+++ b/rag-web-ui/backend/app/services/chunk_record.py
@@ -0,0 +1,69 @@
+from typing import Optional, List, Dict, Set
+from sqlalchemy import create_engine, text
+from sqlalchemy.orm import Session
+from app.core.config import settings
+from app.models.knowledge import DocumentChunk
+import json
+
class ChunkRecord:
    """Manages chunk-level record keeping for incremental updates.

    Each method opens a short-lived session against a per-instance engine,
    scoping every query to the knowledge base given at construction time.
    """

    def __init__(self, kb_id: int):
        self.kb_id = kb_id
        self.engine = create_engine(settings.get_database_url)

    def list_chunks(self, file_name: Optional[str] = None) -> Set[str]:
        """Return the set of stored chunk hashes, optionally for one file."""
        with Session(self.engine) as session:
            stmt = session.query(DocumentChunk.hash).filter(
                DocumentChunk.kb_id == self.kb_id
            )
            if file_name:
                stmt = stmt.filter(DocumentChunk.file_name == file_name)
            return {hash_value for (hash_value,) in stmt.all()}

    def add_chunks(self, chunks: List[Dict]):
        """Upsert chunk rows; merge handles both inserts and updates."""
        if not chunks:
            return

        with Session(self.engine) as session:
            for payload in chunks:
                record = DocumentChunk(
                    id=payload['id'],
                    kb_id=payload['kb_id'],
                    document_id=payload['document_id'],
                    file_name=payload['file_name'],
                    chunk_metadata=payload['metadata'],
                    hash=payload['hash'],
                )
                # merge instead of add so re-processing the same chunk updates
                # the existing row rather than violating the primary key.
                session.merge(record)
            session.commit()

    def delete_chunks(self, chunk_ids: List[str]):
        """Delete the chunks with the given IDs from this knowledge base."""
        if not chunk_ids:
            return

        with Session(self.engine) as session:
            (
                session.query(DocumentChunk)
                .filter(
                    DocumentChunk.kb_id == self.kb_id,
                    DocumentChunk.id.in_(chunk_ids),
                )
                .delete(synchronize_session=False)
            )
            session.commit()

    def get_deleted_chunks(self, current_hashes: Set[str], file_name: Optional[str] = None) -> List[str]:
        """Return IDs of stored chunks whose hash is absent from *current_hashes*."""
        with Session(self.engine) as session:
            stmt = session.query(DocumentChunk.id).filter(
                DocumentChunk.kb_id == self.kb_id
            )
            if file_name:
                stmt = stmt.filter(DocumentChunk.file_name == file_name)
            if current_hashes:
                stmt = stmt.filter(DocumentChunk.hash.notin_(current_hashes))
            return [chunk_id for (chunk_id,) in stmt.all()]
\ No newline at end of file
diff --git a/rag-web-ui/backend/app/services/document_processor.py b/rag-web-ui/backend/app/services/document_processor.py
new file mode 100644
index 0000000..cca6f79
--- /dev/null
+++ b/rag-web-ui/backend/app/services/document_processor.py
@@ -0,0 +1,582 @@
+import logging
+import os
+import hashlib
+import tempfile
+import traceback
+import json
+from app.db.session import SessionLocal
+from io import BytesIO
+from typing import Optional, List, Dict, Any
+from fastapi import UploadFile
+from langchain_community.document_loaders import (
+ PyPDFLoader,
+ Docx2txtLoader,
+ UnstructuredMarkdownLoader,
+ TextLoader
+)
+from langchain.text_splitter import RecursiveCharacterTextSplitter
+from langchain_core.documents import Document as LangchainDocument
+from pydantic import BaseModel
+from sqlalchemy.orm import Session
+from app.core.config import settings
+from app.core.minio import get_minio_client
+from app.models.knowledge import ProcessingTask, Document, DocumentChunk
+from app.services.chunk_record import ChunkRecord
+from minio.error import MinioException
+from minio.commonconfig import CopySource
+from app.services.vector_store import VectorStoreFactory
+from app.services.embedding.embedding_factory import EmbeddingsFactory
+
class UploadResult(BaseModel):
    """Outcome of uploading a document to object storage."""
    file_path: str       # MinIO object path, e.g. "kb_<id>/<file_name>"
    file_name: str       # sanitized file name
    file_size: int       # size in bytes
    content_type: str    # MIME type inferred from the extension
    file_hash: str       # SHA-256 of the raw file content
+
class TextChunk(BaseModel):
    """A single split chunk of a document, with its loader metadata."""
    content: str
    metadata: Optional[Dict] = None
+
class PreviewResult(BaseModel):
    """Chunking preview: the chunks plus their total count."""
    chunks: List[TextChunk]
    total_chunks: int
+
+
+def _estimate_token_count(text: str) -> int:
+ # Lightweight estimation without adding tokenizer dependencies.
+ return len(text)
+
+
def _build_enriched_chunk_metadata(
    *,
    source_metadata: Optional[Dict[str, Any]],
    chunk_id: str,
    file_name: str,
    file_path: str,
    kb_id: int,
    document_id: int,
    chunk_index: int,
    chunk_text: str,
) -> Dict[str, Any]:
    """Merge loader metadata with chunk-level bookkeeping fields.

    Loader-provided keys are kept, then the standard chunk fields are written
    on top (overriding any collisions).  Graph-linked fields default to empty
    lists so downstream graph/vector federation can rely on their presence.
    """
    src = source_metadata or {}

    enriched: Dict[str, Any] = dict(src)
    enriched.update({
        "source": file_name,
        "chunk_id": chunk_id,
        "file_name": file_name,
        "file_path": file_path,
        "kb_id": kb_id,
        "document_id": document_id,
        "chunk_index": chunk_index,
        "chunk_text": chunk_text,
        "token_count": _estimate_token_count(chunk_text),
        "language": src.get("language", "zh"),
        "source_type": "document",
        "mission_phase": src.get("mission_phase"),
        "section_title": src.get("section_title"),
        "publish_time": src.get("publish_time"),
        # Keep graph-linked fields for future graph/vector federation.
        "extracted_entities": src.get("extracted_entities", []),
        "extracted_entity_types": src.get("extracted_entity_types", []),
        "extracted_relations": src.get("extracted_relations", []),
        "graph_node_ids": src.get("graph_node_ids", []),
        "graph_edge_ids": src.get("graph_edge_ids", []),
        "community_ids": src.get("community_ids", []),
    })
    return enriched
+
+
+def _sanitize_metadata_for_vector_store(metadata: Optional[Dict[str, Any]]) -> Dict[str, Any]:
+ """Normalize metadata to satisfy Chroma's strict metadata constraints."""
+ if not metadata:
+ return {}
+
+ sanitized: Dict[str, Any] = {}
+ scalar_types = (str, int, float, bool)
+
+ for key, value in metadata.items():
+ if value is None:
+ continue
+
+ if isinstance(value, scalar_types):
+ sanitized[key] = value
+ continue
+
+ if isinstance(value, list):
+ primitive_items = [item for item in value if isinstance(item, scalar_types)]
+ if primitive_items:
+ sanitized[key] = primitive_items
+ elif value:
+ sanitized[key] = json.dumps(value, ensure_ascii=False)
+ continue
+
+ if isinstance(value, dict):
+ sanitized[key] = json.dumps(value, ensure_ascii=False)
+ continue
+
+ sanitized[key] = str(value)
+
+ return sanitized
+
async def process_document(file_path: str, file_name: str, kb_id: int, document_id: int, chunk_size: int = 1000, chunk_overlap: int = 200) -> None:
    """Process document and store in vector database with incremental updates.

    Re-chunks the file, diffs chunk hashes against the existing records for
    the same file, inserts only new/changed chunks (DB + vector store, plus
    optional GraphRAG ingestion), and removes chunks that disappeared.

    Args:
        file_path: MinIO object path of the stored file.
        file_name: Name the chunks are recorded under.
        kb_id: Target knowledge base (collection ``kb_<kb_id>``).
        document_id: Owning document row ID, stored in chunk metadata.
        chunk_size / chunk_overlap: Splitter parameters.

    Raises:
        Re-raises any processing error after logging it.
    """
    logger = logging.getLogger(__name__)

    try:
        preview_result = await preview_document(file_path, chunk_size, chunk_overlap)

        # Initialize embeddings
        logger.info("Initializing OpenAI embeddings...")
        embeddings = EmbeddingsFactory.create()

        logger.info(f"Initializing vector store with collection: kb_{kb_id}")
        vector_store = VectorStoreFactory.create(
            store_type=settings.VECTOR_STORE_TYPE,
            collection_name=f"kb_{kb_id}",
            embedding_function=embeddings,
        )

        # Initialize chunk record manager
        chunk_manager = ChunkRecord(kb_id)

        # Get existing chunk hashes for this file
        existing_hashes = chunk_manager.list_chunks(file_name)

        # Prepare new chunks
        new_chunks = []
        current_hashes = set()
        documents_to_update = []

        for i, chunk in enumerate(preview_result.chunks):
            # Calculate chunk hash (content + loader metadata identifies a chunk)
            chunk_hash = hashlib.sha256(
                (chunk.content + str(chunk.metadata)).encode()
            ).hexdigest()
            current_hashes.add(chunk_hash)

            # Skip if chunk hasn't changed
            if chunk_hash in existing_hashes:
                continue

            # Create unique ID for the chunk
            chunk_id = hashlib.sha256(
                f"{kb_id}:{file_name}:{chunk_hash}".encode()
            ).hexdigest()

            metadata = _build_enriched_chunk_metadata(
                source_metadata=chunk.metadata,
                chunk_id=chunk_id,
                file_name=file_name,
                file_path=file_path,
                kb_id=kb_id,
                document_id=document_id,
                chunk_index=i,
                chunk_text=chunk.content,
            )
            # Vector store gets a sanitized (scalar-only) copy of the metadata.
            vector_metadata = _sanitize_metadata_for_vector_store(metadata)

            new_chunks.append({
                "id": chunk_id,
                "kb_id": kb_id,
                "document_id": document_id,
                "file_name": file_name,
                "metadata": metadata,
                "hash": chunk_hash
            })

            # Prepare document for vector store
            doc = LangchainDocument(
                page_content=chunk.content,
                metadata=vector_metadata
            )
            documents_to_update.append(doc)

        # Add new chunks to database and vector store
        if new_chunks:
            logger.info(f"Adding {len(new_chunks)} new/updated chunks")
            chunk_manager.add_chunks(new_chunks)
            vector_store.add_documents(documents_to_update)
            # GraphRAG ingestion is best-effort: failure is logged, not raised.
            if settings.GRAPHRAG_ENABLED:
                try:
                    from app.services.graph.graphrag_adapter import GraphRAGAdapter

                    graph_adapter = GraphRAGAdapter()
                    source_texts = [doc.page_content for doc in documents_to_update if doc.page_content.strip()]
                    await graph_adapter.ingest_texts(kb_id, source_texts)
                    logger.info("GraphRAG ingestion completed in incremental processing")
                except Exception as graph_exc:
                    logger.error(f"GraphRAG ingestion failed in incremental processing: {graph_exc}")

        # Delete removed chunks (present in DB but absent from this version)
        chunks_to_delete = chunk_manager.get_deleted_chunks(current_hashes, file_name)
        if chunks_to_delete:
            logger.info(f"Removing {len(chunks_to_delete)} deleted chunks")
            chunk_manager.delete_chunks(chunks_to_delete)
            vector_store.delete(chunks_to_delete)

        logger.info("Document processing completed successfully")

    except Exception as e:
        logger.error(f"Error processing document: {str(e)}")
        raise
+
async def upload_document(file: UploadFile, kb_id: int) -> UploadResult:
    """Step 1: Upload document to MinIO.

    Reads the whole upload into memory, hashes it, sanitizes the filename to
    ``[A-Za-z0-9-_.]`` and stores the object under ``kb_<kb_id>/<file_name>``.

    Returns:
        UploadResult with path, name, size, MIME type and SHA-256 hash.

    Raises:
        Re-raises any MinIO upload error after logging it.
    """
    content = await file.read()
    file_size = len(content)

    file_hash = hashlib.sha256(content).hexdigest()

    # Clean and normalize filename
    # NOTE(review): a filename made entirely of disallowed characters ends up
    # empty, yielding the object path "kb_<id>/" — confirm callers prevent this.
    file_name = "".join(c for c in file.filename if c.isalnum() or c in ('-', '_', '.')).strip()
    object_path = f"kb_{kb_id}/{file_name}"

    # Map known extensions to MIME types; everything else is a generic blob.
    content_types = {
        ".pdf": "application/pdf",
        ".docx": "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
        ".md": "text/markdown",
        ".txt": "text/plain"
    }

    _, ext = os.path.splitext(file_name)
    content_type = content_types.get(ext.lower(), "application/octet-stream")

    # Upload to MinIO
    minio_client = get_minio_client()
    try:
        minio_client.put_object(
            bucket_name=settings.MINIO_BUCKET_NAME,
            object_name=object_path,
            data=BytesIO(content),
            length=file_size,
            content_type=content_type
        )
    except Exception as e:
        logging.error(f"Failed to upload file to MinIO: {str(e)}")
        raise

    return UploadResult(
        file_path=object_path,
        file_name=file_name,
        file_size=file_size,
        content_type=content_type,
        file_hash=file_hash
    )
+
async def preview_document(file_path: str, chunk_size: int = 1000, chunk_overlap: int = 200) -> PreviewResult:
    """Step 2: Generate preview chunks.

    Downloads the object from MinIO to a temp file, loads it with a
    format-specific loader chosen by extension, splits it recursively, and
    returns the chunks without persisting anything.
    """
    # Get file from MinIO
    minio_client = get_minio_client()
    _, ext = os.path.splitext(file_path)
    ext = ext.lower()

    # Download to temp file (delete=False so the loader can reopen it by path)
    with tempfile.NamedTemporaryFile(delete=False, suffix=ext) as temp_file:
        minio_client.fget_object(
            bucket_name=settings.MINIO_BUCKET_NAME,
            object_name=file_path,
            file_path=temp_file.name
        )
        temp_path = temp_file.name

    try:
        # Select appropriate loader
        if ext == ".pdf":
            loader = PyPDFLoader(temp_path)
        elif ext == ".docx":
            loader = Docx2txtLoader(temp_path)
        elif ext == ".md":
            loader = UnstructuredMarkdownLoader(temp_path)
        else:  # Default to text loader
            loader = TextLoader(temp_path)

        # Load and split the document
        documents = loader.load()
        text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=chunk_size,
            chunk_overlap=chunk_overlap
        )
        chunks = text_splitter.split_documents(documents)

        # Convert to preview format
        preview_chunks = [
            TextChunk(
                content=chunk.page_content,
                metadata=chunk.metadata
            )
            for chunk in chunks
        ]

        return PreviewResult(
            chunks=preview_chunks,
            total_chunks=len(chunks)
        )
    finally:
        # Always remove the local temp file, even if loading/splitting failed.
        os.unlink(temp_path)
+
async def process_document_background(
    temp_path: str,
    file_name: str,
    kb_id: int,
    task_id: int,
    db: Session = None,
    chunk_size: int = 1000,
    chunk_overlap: int = 200
) -> None:
    """Process document in background.

    Full (non-incremental) pipeline driven by a ProcessingTask row: download
    the temp object from MinIO, load + split it, move the object to permanent
    storage, create the Document and DocumentChunk rows, index the chunks in
    the vector store (and optionally GraphRAG), and update task/upload status.
    On failure the transaction is rolled back and the task is marked failed.
    """
    logger = logging.getLogger(__name__)
    logger.info(f"Starting background processing for task {task_id}, file: {file_name}")

    # if we don't pass in db, create a new database session
    if db is None:
        db = SessionLocal()
        should_close_db = True
    else:
        should_close_db = False

    task = db.query(ProcessingTask).get(task_id)
    if not task:
        logger.error(f"Task {task_id} not found")
        return

    minio_client = None
    local_temp_path = None

    try:
        logger.info(f"Task {task_id}: Setting status to processing")
        task.status = "processing"
        db.commit()

        # 1. Download the file from the temporary MinIO location
        minio_client = get_minio_client()
        try:
            local_temp_path = f"/tmp/temp_{task_id}_{file_name}"  # use the system temp directory
            logger.info(f"Task {task_id}: Downloading file from MinIO: {temp_path} to {local_temp_path}")
            minio_client.fget_object(
                bucket_name=settings.MINIO_BUCKET_NAME,
                object_name=temp_path,
                file_path=local_temp_path
            )
            logger.info(f"Task {task_id}: File downloaded successfully")
        except MinioException as e:
            # Idempotent fallback: temp object may already be consumed by another task.
            # If the final document is already created, treat current task as completed.
            if "NoSuchKey" in str(e) and task.document_upload:
                existing_document = db.query(Document).filter(
                    Document.knowledge_base_id == kb_id,
                    Document.file_name == file_name,
                    Document.file_hash == task.document_upload.file_hash,
                ).first()
                if existing_document:
                    logger.warning(
                        f"Task {task_id}: Temp object missing but document already exists, "
                        f"marking task as completed (document_id={existing_document.id})"
                    )
                    task.status = "completed"
                    task.document_id = existing_document.id
                    task.error_message = None
                    task.document_upload.status = "completed"
                    task.document_upload.error_message = None
                    db.commit()
                    return

            error_msg = f"Failed to download temp file: {str(e)}"
            logger.error(f"Task {task_id}: {error_msg}")
            raise Exception(error_msg)

        try:
            # 2. Load and chunk the document
            _, ext = os.path.splitext(file_name)
            ext = ext.lower()

            logger.info(f"Task {task_id}: Loading document with extension {ext}")
            # pick the appropriate loader for the file type
            if ext == ".pdf":
                loader = PyPDFLoader(local_temp_path)
            elif ext == ".docx":
                loader = Docx2txtLoader(local_temp_path)
            elif ext == ".md":
                loader = UnstructuredMarkdownLoader(local_temp_path)
            else:  # default to the plain-text loader
                loader = TextLoader(local_temp_path)

            logger.info(f"Task {task_id}: Loading document content")
            documents = loader.load()
            logger.info(f"Task {task_id}: Document loaded successfully")

            logger.info(f"Task {task_id}: Splitting document into chunks")
            text_splitter = RecursiveCharacterTextSplitter(
                chunk_size=chunk_size,
                chunk_overlap=chunk_overlap
            )
            chunks = text_splitter.split_documents(documents)
            logger.info(f"Task {task_id}: Document split into {len(chunks)} chunks")

            # 3. Create the vector store
            logger.info(f"Task {task_id}: Initializing vector store")
            embeddings = EmbeddingsFactory.create()

            vector_store = VectorStoreFactory.create(
                store_type=settings.VECTOR_STORE_TYPE,
                collection_name=f"kb_{kb_id}",
                embedding_function=embeddings,
            )

            # 4. Move the temp object to permanent storage
            permanent_path = f"kb_{kb_id}/{file_name}"
            try:
                logger.info(f"Task {task_id}: Moving file to permanent storage")
                # copy to the permanent path
                source = CopySource(settings.MINIO_BUCKET_NAME, temp_path)
                minio_client.copy_object(
                    bucket_name=settings.MINIO_BUCKET_NAME,
                    object_name=permanent_path,
                    source=source
                )
                logger.info(f"Task {task_id}: File moved to permanent storage")

                # remove the temp object
                logger.info(f"Task {task_id}: Removing temporary file from MinIO")
                minio_client.remove_object(
                    bucket_name=settings.MINIO_BUCKET_NAME,
                    object_name=temp_path
                )
                logger.info(f"Task {task_id}: Temporary file removed")
            except MinioException as e:
                error_msg = f"Failed to move file to permanent storage: {str(e)}"
                logger.error(f"Task {task_id}: {error_msg}")
                raise Exception(error_msg)

            # 5. Create the document record
            logger.info(f"Task {task_id}: Creating document record")
            document = Document(
                file_name=file_name,
                file_path=permanent_path,
                file_hash=task.document_upload.file_hash,
                file_size=task.document_upload.file_size,
                content_type=task.document_upload.content_type,
                knowledge_base_id=kb_id
            )
            db.add(document)
            db.flush()
            db.refresh(document)
            logger.info(f"Task {task_id}: Document record created with ID {document.id}")

            # 6. Persist document chunks
            logger.info(f"Task {task_id}: Storing document chunks")
            for i, chunk in enumerate(chunks):
                # generate a unique ID for each chunk
                chunk_id = hashlib.sha256(
                    f"{kb_id}:{file_name}:{chunk.page_content}".encode()
                ).hexdigest()

                metadata = _build_enriched_chunk_metadata(
                    source_metadata=chunk.metadata,
                    chunk_id=chunk_id,
                    file_name=file_name,
                    file_path=permanent_path,
                    kb_id=kb_id,
                    document_id=document.id,
                    chunk_index=i,
                    chunk_text=chunk.page_content,
                )
                chunk.metadata = metadata

                doc_chunk = DocumentChunk(
                    id=chunk_id,  # explicit primary key
                    document_id=document.id,
                    kb_id=kb_id,
                    file_name=file_name,
                    chunk_metadata={
                        "page_content": chunk.page_content,
                        **metadata
                    },
                    hash=hashlib.sha256(
                        (chunk.page_content + str(metadata)).encode()
                    ).hexdigest()
                )
                db.add(doc_chunk)
                # flush periodically to keep the session small on large files
                if i > 0 and i % 100 == 0:
                    logger.info(f"Task {task_id}: Stored {i} chunks")
                    db.flush()

            # 7. Add chunks to the vector store
            logger.info(f"Task {task_id}: Adding chunks to vector store")
            vector_chunks = [
                LangchainDocument(
                    page_content=chunk.page_content,
                    metadata=_sanitize_metadata_for_vector_store(chunk.metadata),
                )
                for chunk in chunks
            ]
            vector_store.add_documents(vector_chunks)
            # persist() call removed; newer vector-store versions persist automatically
            logger.info(f"Task {task_id}: Chunks added to vector store")

            # GraphRAG ingestion is best-effort: failure is logged, not raised.
            if settings.GRAPHRAG_ENABLED:
                try:
                    from app.services.graph.graphrag_adapter import GraphRAGAdapter

                    logger.info(f"Task {task_id}: Starting GraphRAG ingestion")
                    graph_adapter = GraphRAGAdapter()
                    source_texts = [doc.page_content for doc in documents if doc.page_content.strip()]
                    await graph_adapter.ingest_texts(kb_id, source_texts)
                    logger.info(f"Task {task_id}: GraphRAG ingestion completed")
                except Exception as graph_exc:
                    logger.error(f"Task {task_id}: GraphRAG ingestion failed: {graph_exc}")

            # 8. Mark the task completed
            logger.info(f"Task {task_id}: Updating task status to completed")
            task.status = "completed"
            task.document_id = document.id  # point at the newly created document

            # 9. Mark the upload record completed
            upload = task.document_upload  # fetched via the relationship
            if upload:
                logger.info(f"Task {task_id}: Updating upload record status to completed")
                upload.status = "completed"

            db.commit()
            logger.info(f"Task {task_id}: Processing completed successfully")

        finally:
            # clean up the local temp file
            try:
                if os.path.exists(local_temp_path):
                    logger.info(f"Task {task_id}: Cleaning up local temp file")
                    os.remove(local_temp_path)
                    logger.info(f"Task {task_id}: Local temp file cleaned up")
            except Exception as e:
                logger.warning(f"Task {task_id}: Failed to clean up local temp file: {str(e)}")

    except Exception as e:
        logger.error(f"Task {task_id}: Error processing document: {str(e)}")
        logger.error(f"Task {task_id}: Stack trace: {traceback.format_exc()}")
        db.rollback()

        # re-fetch the task after rollback before mutating it
        failed_task = db.query(ProcessingTask).get(task_id)
        if failed_task:
            failed_task.status = "failed"
            failed_task.error_message = str(e)
            if failed_task.document_upload:
                failed_task.document_upload.status = "failed"
                failed_task.document_upload.error_message = str(e)
            db.commit()

        # clean up the MinIO temp object (best-effort)
        try:
            logger.info(f"Task {task_id}: Cleaning up temporary file after error")
            if minio_client is not None:
                minio_client.remove_object(
                    bucket_name=settings.MINIO_BUCKET_NAME,
                    object_name=temp_path
                )
            logger.info(f"Task {task_id}: Temporary file cleaned up after error")
        except:
            # NOTE(review): bare except keeps cleanup best-effort; consider
            # narrowing to Exception.
            logger.warning(f"Task {task_id}: Failed to clean up temporary file after error")
    finally:
        # if we create the db session, we need to close it
        if should_close_db and db:
            db.close()
diff --git a/rag-web-ui/backend/app/services/embedding/embedding_factory.py b/rag-web-ui/backend/app/services/embedding/embedding_factory.py
new file mode 100644
index 0000000..313c89d
--- /dev/null
+++ b/rag-web-ui/backend/app/services/embedding/embedding_factory.py
@@ -0,0 +1,46 @@
+from app.core.config import settings
+from langchain_openai import OpenAIEmbeddings
+from langchain_ollama import OllamaEmbeddings
+# If you plan on adding other embeddings, import them here
+# from some_other_module import AnotherEmbeddingClass
+
+
class EmbeddingsFactory:
    """Builds the embeddings client selected by ``EMBEDDINGS_PROVIDER``."""

    @staticmethod
    def create():
        """Create an embeddings instance based on the .env configuration.

        Supported providers: ``openai``, ``dashscope`` (via the
        OpenAI-compatible endpoint) and ``ollama``.

        Raises:
            ValueError: if ``EMBEDDINGS_PROVIDER`` names an unknown provider.
        """
        embeddings_provider = settings.EMBEDDINGS_PROVIDER.lower()

        if embeddings_provider == "openai":
            return OpenAIEmbeddings(
                openai_api_key=settings.OPENAI_API_KEY,
                openai_api_base=settings.OPENAI_API_BASE,
                model=settings.OPENAI_EMBEDDINGS_MODEL,
            )

        if embeddings_provider == "dashscope":
            # DashScope's OpenAI-compatible embedding endpoint expects raw
            # string input (LangChain's len-safe path may send token ids) and
            # accepts at most 10 inputs per batch.
            return OpenAIEmbeddings(
                openai_api_key=settings.DASH_SCOPE_API_KEY,
                openai_api_base=settings.DASH_SCOPE_API_BASE,
                model=settings.DASH_SCOPE_EMBEDDINGS_MODEL,
                check_embedding_ctx_length=False,
                tiktoken_enabled=False,
                skip_empty=True,
                chunk_size=10,
            )

        if embeddings_provider == "ollama":
            return OllamaEmbeddings(
                model=settings.OLLAMA_EMBEDDINGS_MODEL,
                base_url=settings.OLLAMA_API_BASE,
            )

        # Extend with other providers here as needed.
        raise ValueError(f"Unsupported embeddings provider: {embeddings_provider}")
diff --git a/rag-web-ui/backend/app/services/fusion_prompts.py b/rag-web-ui/backend/app/services/fusion_prompts.py
new file mode 100644
index 0000000..5411927
--- /dev/null
+++ b/rag-web-ui/backend/app/services/fusion_prompts.py
@@ -0,0 +1,116 @@
+"""Fusion RAG prompts for aerospace Chinese QA."""
+
# System prompt for the retrieval router: classifies a request into one of
# four chains (A general chat, B hybrid RAG, C graph-local, D graph-global)
# and must answer with strict JSON ({"intent": ..., "reason": ...}).
ROUTER_SYSTEM_PROMPT = """
你是一个检索路由器。你的唯一任务是把用户请求分类到以下四类之一。

分类标签:
A: 通用对话路
- 适用:问候、寒暄、角色扮演、无须知识库支持的常识闲聊。
- 特征:没有明确的专业实体约束,也不依赖当前知识库文档。

B: 混合检索路 (Hybrid RAG)
- 适用:单实体事实查询、定义解释、时间/数值/指标问答。
- 特征:问题通常可由少量文本片段直接回答,核心是“找准证据”。

C: 局部图检索路 (Graph Local Search)
- 适用:实体关系、多跳因果、组件依赖、跨段落链式推理。
- 特征:问题包含“谁影响谁/为什么/如何传导/依赖链”。

D: 全局图检索路 (Graph Global Search)
- 适用:全局总结、趋势分析、跨系统比较、宏观评估。
- 特征:问题面向整个语料或多个主题社区,不是单点事实。

判定规则(按优先级):
1. 若请求明确是问候、寒暄、开放闲聊,判 A。
2. 若请求强调全局综述、趋势、横向比较,判 D。
3. 若请求强调实体关系、影响路径、多跳推理,判 C。
4. 其余知识查询默认判 B。

输出要求:
- 只能输出 JSON,不要额外文本。
- 格式必须是:
{
  "intent": "A/B/C/D",
  "reason": "中文简要理由"
}
""".strip()

# User-side router prompt; formatted with {chat_history} and {query}.
ROUTER_USER_PROMPT_TEMPLATE = """
请基于以下用户问题进行路由分类。

历史对话(可选):
{chat_history}

用户问题:
{query}
""".strip()

# Chain A — general chat without knowledge-base context; formatted with {query}.
GENERAL_CHAT_PROMPT_TEMPLATE = """
你是中文航天问答助手。当前请求被路由为“通用对话路”。
请直接回答用户问题,要求:
- 简洁自然
- 不要伪造具体文献或数据来源
- 若涉及专业细节但无上下文支撑,请明确说明是一般性知识

用户问题:
{query}
""".strip()

# Chain B — hybrid RAG answer grounded in ranked evidence snippets;
# formatted with {query} and {context}.
HYBRID_RAG_PROMPT_TEMPLATE = """
你是航天领域事实问答助手。你会收到按相关性排序的文本证据片段,请严格基于证据作答。

要求:
1. 回答正文应自然连贯,不要使用“直接答案”“证据依据”等分节标题。
2. 关键信息需要有可追溯引用,引用编号使用 [1]、[2] 等格式。
3. 引用标号尽量集中放在回答末尾,不要在句中频繁插入。
4. 不得编造未在证据中出现的事实、时间、参数、型号。
5. 若证据不足,明确写:信息不足,缺少 xxx。
6. 输出中文,术语严谨,避免冗长。

问题:
{query}

证据片段:
{context}
""".strip()

# Chain C — graph local search over an entity/relation subgraph;
# formatted with {query} and {graph_context}.
GRAPH_LOCAL_PROMPT_TEMPLATE = """
你是航天知识图谱推理助手。你将获得一个局部子图上下文(实体、关系、证据)。

要求:
1. 输出结构固定为:
- 结论
- 推理链路
- 证据映射
- 不确定性
2. 推理链路需按步骤编号(步骤1、步骤2...),明确“实体 -> 关系 -> 实体/结论”的链式过程。
3. 若局部子图不完整,必须指出断点,不能臆造链路。
4. 输出中文。

问题:
{query}

局部子图上下文:
{graph_context}
""".strip()

# Chain D — graph global search across community summaries;
# formatted with {query} and {community_context}.
GRAPH_GLOBAL_PROMPT_TEMPLATE = """
你是航天领域全局分析助手。你将获得多个社区摘要,请进行跨社区综合研判。

要求:
1. 输出结构固定为:
- 总体结论
- 跨社区共性
- 关键差异
- 趋势判断
- 风险与建议
2. 每条关键判断尽量给出对应社区编号。
3. 仅依据输入摘要,证据不足时明确说明。
4. 输出中文,适合技术管理层阅读。

问题:
{query}

社区摘要:
{community_context}
""".strip()
diff --git a/rag-web-ui/backend/app/services/graph/__init__.py b/rag-web-ui/backend/app/services/graph/__init__.py
new file mode 100644
index 0000000..0e01546
--- /dev/null
+++ b/rag-web-ui/backend/app/services/graph/__init__.py
@@ -0,0 +1,3 @@
+from app.services.graph.graphrag_adapter import GraphRAGAdapter
+
+__all__ = ["GraphRAGAdapter"]
diff --git a/rag-web-ui/backend/app/services/graph/graphrag_adapter.py b/rag-web-ui/backend/app/services/graph/graphrag_adapter.py
new file mode 100644
index 0000000..b2015c4
--- /dev/null
+++ b/rag-web-ui/backend/app/services/graph/graphrag_adapter.py
@@ -0,0 +1,183 @@
+import asyncio
+import importlib
+from pathlib import Path
+from typing import Any, Dict, List, Optional, Tuple
+
+import numpy as np
+
+from app.core.config import settings
+from app.services.embedding.embedding_factory import EmbeddingsFactory
+from app.services.llm.llm_factory import LLMFactory
+
+
+class GraphRAGAdapter:
+    """Per-knowledge-base facade over the ``nano_graphrag`` library.
+
+    Lazily builds and caches one GraphRAG instance per KB id, wiring in the
+    project's embedding model and (non-streaming) LLM. Ingestion is serialized
+    per KB with an asyncio lock; instance creation is guarded by a class-level
+    lock so concurrent callers do not build duplicates.
+    """
+
+    # Class-level lock guarding creation of per-KB GraphRAG instances.
+    _instance_lock = asyncio.Lock()
+
+    def __init__(self):
+        # kb_id -> cached GraphRAG instance.
+        self._graphrag_instances: Dict[int, Any] = {}
+        # kb_id -> lock serializing ingestion into that KB.
+        self._kb_locks: Dict[int, asyncio.Lock] = {}
+        self._embedding_model = EmbeddingsFactory.create()
+        self._llm_model = LLMFactory.create(streaming=False)
+        self._symbols = self._load_symbols()
+
+    def _load_symbols(self) -> Dict[str, Any]:
+        """Import nano_graphrag lazily and collect the symbols we need.
+
+        Keeping the imports here (instead of module top level) means the
+        backend can start even if nano_graphrag is absent, failing only when
+        graph features are actually used.
+        """
+        module = importlib.import_module("nano_graphrag")
+
+        storage_module = importlib.import_module("nano_graphrag._storage")
+        utils_module = importlib.import_module("nano_graphrag._utils")
+
+        return {
+            "GraphRAG": module.GraphRAG,
+            "QueryParam": module.QueryParam,
+            "Neo4jStorage": getattr(storage_module, "Neo4jStorage"),
+            "NetworkXStorage": getattr(storage_module, "NetworkXStorage"),
+            "EmbeddingFunc": getattr(utils_module, "EmbeddingFunc"),
+        }
+
+    def _get_kb_lock(self, kb_id: int) -> asyncio.Lock:
+        """Return (creating on first use) the ingestion lock for *kb_id*."""
+        if kb_id not in self._kb_locks:
+            self._kb_locks[kb_id] = asyncio.Lock()
+        return self._kb_locks[kb_id]
+
+    async def _llm_complete(self, prompt: str, system_prompt: Optional[str] = None, history_messages: Optional[List[Any]] = None, **kwargs: Any) -> str:
+        """Completion callback handed to nano_graphrag.
+
+        Flattens system prompt + chat history + user prompt into a single
+        string for the underlying model and returns the text content.
+        Supports OpenAI-style dict messages whose ``content`` may be a list
+        of {"text": ...} parts.
+        """
+        history_messages = history_messages or []
+
+        history_lines: List[str] = []
+        for item in history_messages:
+            if isinstance(item, dict):
+                role = str(item.get("role", "user"))
+                content = item.get("content", "")
+                if isinstance(content, list):
+                    # Multi-part content: keep only the textual parts.
+                    joined = " ".join(str(part.get("text", "")) for part in content if isinstance(part, dict))
+                    history_lines.append(f"{role}: {joined}")
+                else:
+                    history_lines.append(f"{role}: {content}")
+            else:
+                history_lines.append(str(item))
+
+        full_prompt = "\n\n".join(
+            part
+            for part in [
+                f"系统提示: {system_prompt}" if system_prompt else "",
+                "历史对话:\n" + "\n".join(history_lines) if history_lines else "",
+                "用户输入:\n" + prompt,
+            ]
+            if part
+        )
+
+        model = self._llm_model
+        max_tokens = kwargs.get("max_tokens")
+        if max_tokens is not None:
+            try:
+                # Best effort: not every langchain model supports bind(max_tokens=...).
+                model = model.bind(max_tokens=max_tokens)
+            except Exception:
+                pass
+
+        response = await model.ainvoke(full_prompt)
+        content = getattr(response, "content", response)
+        if isinstance(content, str):
+            return content
+        return str(content)
+
+    async def _embedding_call(self, texts: List[str]) -> np.ndarray:
+        """Embed *texts* off the event loop and return a numpy array."""
+        vectors = await asyncio.to_thread(self._embedding_model.embed_documents, texts)
+        return np.array(vectors)
+
+    async def _get_or_create(self, kb_id: int) -> Any:
+        """Return the cached GraphRAG for *kb_id*, building it on first use.
+
+        Uses double-checked locking: the fast path reads the cache without
+        the lock; the slow path re-checks under ``_instance_lock``.
+        """
+        if kb_id in self._graphrag_instances:
+            return self._graphrag_instances[kb_id]
+
+        async with GraphRAGAdapter._instance_lock:
+            if kb_id in self._graphrag_instances:
+                return self._graphrag_instances[kb_id]
+
+            GraphRAG = self._symbols["GraphRAG"]
+            EmbeddingFunc = self._symbols["EmbeddingFunc"]
+
+            embedding_func = EmbeddingFunc(
+                embedding_dim=settings.GRAPHRAG_EMBEDDING_DIM,
+                max_token_size=settings.GRAPHRAG_EMBEDDING_MAX_TOKEN_SIZE,
+                func=self._embedding_call,
+            )
+
+            # Default to the in-process NetworkX backend; switch to Neo4j
+            # only when configured.
+            graph_storage_cls = self._symbols["NetworkXStorage"]
+            addon_params: Dict[str, Any] = {}
+            if settings.GRAPHRAG_GRAPH_STORAGE.lower() == "neo4j":
+                graph_storage_cls = self._symbols["Neo4jStorage"]
+                addon_params = {
+                    "neo4j_url": settings.NEO4J_URL,
+                    "neo4j_auth": (settings.NEO4J_USERNAME, settings.NEO4J_PASSWORD),
+                }
+
+            # Each KB gets its own working directory for nano_graphrag state.
+            working_dir = str(Path(settings.GRAPHRAG_WORKING_DIR) / f"kb_{kb_id}")
+
+            rag = GraphRAG(
+                working_dir=working_dir,
+                enable_local=True,
+                enable_naive_rag=True,
+                graph_storage_cls=graph_storage_cls,
+                addon_params=addon_params,
+                embedding_func=embedding_func,
+                best_model_func=self._llm_complete,
+                cheap_model_func=self._llm_complete,
+                entity_extract_max_gleaning=settings.GRAPHRAG_ENTITY_EXTRACT_MAX_GLEANING,
+            )
+            self._graphrag_instances[kb_id] = rag
+            return rag
+
+    async def ingest_texts(self, kb_id: int, texts: List[str]) -> None:
+        """Insert non-empty *texts* into the KB's graph index (serialized per KB)."""
+        cleaned = [text.strip() for text in texts if text and text.strip()]
+        if not cleaned:
+            return
+
+        rag = await self._get_or_create(kb_id)
+        lock = self._get_kb_lock(kb_id)
+        async with lock:
+            await rag.ainsert(cleaned)
+
+    async def local_context(self, kb_id: int, query: str, *, top_k: int = 20, level: int = 2) -> str:
+        """Return the local-subgraph context string for *query* (no answer generation)."""
+        rag = await self._get_or_create(kb_id)
+        QueryParam = self._symbols["QueryParam"]
+        param = QueryParam(
+            mode="local",
+            top_k=top_k,
+            level=level,
+            only_need_context=True,
+        )
+        return await rag.aquery(query, param)
+
+    async def global_context(self, kb_id: int, query: str, *, level: int = 2) -> str:
+        """Return the community-summary (global) context string for *query*."""
+        rag = await self._get_or_create(kb_id)
+        QueryParam = self._symbols["QueryParam"]
+        param = QueryParam(
+            mode="global",
+            level=level,
+            only_need_context=True,
+        )
+        return await rag.aquery(query, param)
+
+    async def local_context_multi(self, kb_ids: List[int], query: str, *, top_k: int = 20, level: int = 2) -> Tuple[str, List[int]]:
+        """Gather local contexts across KBs; failing KBs are skipped (best effort).
+
+        Returns the concatenated context (each section tagged "[KB:<id>]") and
+        the list of KB ids that contributed.
+        """
+        contexts: List[str] = []
+        used_kb_ids: List[int] = []
+        for kb_id in kb_ids:
+            try:
+                ctx = await self.local_context(kb_id, query, top_k=top_k, level=level)
+                if ctx:
+                    contexts.append(f"[KB:{kb_id}]\n{ctx}")
+                    used_kb_ids.append(kb_id)
+            except Exception:
+                # Deliberate best-effort: one broken KB must not sink the query.
+                continue
+        return "\n\n".join(contexts), used_kb_ids
+
+    async def global_context_multi(self, kb_ids: List[int], query: str, *, level: int = 2) -> Tuple[str, List[int]]:
+        """Gather global contexts across KBs; failing KBs are skipped (best effort)."""
+        contexts: List[str] = []
+        used_kb_ids: List[int] = []
+        for kb_id in kb_ids:
+            try:
+                ctx = await self.global_context(kb_id, query, level=level)
+                if ctx:
+                    contexts.append(f"[KB:{kb_id}]\n{ctx}")
+                    used_kb_ids.append(kb_id)
+            except Exception:
+                # Deliberate best-effort: one broken KB must not sink the query.
+                continue
+        return "\n\n".join(contexts), used_kb_ids
diff --git a/rag-web-ui/backend/app/services/hybrid_retriever.py b/rag-web-ui/backend/app/services/hybrid_retriever.py
new file mode 100644
index 0000000..fd9b1ed
--- /dev/null
+++ b/rag-web-ui/backend/app/services/hybrid_retriever.py
@@ -0,0 +1,85 @@
+import re
+from typing import Any, Dict, List
+
+from app.services.vector_store.base import BaseVectorStore
+
+
+def _tokenize_for_keyword_score(text: str) -> List[str]:
+ """Simple multilingual tokenizer for lexical matching without extra dependencies."""
+ tokens = re.findall(r"[A-Za-z0-9_]+|[\u4e00-\u9fff]", text.lower())
+ return [token for token in tokens if token.strip()]
+
+
+def _keyword_score(query: str, doc_text: str) -> float:
+ query_terms = set(_tokenize_for_keyword_score(query))
+ doc_terms = set(_tokenize_for_keyword_score(doc_text))
+
+ if not query_terms or not doc_terms:
+ return 0.0
+
+ overlap = len(query_terms.intersection(doc_terms))
+ return overlap / max(1, len(query_terms))
+
+
+def hybrid_search(
+ vector_store: BaseVectorStore,
+ query: str,
+ top_k: int = 6,
+ fetch_k: int = 20,
+ alpha: float = 0.65,
+) -> List[Dict[str, Any]]:
+ """
+ Hybrid retrieval via vector candidate generation + lexical reranking.
+
+ score = alpha * vector_rank_score + (1 - alpha) * keyword_score
+ """
+ raw_results = vector_store.similarity_search_with_score(query, k=fetch_k)
+ if not raw_results:
+ return []
+
+ ranked: List[Dict[str, Any]] = []
+ total = len(raw_results)
+
+ for index, item in enumerate(raw_results):
+ if not isinstance(item, (tuple, list)) or len(item) < 1:
+ continue
+
+ doc = item[0]
+ if not hasattr(doc, "page_content"):
+ continue
+
+ rank_score = 1.0 - (index / max(1, total))
+ lexical_score = _keyword_score(query, doc.page_content)
+ final_score = alpha * rank_score + (1.0 - alpha) * lexical_score
+
+ ranked.append(
+ {
+ "document": doc,
+ "vector_rank_score": round(rank_score, 6),
+ "keyword_score": round(lexical_score, 6),
+ "final_score": round(final_score, 6),
+ }
+ )
+
+ ranked.sort(key=lambda row: row["final_score"], reverse=True)
+ return ranked[:top_k]
+
+
+def format_hybrid_context(rows: List[Dict[str, Any]]) -> str:
+ parts: List[str] = []
+
+ for i, row in enumerate(rows, start=1):
+ doc = row["document"]
+ metadata = doc.metadata or {}
+ source = metadata.get("source") or metadata.get("file_name") or "unknown"
+ chunk_id = metadata.get("chunk_id") or "unknown"
+
+ parts.append(
+ (
+ f"[{i}] source={source}, chunk_id={chunk_id}, "
+ f"score={row['final_score']}\n"
+ f"{doc.page_content.strip()}"
+ )
+ )
+
+ return "\n\n".join(parts)
diff --git a/rag-web-ui/backend/app/services/intent_router.py b/rag-web-ui/backend/app/services/intent_router.py
new file mode 100644
index 0000000..c07d797
--- /dev/null
+++ b/rag-web-ui/backend/app/services/intent_router.py
@@ -0,0 +1,120 @@
+import json
+import re
+from typing import Any, Dict, List
+
+from app.services.fusion_prompts import (
+ ROUTER_SYSTEM_PROMPT,
+ ROUTER_USER_PROMPT_TEMPLATE,
+)
+
+VALID_INTENTS = {"A", "B", "C", "D"}
+
+
+def _extract_json_object(raw_text: str) -> Dict[str, str]:
+ """Extract and parse the first JSON object from model output."""
+ cleaned = raw_text.strip()
+ cleaned = cleaned.replace("```json", "").replace("```", "").strip()
+
+ match = re.search(r"\{[\s\S]*\}", cleaned)
+ if not match:
+ raise ValueError("No JSON object found in router output")
+
+ data = json.loads(match.group(0))
+ if not isinstance(data, dict):
+ raise ValueError("Router output JSON is not an object")
+
+ intent = str(data.get("intent", "")).strip().upper()
+ reason = str(data.get("reason", "")).strip()
+ if intent not in VALID_INTENTS:
+ raise ValueError(f"Invalid intent: {intent}")
+
+ if not reason:
+ reason = "模型未提供理由,已按规则兜底。"
+
+ return {"intent": intent, "reason": reason}
+
+
+def _build_history_text(messages: dict, max_turns: int = 6) -> str:
+ if not isinstance(messages, dict):
+ return ""
+
+ history = messages.get("messages", [])
+ if not isinstance(history, list):
+ return ""
+
+ tail = history[-max_turns:]
+ rows: List[str] = []
+ for msg in tail:
+ role = str(msg.get("role", "unknown")).strip()
+ content = str(msg.get("content", "")).strip().replace("\n", " ")
+ if content:
+ rows.append(f"{role}: {content}")
+ return "\n".join(rows)
+
+
+def _heuristic_route(query: str) -> Dict[str, str]:
+ text = query.strip().lower()
+
+ general_chat_patterns = [
+ "你好",
+ "您好",
+ "在吗",
+ "谢谢",
+ "早上好",
+ "晚上好",
+ "你是谁",
+ "讲个笑话",
+ ]
+ global_patterns = [
+ "总结",
+ "综述",
+ "整体",
+ "全局",
+ "趋势",
+ "对比",
+ "比较",
+ "宏观",
+ "共性",
+ "差异",
+ ]
+ local_graph_patterns = [
+ "关系",
+ "依赖",
+ "影响",
+ "导致",
+ "原因",
+ "链路",
+ "多跳",
+ "传导",
+ "耦合",
+ "约束",
+ ]
+
+ if any(token in text for token in general_chat_patterns):
+ return {"intent": "A", "reason": "命中通用对话关键词,且不依赖知识库检索。"}
+
+ if any(token in text for token in global_patterns):
+ return {"intent": "D", "reason": "问题指向全局总结或跨主题趋势分析。"}
+
+ if any(token in text for token in local_graph_patterns):
+ return {"intent": "C", "reason": "问题强调实体关系与链式推理。"}
+
+ return {"intent": "B", "reason": "默认归入事实查询,适合混合检索链路。"}
+
+
+async def route_intent(llm: Any, query: str, messages: dict) -> Dict[str, str]:
+ """Route user query to A/B/C/D with LLM-first and heuristic fallback."""
+ history_text = _build_history_text(messages)
+ user_prompt = ROUTER_USER_PROMPT_TEMPLATE.format(
+ chat_history=history_text or "无",
+ query=query,
+ )
+
+ try:
+ full_prompt = f"{ROUTER_SYSTEM_PROMPT}\n\n{user_prompt}"
+ model_resp = await llm.ainvoke(full_prompt)
+ content = getattr(model_resp, "content", model_resp)
+ raw_text = content if isinstance(content, str) else str(content)
+ return _extract_json_object(raw_text)
+ except Exception:
+ return _heuristic_route(query)
diff --git a/rag-web-ui/backend/app/services/llm/llm_factory.py b/rag-web-ui/backend/app/services/llm/llm_factory.py
new file mode 100644
index 0000000..0cd2717
--- /dev/null
+++ b/rag-web-ui/backend/app/services/llm/llm_factory.py
@@ -0,0 +1,57 @@
+from typing import Optional
+from langchain_core.language_models import BaseChatModel
+from langchain_openai import ChatOpenAI
+from langchain_deepseek import ChatDeepSeek
+from langchain_ollama import OllamaLLM
+from app.core.config import settings
+
+class LLMFactory:
+ @staticmethod
+ def create(
+ provider: Optional[str] = None,
+ temperature: float = 0,
+ streaming: bool = True,
+ ) -> BaseChatModel:
+ """
+ Create a LLM instance based on the provider
+ """
+ # If no provider specified, use the one from settings
+ provider = provider or settings.CHAT_PROVIDER
+
+ if provider.lower() == "openai":
+ return ChatOpenAI(
+ temperature=temperature,
+ streaming=streaming,
+ model=settings.OPENAI_MODEL,
+ openai_api_key=settings.OPENAI_API_KEY,
+ openai_api_base=settings.OPENAI_API_BASE
+ )
+ elif provider.lower() == "deepseek":
+ return ChatDeepSeek(
+ temperature=temperature,
+ streaming=streaming,
+ model=settings.DEEPSEEK_MODEL,
+ api_key=settings.DEEPSEEK_API_KEY,
+ api_base=settings.DEEPSEEK_API_BASE
+ )
+ elif provider.lower() == "dashscope":
+ return ChatOpenAI(
+ temperature=temperature,
+ streaming=streaming,
+ model=settings.DASH_SCOPE_CHAT_MODEL,
+ openai_api_key=settings.DASH_SCOPE_API_KEY,
+ openai_api_base=settings.DASH_SCOPE_API_BASE,
+ )
+ elif provider.lower() == "ollama":
+ # Initialize Ollama model
+ return OllamaLLM(
+ model=settings.OLLAMA_MODEL,
+ base_url=settings.OLLAMA_API_BASE,
+ temperature=temperature,
+ streaming=streaming
+ )
+ # Add more providers here as needed
+ # elif provider.lower() == "anthropic":
+ # return ChatAnthropic(...)
+ else:
+ raise ValueError(f"Unsupported LLM provider: {provider}")
\ No newline at end of file
diff --git a/rag-web-ui/backend/app/services/reranker/__init__.py b/rag-web-ui/backend/app/services/reranker/__init__.py
new file mode 100644
index 0000000..2d5665f
--- /dev/null
+++ b/rag-web-ui/backend/app/services/reranker/__init__.py
@@ -0,0 +1,3 @@
+from app.services.reranker.external_api import ExternalRerankerClient
+
+__all__ = ["ExternalRerankerClient"]
diff --git a/rag-web-ui/backend/app/services/reranker/external_api.py b/rag-web-ui/backend/app/services/reranker/external_api.py
new file mode 100644
index 0000000..9a1c9a5
--- /dev/null
+++ b/rag-web-ui/backend/app/services/reranker/external_api.py
@@ -0,0 +1,164 @@
+import asyncio
+import json
+from dataclasses import dataclass
+from typing import Any, Dict, List, Optional
+from urllib import request
+
+
+@dataclass
+class ExternalRerankerClient:
+ api_url: str
+ api_key: str = ""
+ model: str = ""
+ timeout_seconds: float = 8.0
+
+ @property
+ def enabled(self) -> bool:
+ return bool(self.api_url)
+
+ @property
+ def is_dashscope_rerank(self) -> bool:
+ return "dashscope.aliyuncs.com" in self.api_url and "/services/rerank/" in self.api_url
+
+ async def rerank(
+ self,
+ *,
+ query: str,
+ documents: List[str],
+ top_n: Optional[int] = None,
+ metadata: Optional[List[Dict[str, Any]]] = None,
+ ) -> Optional[List[float]]:
+ if not self.enabled:
+ return None
+ if not documents:
+ return []
+
+ payload = self._build_payload(
+ query=query,
+ documents=documents,
+ top_n=top_n or len(documents),
+ metadata=metadata,
+ )
+
+ try:
+ response = await asyncio.to_thread(self._post_json, payload)
+ scores = self._parse_scores(response, len(documents))
+ return scores
+ except Exception:
+ return None
+
+ def _post_json(self, payload: Dict[str, Any]) -> Dict[str, Any]:
+ headers = {"Content-Type": "application/json"}
+ if self.api_key:
+ headers["Authorization"] = f"Bearer {self.api_key}"
+
+ req = request.Request(
+ self.api_url,
+ data=json.dumps(payload).encode("utf-8"),
+ headers=headers,
+ method="POST",
+ )
+ with request.urlopen(req, timeout=self.timeout_seconds) as resp:
+ body = resp.read().decode("utf-8")
+ return json.loads(body)
+
+ def _build_payload(
+ self,
+ *,
+ query: str,
+ documents: List[str],
+ top_n: int,
+ metadata: Optional[List[Dict[str, Any]]],
+ ) -> Dict[str, Any]:
+ if self.is_dashscope_rerank:
+ payload = {
+ "model": self.model,
+ "input": {
+ "query": query,
+ "documents": documents,
+ },
+ "parameters": {
+ "return_documents": True,
+ "top_n": top_n,
+ },
+ }
+ if metadata:
+ payload["metadata"] = metadata
+ return payload
+
+ payload = {
+ "model": self.model,
+ "query": query,
+ "documents": documents,
+ "top_n": top_n,
+ }
+ if metadata:
+ payload["metadata"] = metadata
+ return payload
+
+ def _parse_scores(self, response: Dict[str, Any], expected_len: int) -> List[float]:
+ # DashScope format:
+ # {"output": {"results": [{"index": 0, "relevance_score": 0.98}, ...]}}
+ output_block = response.get("output")
+ if isinstance(output_block, dict) and isinstance(output_block.get("results"), list):
+ raw_results = output_block["results"]
+ scores = [0.0] * expected_len
+ for item in raw_results:
+ if not isinstance(item, dict):
+ continue
+ idx = item.get("index")
+ score = item.get("relevance_score", item.get("score", 0.0))
+ if isinstance(idx, int) and 0 <= idx < expected_len:
+ try:
+ scores[idx] = float(score)
+ except Exception:
+ scores[idx] = 0.0
+ return scores
+
+ # Common response format #1:
+ # {"results": [{"index": 0, "relevance_score": 0.98}, ...]}
+ if isinstance(response.get("results"), list):
+ raw_results = response["results"]
+ scores = [0.0] * expected_len
+ for item in raw_results:
+ if not isinstance(item, dict):
+ continue
+ idx = item.get("index")
+ score = item.get("relevance_score", item.get("score", 0.0))
+ if isinstance(idx, int) and 0 <= idx < expected_len:
+ try:
+ scores[idx] = float(score)
+ except Exception:
+ scores[idx] = 0.0
+ return scores
+
+ # Common response format #2:
+ # {"scores": [0.9, 0.1, ...]}
+ if isinstance(response.get("scores"), list):
+ values = response["scores"]
+ scores: List[float] = []
+ for i in range(expected_len):
+ try:
+ scores.append(float(values[i]))
+ except Exception:
+ scores.append(0.0)
+ return scores
+
+ # Common response format #3:
+ # {"data": [{"index": 0, "score": 0.88}, ...]}
+ if isinstance(response.get("data"), list):
+ raw_results = response["data"]
+ scores = [0.0] * expected_len
+ for item in raw_results:
+ if not isinstance(item, dict):
+ continue
+ idx = item.get("index")
+ score = item.get("score", item.get("relevance_score", 0.0))
+ if isinstance(idx, int) and 0 <= idx < expected_len:
+ try:
+ scores[idx] = float(score)
+ except Exception:
+ scores[idx] = 0.0
+ return scores
+
+ return [0.0] * expected_len
diff --git a/rag-web-ui/backend/app/services/retrieval/__init__.py b/rag-web-ui/backend/app/services/retrieval/__init__.py
new file mode 100644
index 0000000..5bb801d
--- /dev/null
+++ b/rag-web-ui/backend/app/services/retrieval/__init__.py
@@ -0,0 +1,3 @@
+from app.services.retrieval.multi_kb_retriever import MultiKBRetriever, format_retrieval_context
+
+__all__ = ["MultiKBRetriever", "format_retrieval_context"]
diff --git a/rag-web-ui/backend/app/services/retrieval/multi_kb_retriever.py b/rag-web-ui/backend/app/services/retrieval/multi_kb_retriever.py
new file mode 100644
index 0000000..1968be6
--- /dev/null
+++ b/rag-web-ui/backend/app/services/retrieval/multi_kb_retriever.py
@@ -0,0 +1,131 @@
+import re
+from typing import Any, Dict, List, Optional
+
+from app.services.reranker.external_api import ExternalRerankerClient
+
+
+def _tokenize(text: str) -> List[str]:
+ tokens = re.findall(r"[A-Za-z0-9_]+|[\u4e00-\u9fff]", text.lower())
+ return [token for token in tokens if token.strip()]
+
+
+def _keyword_score(query: str, text: str) -> float:
+ query_terms = set(_tokenize(query))
+ text_terms = set(_tokenize(text))
+ if not query_terms or not text_terms:
+ return 0.0
+ overlap = len(query_terms.intersection(text_terms))
+ return overlap / max(1, len(query_terms))
+
+
+def format_retrieval_context(rows: List[Dict[str, Any]]) -> str:
+ blocks: List[str] = []
+ for i, row in enumerate(rows, start=1):
+ doc = row["document"]
+ metadata = doc.metadata or {}
+ blocks.append(
+ (
+ f"[{i}] kb_id={row.get('kb_id')}, source={metadata.get('source') or metadata.get('file_name') or 'unknown'}, "
+ f"chunk_id={metadata.get('chunk_id') or 'unknown'}, score={row.get('final_score', 0):.6f}\n"
+ f"{doc.page_content.strip()}"
+ )
+ )
+ return "\n\n".join(blocks)
+
+
+class MultiKBRetriever:
+    """Retrieves and fuses chunks across several knowledge-base vector stores.
+
+    Pipeline: per-KB vector candidate generation -> dedupe by (kb, chunk) ->
+    optional external rerank -> weighted blend of reranker / vector-rank /
+    keyword-overlap scores -> global top_k.
+    """
+
+    def __init__(
+        self,
+        *,
+        reranker_client: Optional[ExternalRerankerClient] = None,
+        reranker_weight: float = 0.75,
+        vector_weight: float = 0.2,
+        keyword_weight: float = 0.05,
+    ):
+        # Blend weights; reranker_weight applies only when rerank scores exist.
+        self.reranker_client = reranker_client
+        self.reranker_weight = reranker_weight
+        self.vector_weight = vector_weight
+        self.keyword_weight = keyword_weight
+
+    async def retrieve(
+        self,
+        *,
+        query: str,
+        kb_vector_stores: List[Dict[str, Any]],
+        fetch_k_per_kb: int = 12,
+        top_k: int = 12,
+    ) -> List[Dict[str, Any]]:
+        """Return up to *top_k* fused rows for *query* across all KB stores.
+
+        ``kb_vector_stores`` items must carry "kb_id" and "store" keys; each
+        store exposes ``similarity_search_with_score(query, k=...)``.
+        """
+        candidates: List[Dict[str, Any]] = []
+
+        for kb_store in kb_vector_stores:
+            kb_id = kb_store["kb_id"]
+            vector_store = kb_store["store"]
+            raw = vector_store.similarity_search_with_score(query, k=fetch_k_per_kb)
+            total = len(raw)
+
+            for index, item in enumerate(raw):
+                # Defensive: only accept (document, score)-like pairs.
+                if not isinstance(item, (tuple, list)) or not item:
+                    continue
+
+                doc = item[0]
+                if not hasattr(doc, "page_content"):
+                    continue
+
+                metadata = doc.metadata or {}
+                # Positional score decays linearly with per-KB rank.
+                rank_score = 1.0 - (index / max(1, total))
+                lexical_score = _keyword_score(query, doc.page_content)
+
+                candidates.append(
+                    {
+                        "kb_id": kb_id,
+                        "document": doc,
+                        "chunk_key": f"{kb_id}:{metadata.get('chunk_id', index)}",
+                        "vector_rank_score": round(rank_score, 6),
+                        "keyword_score": round(lexical_score, 6),
+                    }
+                )
+
+        if not candidates:
+            return []
+
+        # Dedupe by KB + chunk id to avoid repeated chunks from same collection.
+        unique_map: Dict[str, Dict[str, Any]] = {}
+        for row in candidates:
+            key = row["chunk_key"]
+            existing = unique_map.get(key)
+            if existing is None:
+                unique_map[key] = row
+                continue
+            # Keep whichever duplicate ranked higher in its own KB.
+            if row["vector_rank_score"] > existing["vector_rank_score"]:
+                unique_map[key] = row
+
+        merged = list(unique_map.values())
+        merged.sort(key=lambda x: x["vector_rank_score"], reverse=True)
+
+        # External rerank is optional; None means "fall back to base scoring".
+        reranker_scores: Optional[List[float]] = None
+        if self.reranker_client is not None and self.reranker_client.enabled:
+            reranker_scores = await self.reranker_client.rerank(
+                query=query,
+                documents=[row["document"].page_content for row in merged],
+                top_n=min(top_k, len(merged)),
+                metadata=[{"kb_id": row["kb_id"]} for row in merged],
+            )
+
+        for idx, row in enumerate(merged):
+            base_score = (
+                self.vector_weight * row["vector_rank_score"]
+                + self.keyword_weight * row["keyword_score"]
+            )
+
+            if reranker_scores is not None:
+                rerank_value = float(reranker_scores[idx])
+                final_score = self.reranker_weight * rerank_value + (1 - self.reranker_weight) * base_score
+                row["reranker_score"] = round(rerank_value, 6)
+            else:
+                final_score = base_score
+                row["reranker_score"] = None
+
+            row["final_score"] = round(final_score, 6)
+
+        merged.sort(key=lambda x: x["final_score"], reverse=True)
+        return merged[:top_k]
diff --git a/rag-web-ui/backend/app/services/srs_job_service.py b/rag-web-ui/backend/app/services/srs_job_service.py
new file mode 100644
index 0000000..b782940
--- /dev/null
+++ b/rag-web-ui/backend/app/services/srs_job_service.py
@@ -0,0 +1,187 @@
+from __future__ import annotations
+
+from datetime import datetime
+from pathlib import Path
+from typing import Any, Dict, List
+
+from sqlalchemy.orm import Session
+
+from app.db.session import SessionLocal
+from app.models.tooling import SRSExtraction, SRSRequirement, ToolJob
+from app.tools.srs_reqs_qwen import get_srs_tool
+
+
+def run_srs_job(job_id: int) -> None:
+    """Execute an SRS extraction job end to end, persisting the result.
+
+    Marks the ToolJob as processing, runs the SRS extraction tool on the
+    job's input file, stores the extraction plus per-requirement rows, and
+    finishes the job. On any failure the transaction is rolled back and the
+    job is marked failed in a fresh session.
+    """
+    db = SessionLocal()
+    try:
+        job = db.query(ToolJob).filter(ToolJob.id == job_id).first()
+        if not job:
+            return
+
+        job.status = "processing"
+        job.started_at = datetime.utcnow()
+        job.error_message = None
+        db.commit()
+
+        # Heavy step: parse the document and extract requirements.
+        payload = get_srs_tool().run(job.input_file_path)
+
+        extraction = SRSExtraction(
+            job_id=job.id,
+            document_name=payload["document_name"],
+            document_title=payload.get("document_title") or payload["document_name"],
+            generated_at=_parse_generated_at(payload.get("generated_at")),
+            total_requirements=len(payload.get("requirements", [])),
+            statistics=payload.get("statistics", {}),
+            raw_output=payload.get("raw_output", {}),
+        )
+        db.add(extraction)
+        # Flush so extraction.id is available for the requirement rows below.
+        db.flush()
+
+        for item in payload.get("requirements", []):
+            requirement = SRSRequirement(
+                extraction_id=extraction.id,
+                requirement_uid=item["id"],
+                title=item.get("title") or item["id"],
+                description=item.get("description") or "",
+                priority=item.get("priority") or "中",
+                acceptance_criteria=item.get("acceptance_criteria") or ["待补充验收标准"],
+                source_field=item.get("source_field") or "文档解析",
+                section_number=item.get("section_number"),
+                section_title=item.get("section_title"),
+                requirement_type=item.get("requirement_type"),
+                sort_order=int(item.get("sort_order") or 0),
+            )
+            db.add(requirement)
+
+        job.status = "completed"
+        job.completed_at = datetime.utcnow()
+        job.output_summary = {
+            "total_requirements": extraction.total_requirements,
+            "document_name": extraction.document_name,
+        }
+        db.commit()
+    except Exception as exc:
+        db.rollback()
+        # Record the failure in a separate session; this one may be unusable.
+        _mark_job_failed(job_id=job_id, error_message=str(exc))
+    finally:
+        db.close()
+
+
+def _mark_job_failed(job_id: int, error_message: str) -> None:
+    """Flag a ToolJob as failed using its own short-lived DB session."""
+    db = SessionLocal()
+    try:
+        job = db.query(ToolJob).filter(ToolJob.id == job_id).first()
+        if not job:
+            return
+        job.status = "failed"
+        job.completed_at = datetime.utcnow()
+        # Truncate so oversized tracebacks fit the column.
+        job.error_message = error_message[:2000]
+        db.commit()
+    finally:
+        db.close()
+
+
+def _parse_generated_at(value: Any) -> datetime:
+ if isinstance(value, str):
+ try:
+ return datetime.fromisoformat(value)
+ except ValueError:
+ return datetime.utcnow()
+ return datetime.utcnow()
+
+
+def ensure_upload_path(job_id: int, file_name: str) -> Path:
+ target_dir = Path("uploads") / "srs_jobs" / str(job_id)
+ target_dir.mkdir(parents=True, exist_ok=True)
+ return target_dir / file_name
+
+
+def build_result_response(job: ToolJob, extraction: SRSExtraction) -> Dict[str, Any]:
+ requirements: List[Dict[str, Any]] = []
+ for item in extraction.requirements:
+ requirements.append(
+ {
+ "id": item.requirement_uid,
+ "title": item.title,
+ "description": item.description,
+ "priority": item.priority,
+ "acceptanceCriteria": item.acceptance_criteria or [],
+ "sourceField": item.source_field,
+ "sectionNumber": item.section_number,
+ "sectionTitle": item.section_title,
+ "requirementType": item.requirement_type,
+ "sortOrder": item.sort_order,
+ }
+ )
+
+ return {
+ "jobId": job.id,
+ "documentName": extraction.document_name,
+ "generatedAt": extraction.generated_at.isoformat(),
+ "statistics": extraction.statistics or {},
+ "requirements": requirements,
+ }
+
+
+def replace_requirements(db: Session, extraction: SRSExtraction, updates: List[Dict[str, Any]]) -> None:
+    """Reconcile stored requirements with the client-supplied *updates* list.
+
+    Upserts by requirement_uid: existing rows are updated field by field,
+    unseen uids become new rows, and rows absent from *updates* are deleted.
+    Also refreshes the extraction's totals, statistics and raw_output.
+    Caller is responsible for committing the session.
+    """
+    existing = {
+        req.requirement_uid: req
+        for req in db.query(SRSRequirement)
+        .filter(SRSRequirement.extraction_id == extraction.id)
+        .all()
+    }
+    seen_ids = set()
+
+    for index, item in enumerate(updates):
+        uid = item["id"]
+        seen_ids.add(uid)
+        req = existing.get(uid)
+        if req is None:
+            # New uid: create a row with defaults mirroring the ingest path.
+            req = SRSRequirement(
+                extraction_id=extraction.id,
+                requirement_uid=uid,
+                title=item.get("title") or uid,
+                description=item.get("description") if item.get("description") is not None else "",
+                priority=item.get("priority") or "中",
+                acceptance_criteria=item.get("acceptanceCriteria") or ["待补充验收标准"],
+                source_field=item.get("sourceField") or "文档解析",
+                section_number=item.get("sectionNumber"),
+                section_title=item.get("sectionTitle"),
+                requirement_type=item.get("requirementType"),
+                sort_order=int(item.get("sortOrder") or index),
+            )
+            db.add(req)
+            continue
+
+        # Existing uid: only keys present in the payload overwrite the row.
+        req.title = item.get("title", req.title)
+        req.description = item.get("description", req.description)
+        req.priority = item.get("priority", req.priority)
+        req.acceptance_criteria = item.get("acceptanceCriteria", req.acceptance_criteria)
+        req.source_field = item.get("sourceField", req.source_field)
+        req.section_number = item.get("sectionNumber", req.section_number)
+        req.section_title = item.get("sectionTitle", req.section_title)
+        req.requirement_type = item.get("requirementType", req.requirement_type)
+        req.sort_order = int(item.get("sortOrder", index))
+
+    # Anything the client no longer lists is removed.
+    for uid, req in existing.items():
+        if uid not in seen_ids:
+            db.delete(req)
+
+    extraction.total_requirements = len(updates)
+    extraction.statistics = {
+        "total": len(updates),
+        "by_type": _count_requirement_types(updates),
+    }
+    extraction.raw_output = {
+        "document_name": extraction.document_name,
+        "generated_at": extraction.generated_at.isoformat(),
+        "requirements": updates,
+    }
+
+
+def _count_requirement_types(items: List[Dict[str, Any]]) -> Dict[str, int]:
+ stats: Dict[str, int] = {}
+ for item in items:
+ req_type = item.get("requirementType") or "functional"
+ stats[req_type] = stats.get(req_type, 0) + 1
+ return stats
diff --git a/rag-web-ui/backend/app/services/testing_pipeline/__init__.py b/rag-web-ui/backend/app/services/testing_pipeline/__init__.py
new file mode 100644
index 0000000..0dfcca7
--- /dev/null
+++ b/rag-web-ui/backend/app/services/testing_pipeline/__init__.py
@@ -0,0 +1,3 @@
+from app.services.testing_pipeline.pipeline import run_testing_pipeline
+
+__all__ = ["run_testing_pipeline"]
diff --git a/rag-web-ui/backend/app/services/testing_pipeline/base.py b/rag-web-ui/backend/app/services/testing_pipeline/base.py
new file mode 100644
index 0000000..fd25202
--- /dev/null
+++ b/rag-web-ui/backend/app/services/testing_pipeline/base.py
@@ -0,0 +1,20 @@
+from __future__ import annotations
+
+from abc import ABC, abstractmethod
+from dataclasses import dataclass
+from typing import Any, Dict
+
+
+@dataclass
+class ToolExecutionResult:
+    """Outcome of one pipeline step: updated context plus logging info."""
+
+    # Mutated/extended pipeline context to pass to the next tool.
+    context: Dict[str, Any]
+    # Human-readable one-liner for the step log.
+    output_summary: str
+    # True when the tool used a fallback path instead of its primary strategy.
+    fallback_used: bool = False
+
+
+class TestingTool(ABC):
+    """Interface for a single step in the testing pipeline."""
+
+    # Step identifier used in pipeline step logs.
+    name: str
+
+    @abstractmethod
+    def execute(self, context: Dict[str, Any]) -> ToolExecutionResult:
+        """Run this step against *context* and return the result."""
+        raise NotImplementedError
diff --git a/rag-web-ui/backend/app/services/testing_pipeline/pipeline.py b/rag-web-ui/backend/app/services/testing_pipeline/pipeline.py
new file mode 100644
index 0000000..e2ce26a
--- /dev/null
+++ b/rag-web-ui/backend/app/services/testing_pipeline/pipeline.py
@@ -0,0 +1,99 @@
+from __future__ import annotations
+
+from time import perf_counter
+from typing import Any, Dict, List, Optional
+from uuid import uuid4
+
+from app.services.llm.llm_factory import LLMFactory
+from app.services.testing_pipeline.tools import build_default_tool_chain
+
+
+def _build_input_summary(context: Dict[str, Any]) -> str:
+ req_text = str(context.get("user_requirement_text", "")).strip()
+ req_type = str(context.get("requirement_type_input", "")).strip() or "auto"
+ short_text = req_text if len(req_text) <= 60 else f"{req_text[:60]}..."
+ return f"requirement_type_input={req_type}; requirement_text={short_text}"
+
+
+def _build_output_summary(context: Dict[str, Any]) -> str:
+ req_type_result = context.get("requirement_type_result", {})
+ req_type = req_type_result.get("requirement_type", "")
+ test_items = context.get("test_items", {})
+ test_cases = context.get("test_cases", {})
+
+ return (
+ f"requirement_type={req_type}; "
+ f"items={len(test_items.get('normal', [])) + len(test_items.get('abnormal', []))}; "
+ f"cases={len(test_cases.get('normal', [])) + len(test_cases.get('abnormal', []))}"
+ )
+
+
+def run_testing_pipeline(
+    user_requirement_text: str,
+    requirement_type_input: Optional[str] = None,
+    debug: bool = False,
+    knowledge_context: Optional[str] = None,
+    use_model_generation: bool = False,
+    max_items_per_group: int = 12,
+    cases_per_item: int = 2,
+    max_focus_points: int = 6,
+    max_llm_calls: int = 10,
+) -> Dict[str, Any]:
+    """Run the full test-generation tool chain over one requirement text.
+
+    Builds a shared context dict, runs each tool from
+    ``build_default_tool_chain()`` in order (timing every step), and returns
+    the aggregated classification / test items / test cases plus per-step
+    logs when *debug* is set. Numeric knobs are clamped to safe ranges.
+    """
+    # LLM is optional; generation falls back to rule-based tools when the
+    # factory fails (e.g. missing provider credentials).
+    llm_model = None
+    if use_model_generation:
+        try:
+            llm_model = LLMFactory.create(streaming=False)
+        except Exception:
+            llm_model = None
+
+    context: Dict[str, Any] = {
+        "trace_id": str(uuid4()),
+        "user_requirement_text": user_requirement_text,
+        "requirement_type_input": requirement_type_input,
+        "debug": bool(debug),
+        "knowledge_context": (knowledge_context or "").strip(),
+        "knowledge_used": bool((knowledge_context or "").strip()),
+        "use_model_generation": bool(use_model_generation),
+        "llm_model": llm_model,
+        # Clamp tuning knobs to their supported ranges.
+        "max_items_per_group": max(4, min(int(max_items_per_group), 30)),
+        "cases_per_item": max(1, min(int(cases_per_item), 5)),
+        "max_focus_points": max(3, min(int(max_focus_points), 12)),
+        "llm_call_budget": max(0, min(int(max_llm_calls), 100)),
+    }
+
+    step_logs: List[Dict[str, Any]] = []
+
+    for tool in build_default_tool_chain():
+        start = perf_counter()
+        # Snapshot the input summary before the tool mutates the context.
+        input_summary = _build_input_summary(context)
+
+        execution = tool.execute(context)
+        context = execution.context
+
+        duration_ms = (perf_counter() - start) * 1000
+        step_logs.append(
+            {
+                "step_name": tool.name,
+                "input_summary": input_summary,
+                "output_summary": execution.output_summary,
+                "success": True,
+                "fallback_used": execution.fallback_used,
+                "duration_ms": round(duration_ms, 3),
+            }
+        )
+
+    req_result = context.get("requirement_type_result", {})
+
+    return {
+        "trace_id": context.get("trace_id"),
+        "requirement_type": req_result.get("requirement_type", "未知类型"),
+        "reason": req_result.get("reason", ""),
+        "candidates": req_result.get("candidates", []),
+        "test_items": context.get("test_items", {"normal": [], "abnormal": []}),
+        "test_cases": context.get("test_cases", {"normal": [], "abnormal": []}),
+        "expected_results": context.get("expected_results", {"normal": [], "abnormal": []}),
+        "formatted_output": context.get("formatted_output", ""),
+        "pipeline_summary": _build_output_summary(context),
+        "knowledge_used": bool(context.get("knowledge_used", False)),
+        # Step-level timings are only exposed in debug mode.
+        "step_logs": step_logs if debug else [],
+    }
diff --git a/rag-web-ui/backend/app/services/testing_pipeline/rules.py b/rag-web-ui/backend/app/services/testing_pipeline/rules.py
new file mode 100644
index 0000000..738f10a
--- /dev/null
+++ b/rag-web-ui/backend/app/services/testing_pipeline/rules.py
@@ -0,0 +1,203 @@
+from __future__ import annotations
+
+from typing import Dict, List
+
+
# Canonical requirement/test categories recognized by the pipeline.
# A user-supplied type is honored only if it appears verbatim in this list.
REQUIREMENT_TYPES: List[str] = [
    "功能测试",
    "性能测试",
    "外部接口测试",
    "人机交互界面测试",
    "强度测试",
    "余量测试",
    "可靠性测试",
    "安全性测试",
    "恢复性测试",
    "边界测试",
    "安装性测试",
    "互操作性测试",
    "敏感性测试",
    "测试充分性要求",
]
+
+
+
# Human-readable "identification signal" per requirement type; appended to the
# classifier's reason string when a type is selected by keyword scoring.
TYPE_SIGNAL_RULES: Dict[str, str] = {
    "功能测试": "关注功能需求逐项验证、业务流程正确性、输入输出行为、状态转换与边界值处理。",
    "性能测试": "关注处理精度、响应时间、处理数据量、系统协调性、负载潜力与运行占用空间。",
    "外部接口测试": "关注外部输入输出接口的格式、内容、协议与正常/异常交互表现。",
    "人机交互界面测试": "关注界面一致性、界面风格、操作流程、误操作健壮性与错误提示能力。",
    "强度测试": "关注系统在极限、超负荷、饱和和降级条件下的稳定性与承受能力。",
    "余量测试": "关注存储余量、输入输出通道余量、功能处理时间余量等资源裕度。",
    "可靠性测试": "关注真实或仿真环境下的失效等级、运行剖面、输入覆盖和长期稳定运行能力。",
    "安全性测试": "关注危险状态响应、安全关键部件、异常输入防护、非法访问阻断和数据完整性保护。",
    "恢复性测试": "关注故障探测、备用切换、系统状态保护与从无错误状态继续执行能力。",
    "边界测试": "关注输入输出域边界、状态转换端点、功能界限、性能界限与容量界限。",
    "安装性测试": "关注不同配置下安装卸载流程和安装规程执行正确性。",
    "互操作性测试": "关注多个软件并行运行时的互操作能力与协同正确性。",
    "敏感性测试": "关注有效输入类中可能引发不稳定或不正常处理的数据组合。",
    "测试充分性要求": "关注需求覆盖率、配置项覆盖、语句覆盖、分支覆盖及未覆盖分析确认。",
}
+
+
# Hard constraints applied during test-item decomposition; surfaced to later
# pipeline steps via context["decompose_force_rules"].
DECOMPOSE_FORCE_RULES: List[str] = [
    "每个软件功能至少应被正常测试与被认可的异常场景覆盖;复杂功能需继续细分。",
    "每个测试项必须语义完整、可直接执行。",
    "覆盖必须包含:正常流程、边界条件(适用时)、异常条件。",
    "粒度需适中,避免过粗或过细。",
    "对未知类型必须执行通用分解,并保持正常/异常分组。",
    "对需求说明未显式给出但在用户手册或操作手册体现的功能,也应补充测试项覆盖。",
]
+
+
# Per-type decomposition rule table. For each requirement type:
#   keywords — scoring signals for classification (+2 per hit, case-insensitive)
#   normal   — seed templates for the normal-test item group
#   abnormal — seed templates for the abnormal-test item group
REQUIREMENT_RULES: Dict[str, Dict[str, List[str]]] = {
    "功能测试": {
        "keywords": ["功能", "业务流程", "输入输出", "状态转换", "边界值"],
        "normal": [
            "正常覆盖功能主路径、基本数据类型、合法边界值与状态转换。",
        ],
        "abnormal": [
            "异常覆盖非法输入、不规则输入、非法边界值与最坏情况。",
        ],
    },
    "性能测试": {
        "keywords": ["性能", "处理精度", "响应时间", "处理数据量", "负载", "占用空间"],
        "normal": [
            "正常覆盖处理精度、响应时间、处理数据量与模块协调性。",
        ],
        "abnormal": [
            "异常覆盖超负荷、软硬件限制、负载潜力上限与资源占用异常。",
        ],
    },
    "外部接口测试": {
        "keywords": ["外部接口", "输入接口", "输出接口", "格式", "内容", "协议", "异常交互"],
        "normal": [
            "正常覆盖全部外部接口格式与内容正确性。",
        ],
        "abnormal": [
            "异常覆盖每个输入输出接口的错误格式、错误内容与异常交互。",
        ],
    },
    "人机交互界面测试": {
        "keywords": ["界面", "风格", "交互", "误操作", "错误提示", "操作流程"],
        "normal": [
            "正常覆盖界面风格一致性与标准操作流程。",
        ],
        "abnormal": [
            "异常覆盖误操作、快速操作、非法输入、错误命令与错误流程提示。",
        ],
    },
    "强度测试": {
        "keywords": ["强度", "极限", "超负荷", "饱和", "降级", "健壮性"],
        "normal": [
            "正常覆盖设计极限下系统功能和性能表现。",
        ],
        "abnormal": [
            "异常覆盖超出极限时的降级行为、健壮性与饱和表现。",
        ],
    },
    "余量测试": {
        "keywords": ["余量", "存储余量", "通道余量", "处理时间余量", "资源裕度"],
        "normal": [
            "正常覆盖存储、通道、处理时间余量是否满足要求。",
        ],
        "abnormal": [
            "异常覆盖余量不足或耗尽时系统告警与受控行为。",
        ],
    },
    "可靠性测试": {
        "keywords": ["可靠性", "运行剖面", "失效等级", "输入覆盖", "长期稳定"],
        "normal": [
            "正常覆盖典型环境、运行剖面与输入变量组合。",
        ],
        "abnormal": [
            "异常覆盖失效等级场景、边界环境变化、不合法输入域及失效记录。",
        ],
    },
    "安全性测试": {
        "keywords": ["安全", "危险状态", "安全关键部件", "非法进入", "完整性", "防护"],
        "normal": [
            "正常覆盖安全关键部件、安全结构与合法操作路径。",
        ],
        "abnormal": [
            "异常覆盖危险状态、故障模式、边界接合部、非法进入与数据完整性保护。",
        ],
    },
    "恢复性测试": {
        "keywords": ["恢复", "故障探测", "备用切换", "状态保护", "继续执行", "reset"],
        "normal": [
            "正常覆盖故障探测、备用切换、恢复后继续执行。",
        ],
        "abnormal": [
            "异常覆盖故障中作业保护、状态保护与恢复失败路径。",
        ],
    },
    "边界测试": {
        "keywords": ["边界", "端点", "输入输出域", "状态转换", "性能界限", "容量界限"],
        "normal": [
            "正常覆盖输入输出域边界、状态转换端点与功能界限。",
        ],
        "abnormal": [
            "异常覆盖性能界限、容量界限和越界端点。",
        ],
    },
    "安装性测试": {
        "keywords": ["安装", "卸载", "配置", "安装规程", "部署", "中断"],
        "normal": [
            "正常覆盖标准及不同配置下安装卸载流程。",
        ],
        "abnormal": [
            "异常覆盖安装规程错误、依赖异常与中断后的处理。",
        ],
    },
    "互操作性测试": {
        "keywords": ["互操作", "并行运行", "协同", "兼容", "冲突", "互操作失败"],
        "normal": [
            "正常覆盖两个或多个软件同时运行与互操作过程。",
        ],
        "abnormal": [
            "异常覆盖互操作失败、并行冲突与协同异常。",
        ],
    },
    "敏感性测试": {
        "keywords": ["敏感性", "输入类", "数据组合", "不稳定", "不正常处理"],
        "normal": [
            "正常覆盖有效输入类中典型数据组合。",
        ],
        "abnormal": [
            "异常覆盖引发不稳定或不正常处理的特殊数据组合。",
        ],
    },
    "测试充分性要求": {
        "keywords": ["测试充分性", "需求覆盖率", "配置项覆盖", "语句覆盖", "分支覆盖", "未覆盖分析"],
        "normal": [
            "正常覆盖需求覆盖率、配置项覆盖与代码覆盖达标。",
        ],
        "abnormal": [
            "异常覆盖未覆盖部分逐项分析、确认与报告输出。",
        ],
    },
}
+
+
# Fallback decomposition seeds used when the requirement type is unknown
# (no entry in REQUIREMENT_RULES); keeps the normal/abnormal grouping.
GENERIC_DECOMPOSITION_RULES: Dict[str, List[str]] = {
    "normal": [
        "主流程正确性。",
        "合法边界值。",
        "标准输入输出。",
    ],
    "abnormal": [
        "非法输入。",
        "越界输入。",
        "资源异常或状态冲突。",
    ],
}
+
+
# Maps expected-result placeholders to their verification semantics.
# Placeholders are selected per test case and expanded into concrete
# expected-result sentences by the expected-results pipeline step.
EXPECTED_RESULT_PLACEHOLDER_MAP: Dict[str, str] = {
    "{{return_value}}": "接口或函数返回值验证。",
    "{{state_change}}": "系统状态变化验证。",
    "{{error_message}}": "异常场景错误信息验证。",
    "{{data_persistence}}": "数据库或存储落库结果验证。",
    "{{ui_display}}": "界面显示反馈验证。",
}
diff --git a/rag-web-ui/backend/app/services/testing_pipeline/tools.py b/rag-web-ui/backend/app/services/testing_pipeline/tools.py
new file mode 100644
index 0000000..23d0968
--- /dev/null
+++ b/rag-web-ui/backend/app/services/testing_pipeline/tools.py
@@ -0,0 +1,867 @@
+from __future__ import annotations
+
+import json
+import re
+from collections import defaultdict
+from typing import Any, Dict, List, Optional, Tuple
+
+from app.services.testing_pipeline.base import TestingTool, ToolExecutionResult
+from app.services.testing_pipeline.rules import (
+ DECOMPOSE_FORCE_RULES,
+ EXPECTED_RESULT_PLACEHOLDER_MAP,
+ GENERIC_DECOMPOSITION_RULES,
+ REQUIREMENT_RULES,
+ REQUIREMENT_TYPES,
+ TYPE_SIGNAL_RULES,
+)
+
+
+def _clean_text(value: str) -> str:
+ return " ".join((value or "").replace("\n", " ").split())
+
+
def _truncate_text(value: str, max_len: int = 2000) -> str:
    """Whitespace-normalize *value* and cap it at *max_len* characters.

    Truncated text gets a literal "..." suffix appended.
    """
    normalized = _clean_text(value)
    return normalized if len(normalized) <= max_len else f"{normalized[:max_len]}..."
+
+
+def _safe_int(value: Any, default: int, low: int, high: int) -> int:
+ try:
+ parsed = int(value)
+ except Exception:
+ parsed = default
+ return max(low, min(parsed, high))
+
+
def _strip_instruction_prefix(value: str) -> str:
    """Strip command/instruction boilerplate, returning the bare requirement text.

    Removes, in order: a leading "/testing" command token, a known Chinese
    instruction prefix (optionally followed by a colon), and a generic
    "please generate test cases for ..." pattern. Whitespace is normalized
    at every step via ``_clean_text``.
    """
    text = _clean_text(value)
    if not text:
        return text

    lowered = text.lower()
    if lowered.startswith("/testing"):
        # Drop the slash-command marker, keep whatever follows.
        text = _clean_text(text[len("/testing") :])

    prefixes = [
        "为以下需求生成测试用例",
        "根据以下需求生成测试用例",
        "请根据以下需求生成测试用例",
        "请根据需求生成测试用例",
        "请生成测试用例",
        "生成测试用例",
    ]
    for prefix in prefixes:
        if text.startswith(prefix):
            # Prefer cutting at the first colon (full-width or ASCII).
            # NOTE(review): find() locates the first colon anywhere in the
            # text, not only immediately after the prefix — assumed
            # acceptable for short instruction strings; confirm.
            for sep in (":", ":"):
                idx = text.find(sep)
                if idx != -1:
                    text = _clean_text(text[idx + 1 :])
                    break
            else:
                # No colon anywhere: drop exactly the matched prefix.
                text = _clean_text(text[len(prefix) :])
            break

    # Generic fallback: "请根据...需求...生成...测试用例:" style lead-ins.
    pattern = re.compile(r"^(请)?(根据|按|基于).{0,40}(需求|场景).{0,30}(生成|输出).{0,20}(测试项|测试用例)[::]")
    matched = pattern.match(text)
    if matched:
        text = _clean_text(text[matched.end() :])

    return text
+
+
def _extract_focus_points(value: str, max_points: int = 6) -> List[str]:
    """Split the requirement text into up to *max_points* distinct focus phrases.

    Segments containing domain keywords (control, state, fault, boundary, ...)
    are preferred over the rest; boilerplate fragments are filtered out first.
    """
    text = _strip_instruction_prefix(value)
    if not text:
        return []

    # Split on Chinese/ASCII comma, full stop and semicolon, dropping blanks.
    segments = [seg for seg in (_clean_text(s) for s in re.split(r"[,,。;;]", text)) if seg]

    ignored_tokens = ["生成测试用例", "测试项分解", "测试用例生成", "以下需求"]
    filtered = [
        seg
        for seg in segments
        if len(seg) >= 4 and all(token not in seg for token in ignored_tokens)
    ]
    if not filtered:
        filtered = segments

    priority_keywords = [
        "启停",
        "开启",
        "关闭",
        "远程控制",
        "保护",
        "联动",
        "状态",
        "故障",
        "恢复",
        "切换",
        "告警",
        "模式",
        "边界",
        "时序",
    ]
    prioritized = [seg for seg in filtered if any(kw in seg for kw in priority_keywords)]
    candidates = prioritized or filtered

    # dict.fromkeys dedupes while preserving first-seen order.
    return list(dict.fromkeys(candidates))[:max_points]
+
+
def _build_type_scores(text: str) -> Dict[str, int]:
    """Score each requirement type against *text*.

    +5 when the type name itself appears, +2 per keyword hit
    (keyword matching is case-insensitive).
    """
    lowered = text.lower()
    scores: Dict[str, int] = {}
    for req_type, rule in REQUIREMENT_RULES.items():
        hits = 5 if req_type in text else 0
        hits += sum(2 for keyword in rule.get("keywords", []) if keyword.lower() in lowered)
        scores[req_type] = hits
    return scores
+
+
+def _top_candidates(scores: Dict[str, int], top_n: int = 3) -> List[str]:
+ sorted_pairs = sorted(scores.items(), key=lambda pair: pair[1], reverse=True)
+ non_zero = [name for name, score in sorted_pairs if score > 0]
+ if non_zero:
+ return non_zero[:top_n]
+ return ["功能测试", "边界测试", "性能测试"][:top_n]
+
+
+def _message_to_text(value: Any) -> str:
+ content = getattr(value, "content", value)
+ if isinstance(content, str):
+ return content
+ if isinstance(content, list):
+ chunks: List[str] = []
+ for item in content:
+ if isinstance(item, str):
+ chunks.append(item)
+ elif isinstance(item, dict):
+ text = item.get("text")
+ if isinstance(text, str):
+ chunks.append(text)
+ else:
+ chunks.append(str(item))
+ return "".join(chunks)
+ return str(content)
+
+
+def _extract_json_object(value: str) -> Optional[Dict[str, Any]]:
+ text = (value or "").strip()
+ if not text:
+ return None
+
+ if text.startswith("```"):
+ text = re.sub(r"^```(?:json)?", "", text, flags=re.IGNORECASE).strip()
+ if text.endswith("```"):
+ text = text[:-3].strip()
+
+ try:
+ data = json.loads(text)
+ if isinstance(data, dict):
+ return data
+ except Exception:
+ pass
+
+ start = text.find("{")
+ if start == -1:
+ return None
+
+ depth = 0
+ for idx in range(start, len(text)):
+ ch = text[idx]
+ if ch == "{":
+ depth += 1
+ elif ch == "}":
+ depth -= 1
+ if depth == 0:
+ fragment = text[start : idx + 1]
+ try:
+ data = json.loads(fragment)
+ if isinstance(data, dict):
+ return data
+ except Exception:
+ return None
+ return None
+
+
def _invoke_llm_json(context: Dict[str, Any], prompt: str) -> Optional[Dict[str, Any]]:
    """Invoke the configured LLM with *prompt* and parse a JSON object reply.

    Returns None when model generation is disabled, no model is configured,
    the call budget is exhausted, the call raises, or the reply contains no
    JSON object. Each attempt consumes one unit of ``llm_call_budget``.
    """
    if not context.get("use_model_generation"):
        return None
    model = context.get("llm_model")
    if model is None:
        return None

    remaining = context.get("llm_call_budget")
    if isinstance(remaining, int):
        if remaining <= 0:
            return None
        context["llm_call_budget"] = remaining - 1

    try:
        reply = model.invoke(prompt)
        return _extract_json_object(_message_to_text(reply))
    except Exception:
        return None
+
+
def _invoke_llm_text(context: Dict[str, Any], prompt: str) -> str:
    """Invoke the configured LLM with *prompt* and return its cleaned text reply.

    Returns "" when model generation is disabled, no model is configured,
    the call budget is exhausted, or the call raises. Each attempt consumes
    one unit of ``llm_call_budget``.
    """
    if not context.get("use_model_generation"):
        return ""
    model = context.get("llm_model")
    if model is None:
        return ""

    remaining = context.get("llm_call_budget")
    if isinstance(remaining, int):
        if remaining <= 0:
            return ""
        context["llm_call_budget"] = remaining - 1

    try:
        reply = model.invoke(prompt)
        return _clean_text(_message_to_text(reply))
    except Exception:
        return ""
+
+
def _normalize_item_entry(item: Any) -> Optional[Dict[str, Any]]:
    """Coerce a raw LLM item (str or dict) into ``{"content", "coverage_tags"}``.

    Returns None for empty content or unsupported item shapes. Tags are taken
    from "coverage_tags" or "covered_points", cleaned, and blanks dropped.
    """
    if isinstance(item, str):
        cleaned = _clean_text(item)
        return {"content": cleaned, "coverage_tags": []} if cleaned else None

    if not isinstance(item, dict):
        return None

    cleaned = _clean_text(str(item.get("content", "")))
    if not cleaned:
        return None

    raw_tags = item.get("coverage_tags") or item.get("covered_points") or []
    if not isinstance(raw_tags, list):
        raw_tags = [str(raw_tags)]
    cleaned_tags = [t for t in (_clean_text(str(tag)) for tag in raw_tags) if t]
    return {"content": cleaned, "coverage_tags": cleaned_tags}
+
+
def _dedupe_items(items: List[Dict[str, Any]], max_items: int) -> List[Dict[str, Any]]:
    """Merge duplicate items by cleaned content, unioning their coverage tags.

    Keeps first-seen order of contents and caps the result at *max_items*.
    The tag union preserves first-seen order and skips falsy tags: the
    previous ``set``-based union made the tag order depend on string-hash
    randomization, so identical input could produce differently-ordered
    output between runs.
    """
    merged: Dict[str, Dict[str, Any]] = {}
    for item in items:
        content = _clean_text(item.get("content", ""))
        if not content:
            continue
        entry = merged.setdefault(content, {"content": content, "coverage_tags": []})
        tags = entry["coverage_tags"]
        for tag in item.get("coverage_tags") or []:
            if tag and tag not in tags:
                tags.append(tag)

    return list(merged.values())[:max_items]
+
+
+def _pick_expected_result_placeholder(content: str, abnormal: bool) -> str:
+ text = content or ""
+
+ if abnormal or any(token in text for token in ["非法", "异常", "错误", "拒绝", "越界", "失败"]):
+ return "{{error_message}}"
+ if any(token in text for token in ["状态", "切换", "转换", "恢复"]):
+ return "{{state_change}}"
+ if any(token in text for token in ["数据库", "存储", "落库", "持久化"]):
+ return "{{data_persistence}}"
+ if any(token in text for token in ["界面", "UI", "页面", "按钮", "提示"]):
+ return "{{ui_display}}"
+ return "{{return_value}}"
+
+
class IdentifyRequirementTypeTool(TestingTool):
    """Pipeline step 1: classify the requirement into one of REQUIREMENT_TYPES.

    Honors an explicitly provided type when it is a known category; otherwise
    scores the text with keyword rules and falls back to "未知类型" plus
    candidate suggestions when nothing matches.
    """

    name = "identify-requirement-type"

    def execute(self, context: Dict[str, Any]) -> ToolExecutionResult:
        """Classify the requirement and seed the context for downstream steps.

        Writes context keys: requirement_type_result,
        normalized_requirement_text, requirement_focus_points, knowledge_used.
        """
        raw_text = _clean_text(context.get("user_requirement_text", ""))
        text = _strip_instruction_prefix(raw_text)
        if not text:
            # Stripping removed everything; fall back to the raw text.
            text = raw_text

        max_focus_points = _safe_int(context.get("max_focus_points"), 6, 3, 12)
        provided_type = _clean_text(context.get("requirement_type_input", ""))
        focus_points = _extract_focus_points(text, max_points=max_focus_points)
        fallback_used = False

        if provided_type in REQUIREMENT_TYPES:
            # User explicitly picked a known category: no scoring needed.
            result = {
                "requirement_type": provided_type,
                "reason": "用户已显式指定需求类型,系统按指定类型执行。",
                "candidates": [],
                "scores": {},
                "secondary_types": [],
            }
        else:
            scores = _build_type_scores(text)
            sorted_pairs = sorted(scores.items(), key=lambda pair: pair[1], reverse=True)
            best_type, best_score = sorted_pairs[0]
            # Runner-up types (ranks 2-4) with non-zero scores.
            secondary = [name for name, score in sorted_pairs[1:4] if score > 0]

            if best_score <= 0:
                # No rule hit at all: report unknown with nearest candidates.
                fallback_used = True
                candidates = _top_candidates(scores)
                result = {
                    "requirement_type": "未知类型",
                    "reason": "未命中明确分类规则,已回退到未知类型并提供最接近候选。",
                    "candidates": candidates,
                    "scores": scores,
                    "secondary_types": [],
                }
            else:
                signal = TYPE_SIGNAL_RULES.get(best_type, "")
                result = {
                    "requirement_type": best_type,
                    "reason": f"命中{best_type}识别信号。{signal}",
                    "candidates": [],
                    "scores": scores,
                    "secondary_types": secondary,
                }

        context["requirement_type_result"] = result
        context["normalized_requirement_text"] = text
        context["requirement_focus_points"] = focus_points
        context["knowledge_used"] = bool(context.get("knowledge_context"))

        return ToolExecutionResult(
            context=context,
            output_summary=(
                f"type={result['requirement_type']}; candidates={len(result['candidates'])}; "
                f"secondary_types={len(result.get('secondary_types', []))}; focus_points={len(focus_points)}"
            ),
            fallback_used=fallback_used,
        )
+
+
class DecomposeTestItemsTool(TestingTool):
    """Pipeline step 2: decompose the requirement into normal/abnormal test items.

    Merges LLM-generated items (when model generation is enabled) with
    rule-based seed items; LLM items take precedence in the merge order.
    """

    name = "decompose-test-items"

    @staticmethod
    def _seed_items(
        req_type: str,
        req_text: str,
        focus_points: List[str],
        max_items: int,
    ) -> Tuple[List[Dict[str, Any]], List[Dict[str, Any]]]:
        """Build rule-based (normal, abnormal) item lists.

        Uses the per-type templates from REQUIREMENT_RULES, or the generic
        fallback templates for unknown types, then expands each focus point
        into two normal and two abnormal items.
        """
        if req_type in REQUIREMENT_RULES:
            source_rules = REQUIREMENT_RULES[req_type]
            normal_templates = list(source_rules.get("normal", []))
            abnormal_templates = list(source_rules.get("abnormal", []))
        else:
            normal_templates = list(GENERIC_DECOMPOSITION_RULES["normal"])
            abnormal_templates = list(GENERIC_DECOMPOSITION_RULES["abnormal"])

        normal: List[Dict[str, Any]] = []
        abnormal: List[Dict[str, Any]] = []

        for template in normal_templates:
            normal.append({"content": template, "coverage_tags": [req_type]})
        for template in abnormal_templates:
            abnormal.append({"content": template, "coverage_tags": [req_type]})

        # Two normal + two abnormal items per extracted focus point.
        for point in focus_points:
            normal.extend(
                [
                    {
                        "content": f"验证{point}在标准作业流程下稳定执行且结果符合业务约束。",
                        "coverage_tags": [point, "正常流程"],
                    },
                    {
                        "content": f"验证{point}与相关联动控制、状态同步和回执反馈的一致性。",
                        "coverage_tags": [point, "联动一致性"],
                    },
                ]
            )
            abnormal.extend(
                [
                    {
                        "content": f"验证{point}在非法输入、错误指令或权限异常时的保护与拒绝机制。",
                        "coverage_tags": [point, "异常输入"],
                    },
                    {
                        "content": f"验证{point}在边界条件、时序冲突或设备故障下的告警和恢复行为。",
                        "coverage_tags": [point, "边界异常"],
                    },
                ]
            )

        # DECOMPOSE_FORCE_RULES requires covering manual-documented functions
        # even when the requirement text omits them.
        if any(token in req_text for token in ["手册", "操作手册", "用户手册", "作业指导"]):
            normal.append(
                {
                    "content": "验证需求说明未显式给出但在用户手册或操作手册体现的功能流程。",
                    "coverage_tags": ["手册功能"],
                }
            )

        return _dedupe_items(normal, max_items), _dedupe_items(abnormal, max_items)

    @staticmethod
    def _generate_by_llm(context: Dict[str, Any]) -> Tuple[List[Dict[str, Any]], List[Dict[str, Any]]]:
        """Ask the LLM for (normal, abnormal) item lists; empty lists on failure."""
        req_result = context.get("requirement_type_result", {})
        req_type = req_result.get("requirement_type", "未知类型")
        req_text = context.get("normalized_requirement_text", "")
        focus_points = context.get("requirement_focus_points", [])
        max_items = _safe_int(context.get("max_items_per_group"), 12, 4, 30)
        knowledge_context = _truncate_text(context.get("knowledge_context", ""), max_len=2500)

        prompt = f"""
你是资深测试分析师。请根据需求、分解规则和知识库片段,生成尽可能覆盖要点的测试项。

需求文本:{req_text}
需求类型:{req_type}
需求要点:{focus_points}
知识库片段:{knowledge_context or '无'}

分解约束:
1. 正常测试与异常测试必须分组输出。
2. 每条测试项必须可执行、可验证,避免模板化空话。
3. 尽可能覆盖全部需求要点;每组建议输出6-{max_items}条。
4. 优先生成与需求对象/控制逻辑/异常处理/边界条件强相关的测试项。

请仅输出 JSON 对象,结构如下:
{{
  "normal_test_items": [
    {{"content": "...", "coverage_tags": ["..."]}}
  ],
  "abnormal_test_items": [
    {{"content": "...", "coverage_tags": ["..."]}}
  ]
}}
""".strip()

        data = _invoke_llm_json(context, prompt)
        if not data:
            return [], []

        normal_raw = data.get("normal_test_items", [])
        abnormal_raw = data.get("abnormal_test_items", [])

        normal: List[Dict[str, Any]] = []
        abnormal: List[Dict[str, Any]] = []

        # Silently drop malformed entries; _normalize_item_entry returns None
        # for anything without usable content.
        for item in normal_raw if isinstance(normal_raw, list) else []:
            normalized = _normalize_item_entry(item)
            if normalized:
                normal.append(normalized)

        for item in abnormal_raw if isinstance(abnormal_raw, list) else []:
            normalized = _normalize_item_entry(item)
            if normalized:
                abnormal.append(normalized)

        return _dedupe_items(normal, max_items), _dedupe_items(abnormal, max_items)

    def execute(self, context: Dict[str, Any]) -> ToolExecutionResult:
        """Produce context["test_items"] = {"normal": [...], "abnormal": [...]}.

        Each item gets a stable id ("N<i>" for normal, "E<i>" for abnormal)
        used by later steps to link cases back to items. fallback_used is True
        when the LLM contributed nothing and only seeded items remain.
        """
        req_result = context.get("requirement_type_result", {})
        req_type = req_result.get("requirement_type", "未知类型")
        req_text = context.get("normalized_requirement_text") or _strip_instruction_prefix(
            context.get("user_requirement_text", "")
        )
        focus_points = context.get("requirement_focus_points", [])
        max_items = _safe_int(context.get("max_items_per_group"), 12, 4, 30)

        seeded_normal, seeded_abnormal = self._seed_items(req_type, req_text, focus_points, max_items)
        llm_normal, llm_abnormal = self._generate_by_llm(context)

        # LLM items come first so they survive the max_items cap preferentially.
        merged_normal = _dedupe_items(llm_normal + seeded_normal, max_items)
        merged_abnormal = _dedupe_items(llm_abnormal + seeded_abnormal, max_items)

        fallback_used = not bool(llm_normal or llm_abnormal)

        normal_items: List[Dict[str, Any]] = []
        abnormal_items: List[Dict[str, Any]] = []

        for idx, item in enumerate(merged_normal, start=1):
            normal_items.append(
                {
                    "id": f"N{idx}",
                    "content": item["content"],
                    "coverage_tags": item.get("coverage_tags", []),
                }
            )

        for idx, item in enumerate(merged_abnormal, start=1):
            abnormal_items.append(
                {
                    "id": f"E{idx}",
                    "content": item["content"],
                    "coverage_tags": item.get("coverage_tags", []),
                }
            )

        context["test_items"] = {
            "normal": normal_items,
            "abnormal": abnormal_items,
        }
        context["decompose_force_rules"] = DECOMPOSE_FORCE_RULES

        return ToolExecutionResult(
            context=context,
            output_summary=(
                f"normal_items={len(normal_items)}; abnormal_items={len(abnormal_items)}; "
                f"llm_items={len(llm_normal) + len(llm_abnormal)}"
            ),
            fallback_used=fallback_used,
        )
+
+
class GenerateTestCasesTool(TestingTool):
    """Pipeline step 3: expand each test item into detailed test cases.

    Tries the LLM per item first; when it yields nothing usable, falls back
    to two templated cases per item built from fixed step scaffolds.
    """

    name = "generate-test-cases"

    @staticmethod
    def _build_fallback_steps(item_content: str, abnormal: bool, variant: str) -> List[str]:
        """Return the fixed six-step operation template for a fallback case."""
        if abnormal:
            return [
                "确认测试前置环境、设备状态与日志采集开关已准备就绪。",
                f"准备异常场景“{variant}”所需的输入数据、操作账号和触发条件。",
                f"在目标对象执行异常触发操作,重点验证:{item_content}",
                "持续观察系统返回码、错误文案、告警信息与日志链路完整性。",
                "检查保护机制是否生效,包括拒绝策略、回滚行为和状态一致性。",
                "记录证据并复位环境,确认异常处理后系统可恢复到稳定状态。",
            ]

        return [
            "确认测试环境、设备连接状态和前置业务数据均已初始化。",
            f"准备“{variant}”所需输入参数、操作路径和判定阈值。",
            f"在目标对象执行业务控制流程,重点验证:{item_content}",
            "校验关键返回值、状态变化、控制回执及界面或接口反馈结果。",
            "检查联动模块、日志记录和数据落库是否满足一致性要求。",
            "沉淀测试证据并恢复环境,确保后续用例可重复执行。",
        ]

    def _generate_cases_by_llm(
        self,
        context: Dict[str, Any],
        item: Dict[str, Any],
        abnormal: bool,
        cases_per_item: int,
    ) -> List[Dict[str, Any]]:
        """Ask the LLM for cases for one item; returns [] so the caller can fall back.

        Cases missing content or with fewer than 5 steps are discarded.
        """
        req_text = context.get("normalized_requirement_text", "")
        knowledge_context = _truncate_text(context.get("knowledge_context", ""), max_len=1800)

        # NOTE(review): max(cases_per_item + 1, cases_per_item) below always
        # evaluates to cases_per_item + 1; kept as-is (prompt text only).
        prompt = f"""
你是资深测试工程师。请围绕给定测试项生成详细测试用例。

需求:{req_text}
测试项:{item.get('content', '')}
测试类型:{'异常测试' if abnormal else '正常测试'}
知识库片段:{knowledge_context or '无'}

要求:
1. 生成 {cases_per_item}-{max(cases_per_item + 1, cases_per_item)} 条测试用例。
2. 每条用例包含 test_content 与 operation_steps。
3. operation_steps 必须详细,至少5步,包含前置、执行、观察、校验与证据留存。
4. 内容必须围绕当前测试项,不要输出空洞模板。

仅输出 JSON:
{{
  "test_cases": [
    {{
      "title": "...",
      "test_content": "...",
      "operation_steps": ["...", "..."]
    }}
  ]
}}
""".strip()

        data = _invoke_llm_json(context, prompt)
        if not data:
            return []

        raw_cases = data.get("test_cases", [])
        if not isinstance(raw_cases, list):
            return []

        normalized_cases: List[Dict[str, Any]] = []
        for case in raw_cases:
            if not isinstance(case, dict):
                continue
            test_content = _clean_text(str(case.get("test_content", "")))
            if not test_content:
                continue
            steps = case.get("operation_steps", [])
            if not isinstance(steps, list):
                continue
            cleaned_steps = [_clean_text(str(step)) for step in steps if _clean_text(str(step))]
            if len(cleaned_steps) < 5:
                # Enforce the "at least 5 detailed steps" prompt requirement.
                continue
            normalized_cases.append(
                {
                    "title": _clean_text(str(case.get("title", ""))),
                    "test_content": test_content,
                    "operation_steps": cleaned_steps,
                }
            )

        return normalized_cases[: max(1, cases_per_item)]

    def execute(self, context: Dict[str, Any]) -> ToolExecutionResult:
        """Produce context["test_cases"] with per-item cases and result placeholders.

        Case ids are "<item_id>-C<n>"; each case carries an
        expected_result_placeholder consumed by the expected-results step.
        fallback_used is True when the LLM produced no cases at all.
        """
        test_items = context.get("test_items", {})
        cases_per_item = _safe_int(context.get("cases_per_item"), 2, 1, 5)

        normal_cases: List[Dict[str, Any]] = []
        abnormal_cases: List[Dict[str, Any]] = []
        llm_case_count = 0

        for item in test_items.get("normal", []):
            generated = self._generate_cases_by_llm(context, item, abnormal=False, cases_per_item=cases_per_item)
            if not generated:
                # Deterministic fallback: standard-flow + boundary/linkage cases.
                generated = [
                    {
                        "title": "标准流程验证",
                        "test_content": f"验证{item['content']}",
                        "operation_steps": self._build_fallback_steps(item["content"], False, "标准流程"),
                    },
                    {
                        "title": "边界与联动验证",
                        "test_content": f"验证{item['content']}在边界条件和联动场景下的稳定性",
                        "operation_steps": self._build_fallback_steps(item["content"], False, "边界与联动"),
                    },
                ][:cases_per_item]
            else:
                llm_case_count += len(generated)

            for idx, case in enumerate(generated, start=1):
                merged_content = _clean_text(case.get("test_content", item["content"]))
                placeholder = _pick_expected_result_placeholder(merged_content, abnormal=False)
                normal_cases.append(
                    {
                        "id": f"{item['id']}-C{idx}",
                        "item_id": item["id"],
                        "title": _clean_text(case.get("title", "")),
                        "operation_steps": case.get("operation_steps", []),
                        "test_content": merged_content,
                        "expected_result_placeholder": placeholder,
                    }
                )

        for item in test_items.get("abnormal", []):
            generated = self._generate_cases_by_llm(context, item, abnormal=True, cases_per_item=cases_per_item)
            if not generated:
                # Deterministic fallback: invalid-input and fault/timing cases.
                generated = [
                    {
                        "title": "非法输入与权限异常验证",
                        "test_content": f"验证{item['content']}在非法输入与权限异常下的处理表现",
                        "operation_steps": self._build_fallback_steps(item["content"], True, "非法输入与权限异常"),
                    },
                    {
                        "title": "故障与时序冲突验证",
                        "test_content": f"验证{item['content']}在故障和时序冲突场景下的保护行为",
                        "operation_steps": self._build_fallback_steps(item["content"], True, "故障与时序冲突"),
                    },
                ][:cases_per_item]
            else:
                llm_case_count += len(generated)

            for idx, case in enumerate(generated, start=1):
                merged_content = _clean_text(case.get("test_content", item["content"]))
                placeholder = _pick_expected_result_placeholder(merged_content, abnormal=True)
                abnormal_cases.append(
                    {
                        "id": f"{item['id']}-C{idx}",
                        "item_id": item["id"],
                        "title": _clean_text(case.get("title", "")),
                        "operation_steps": case.get("operation_steps", []),
                        "test_content": merged_content,
                        "expected_result_placeholder": placeholder,
                    }
                )

        context["test_cases"] = {
            "normal": normal_cases,
            "abnormal": abnormal_cases,
        }

        return ToolExecutionResult(
            context=context,
            output_summary=(
                f"normal_cases={len(normal_cases)}; abnormal_cases={len(abnormal_cases)}; llm_cases={llm_case_count}"
            ),
            fallback_used=llm_case_count == 0,
        )
+
+
class BuildExpectedResultsTool(TestingTool):
    """Pipeline step 4: attach a verifiable expected result to every test case.

    Tries the LLM first; otherwise expands the case's placeholder into a
    templated expected-result sentence.
    """

    # NOTE(review): name uses snake_case while earlier tools use kebab-case
    # ("identify-requirement-type", ...) — confirm consumers before unifying.
    name = "build_expected_results"

    def _expected_for_case(self, context: Dict[str, Any], case: Dict[str, Any], abnormal: bool) -> str:
        """Return one expected-result sentence for *case* (LLM or template)."""
        placeholder = case.get("expected_result_placeholder", "{{return_value}}")
        if placeholder not in EXPECTED_RESULT_PLACEHOLDER_MAP:
            # Unknown placeholder: fall back to return-value semantics.
            placeholder = "{{return_value}}"

        req_text = context.get("normalized_requirement_text", "")
        knowledge_context = _truncate_text(context.get("knowledge_context", ""), max_len=1200)
        prompt = f"""
请基于以下信息生成一条可验证、可度量的测试预期结果,避免模板化空话。

需求:{req_text}
测试内容:{case.get('test_content', '')}
测试类型:{'异常测试' if abnormal else '正常测试'}
占位符语义:{placeholder} -> {EXPECTED_RESULT_PLACEHOLDER_MAP.get(placeholder, '')}
知识库片段:{knowledge_context or '无'}

输出要求:
1. 仅输出一句中文预期结果。
2. 结果必须可判定成功/失败。
3. 包含关键观测项(返回值、状态、告警、日志、数据一致性中的相关项)。
""".strip()

        llm_text = _invoke_llm_text(context, prompt)
        if llm_text:
            return _truncate_text(llm_text, max_len=220)

        # LLM unavailable or empty: expand the placeholder into template text.
        test_content = _clean_text(case.get("test_content", ""))
        if placeholder == "{{error_message}}":
            return f"触发{test_content}后,系统应返回明确错误码与错误文案,拒绝非法请求且核心状态保持一致。"
        if placeholder == "{{state_change}}":
            return f"执行{test_content}后,系统状态转换应符合需求定义,状态变化可被日志与回执共同验证。"
        if placeholder == "{{data_persistence}}":
            return f"执行{test_content}后,数据库或存储层应产生符合约束的持久化结果且无脏数据。"
        if placeholder == "{{ui_display}}":
            return f"执行{test_content}后,界面应展示与控制结果一致的反馈信息且提示可被用户执行。"

        if abnormal:
            return f"执行异常场景“{test_content}”后,系统应触发保护策略并输出可追溯日志,业务状态保持可恢复。"

        return f"执行“{test_content}”后,返回值与状态变化应满足需求约束,关键结果可通过日志或回执验证。"

    def execute(self, context: Dict[str, Any]) -> ToolExecutionResult:
        """Produce context["expected_results"] mirroring the test_cases grouping.

        Each entry keeps id == case_id so output formatting can link the
        expectation back to its test case.
        """
        test_cases = context.get("test_cases", {})

        normal_expected: List[Dict[str, str]] = []
        abnormal_expected: List[Dict[str, str]] = []

        for case in test_cases.get("normal", []):
            normal_expected.append(
                {
                    "id": case["id"],
                    "case_id": case["id"],
                    "result": self._expected_for_case(context, case, abnormal=False),
                }
            )

        for case in test_cases.get("abnormal", []):
            abnormal_expected.append(
                {
                    "id": case["id"],
                    "case_id": case["id"],
                    "result": self._expected_for_case(context, case, abnormal=True),
                }
            )

        context["expected_results"] = {
            "normal": normal_expected,
            "abnormal": abnormal_expected,
        }

        return ToolExecutionResult(
            context=context,
            output_summary=(
                f"normal_expected={len(normal_expected)}; abnormal_expected={len(abnormal_expected)}"
            ),
        )
+
+
class FormatOutputTool(TestingTool):
    """Pipeline step 5: render items, cases, and expectations as Markdown.

    Writes the rendered text to context["formatted_output"] and the raw
    grouped data to context["structured_output"].
    """

    # NOTE(review): name uses snake_case while earlier tools use kebab-case —
    # confirm consumers before unifying.
    name = "format_output"

    @staticmethod
    def _format_case_block(case: Dict[str, Any], index: int) -> List[str]:
        """Render one test case as a list of Markdown lines (header, title, steps)."""
        item_id = case.get("item_id", case.get("id", ""))
        title = _clean_text(case.get("title", ""))

        block: List[str] = []
        block.append(f"{index}. [用例 {case['id']}](对应测试项 {item_id}):{case.get('test_content', '')}")
        if title:
            block.append(f"   场景标题:{title}")
        block.append("   操作步骤:")
        for step_idx, step in enumerate(case.get("operation_steps", []), start=1):
            block.append(f"   {step_idx}) {step}")
        return block

    def execute(self, context: Dict[str, Any]) -> ToolExecutionResult:
        """Assemble the three report sections: 测试项 / 测试用例 / 预期成果."""
        test_items = context.get("test_items", {"normal": [], "abnormal": []})
        test_cases = context.get("test_cases", {"normal": [], "abnormal": []})
        expected_results = context.get("expected_results", {"normal": [], "abnormal": []})

        lines: List[str] = []

        # Section 1: test items, normal then abnormal.
        lines.append("**测试项**")
        lines.append("")
        lines.append("**正常测试**:")
        for index, item in enumerate(test_items.get("normal", []), start=1):
            lines.append(f"{index}. [测试项 {item['id']}]:{item['content']}")
        lines.append("")
        lines.append("**异常测试**:")
        for index, item in enumerate(test_items.get("abnormal", []), start=1):
            lines.append(f"{index}. [测试项 {item['id']}]:{item['content']}")

        # Section 2: test cases with operation steps.
        lines.append("")
        lines.append("**测试用例**")
        lines.append("")
        lines.append("**正常测试**:")
        for index, case in enumerate(test_cases.get("normal", []), start=1):
            lines.extend(self._format_case_block(case, index))
        lines.append("")
        lines.append("**异常测试**:")
        for index, case in enumerate(test_cases.get("abnormal", []), start=1):
            lines.extend(self._format_case_block(case, index))

        # Section 3: expected results linked back to their cases.
        lines.append("")
        lines.append("**预期成果**")
        lines.append("")
        lines.append("**正常测试**:")
        for index, expected in enumerate(expected_results.get("normal", []), start=1):
            lines.append(
                f"{index}. [预期 {expected['id']}](对应用例 {expected['case_id']}):{expected['result']}"
            )
        lines.append("")
        lines.append("**异常测试**:")
        for index, expected in enumerate(expected_results.get("abnormal", []), start=1):
            lines.append(
                f"{index}. [预期 {expected['id']}](对应用例 {expected['case_id']}):{expected['result']}"
            )

        context["formatted_output"] = "\n".join(lines)
        context["structured_output"] = {
            "test_items": test_items,
            "test_cases": test_cases,
            "expected_results": expected_results,
        }

        return ToolExecutionResult(
            context=context,
            output_summary="formatted_sections=3",
        )
+
+
def build_default_tool_chain() -> List[TestingTool]:
    """Instantiate the standard five-stage testing pipeline, in execution order."""
    stages = (
        IdentifyRequirementTypeTool,
        DecomposeTestItemsTool,
        GenerateTestCasesTool,
        BuildExpectedResultsTool,
        FormatOutputTool,
    )
    return [stage() for stage in stages]
diff --git a/rag-web-ui/backend/app/services/vector_schema.py b/rag-web-ui/backend/app/services/vector_schema.py
new file mode 100644
index 0000000..0e9d2f5
--- /dev/null
+++ b/rag-web-ui/backend/app/services/vector_schema.py
@@ -0,0 +1,122 @@
from dataclasses import dataclass, field, fields
from datetime import datetime, timezone
from typing import Any, Dict, List, Optional
+
+
@dataclass
class ChunkVectorMetadata:
    """Metadata payload for vector DB and graph linkage.

    ``to_payload`` serializes every field in declaration order, so fields
    added here are automatically included in the payload without touching
    the serializer.
    """

    chunk_id: str
    kb_id: int
    document_id: int
    document_name: str
    document_path: str
    chunk_index: int
    chunk_text: str
    token_count: int
    language: str = "zh"
    source_type: str = "document"
    mission_phase: Optional[str] = None
    section_title: Optional[str] = None
    publish_time: Optional[str] = None
    extracted_entities: List[str] = field(default_factory=list)
    extracted_entity_types: List[str] = field(default_factory=list)
    extracted_relations: List[Dict[str, Any]] = field(default_factory=list)
    graph_node_ids: List[str] = field(default_factory=list)
    graph_edge_ids: List[str] = field(default_factory=list)
    community_ids: List[str] = field(default_factory=list)
    embedding_model: str = ""
    embedding_dim: int = 0
    # UTC ISO-8601 timestamp captured at instance creation.
    ingest_time: str = field(
        default_factory=lambda: datetime.now(timezone.utc).isoformat()
    )

    def to_payload(self) -> Dict[str, Any]:
        """Return all metadata as a dict keyed by field name, in declaration order.

        Values are the live attribute objects (not copies), matching the
        previous hand-written mapping; iterating dataclass fields removes
        the risk of the serializer drifting out of sync with the field list.
        """
        return {f.name: getattr(self, f.name) for f in fields(self)}
+
+
def qdrant_collection_schema(collection_name: str, vector_size: int) -> Dict[str, Any]:
    """Qdrant collection and payload index recommendations.

    Returns the collection name, cosine-distance vector config, and the
    recommended payload indexes for filtering by KB, document, graph
    community, entities, and ingest time.
    """
    indexed_fields = [
        ("kb_id", "integer"),
        ("document_id", "integer"),
        ("document_name", "keyword"),
        ("chunk_id", "keyword"),
        ("mission_phase", "keyword"),
        ("community_ids", "keyword"),
        ("extracted_entities", "keyword"),
        ("ingest_time", "datetime"),
    ]
    return {
        "collection_name": collection_name,
        "vectors": {
            "size": vector_size,
            "distance": "Cosine",
        },
        "payload_indexes": [
            {"field_name": name, "field_schema": schema}
            for name, schema in indexed_fields
        ],
    }
+
+
def milvus_collection_schema(collection_name: str, vector_size: int) -> Dict[str, Any]:
    """Milvus field design for vector+graph linkage.

    Scalar fields carry document/graph metadata; the embedding field uses an
    HNSW index with cosine similarity.
    """
    scalar_fields: List[Dict[str, Any]] = [
        {"name": "id", "type": "VARCHAR", "max_length": 64, "is_primary": True},
        {"name": "kb_id", "type": "INT64"},
        {"name": "document_id", "type": "INT64"},
        {"name": "chunk_index", "type": "INT32"},
        {"name": "document_name", "type": "VARCHAR", "max_length": 255},
        {"name": "mission_phase", "type": "VARCHAR", "max_length": 64},
        {"name": "community_ids", "type": "VARCHAR", "max_length": 512},
        {"name": "extracted_entities", "type": "VARCHAR", "max_length": 2048},
        {"name": "ingest_time", "type": "VARCHAR", "max_length": 64},
    ]
    vector_field: Dict[str, Any] = {"name": "embedding", "type": "FLOAT_VECTOR", "dim": vector_size}
    hnsw_index: Dict[str, Any] = {
        "field_name": "embedding",
        "index_type": "HNSW",
        "metric_type": "COSINE",
        "params": {"M": 16, "efConstruction": 200},
    }
    return {
        "collection_name": collection_name,
        "fields": scalar_fields + [vector_field],
        "index": hnsw_index,
    }
+
+
# Suggested migration DDL extending document_chunks with chunking, graph, and
# embedding metadata columns.
# NOTE(review): `ADD COLUMN IF NOT EXISTS` is supported by MariaDB and
# PostgreSQL but not by stock MySQL — confirm the target database engine
# before executing this statement.
DOCUMENT_CHUNK_METADATA_DDL = """
ALTER TABLE document_chunks
ADD COLUMN IF NOT EXISTS chunk_index INT NULL,
ADD COLUMN IF NOT EXISTS token_count INT NULL,
ADD COLUMN IF NOT EXISTS language VARCHAR(16) DEFAULT 'zh',
ADD COLUMN IF NOT EXISTS mission_phase VARCHAR(64) NULL,
ADD COLUMN IF NOT EXISTS extracted_entities JSON NULL,
ADD COLUMN IF NOT EXISTS extracted_entity_types JSON NULL,
ADD COLUMN IF NOT EXISTS extracted_relations JSON NULL,
ADD COLUMN IF NOT EXISTS graph_node_ids JSON NULL,
ADD COLUMN IF NOT EXISTS graph_edge_ids JSON NULL,
ADD COLUMN IF NOT EXISTS community_ids JSON NULL,
ADD COLUMN IF NOT EXISTS embedding_model VARCHAR(128) NULL,
ADD COLUMN IF NOT EXISTS embedding_dim INT NULL;
""".strip()
diff --git a/rag-web-ui/backend/app/services/vector_store/__init__.py b/rag-web-ui/backend/app/services/vector_store/__init__.py
new file mode 100644
index 0000000..1b3ec6c
--- /dev/null
+++ b/rag-web-ui/backend/app/services/vector_store/__init__.py
@@ -0,0 +1,11 @@
"""Vector-store package: concrete backends plus the factory used to select one."""
from .base import BaseVectorStore
from .chroma import ChromaVectorStore
from .qdrant import QdrantStore
from .factory import VectorStoreFactory

# Public API of this package.
__all__ = [
    'BaseVectorStore',
    'ChromaVectorStore',
    'QdrantStore',
    'VectorStoreFactory'
]
\ No newline at end of file
diff --git a/rag-web-ui/backend/app/services/vector_store/base.py b/rag-web-ui/backend/app/services/vector_store/base.py
new file mode 100644
index 0000000..aef2bd1
--- /dev/null
+++ b/rag-web-ui/backend/app/services/vector_store/base.py
@@ -0,0 +1,42 @@
from abc import ABC, abstractmethod
from typing import List, Optional, Dict, Any, Tuple
from langchain_core.documents import Document
from langchain_core.embeddings import Embeddings

class BaseVectorStore(ABC):
    """Abstract interface that every vector-store backend must implement.

    Concrete implementations (Chroma, Qdrant, ...) wrap a LangChain vector
    store and expose this uniform surface to the factory and services.
    """

    @abstractmethod
    def __init__(self, collection_name: str, embedding_function: Embeddings, **kwargs):
        """Initialize the vector store for the named collection."""
        pass

    @abstractmethod
    def add_documents(self, documents: List[Document]) -> None:
        """Add documents to the vector store."""
        pass

    @abstractmethod
    def delete(self, ids: List[str]) -> None:
        """Delete documents from the vector store by id."""
        pass

    @abstractmethod
    def as_retriever(self, **kwargs: Any):
        """Return a retriever interface for the vector store."""
        pass

    @abstractmethod
    def similarity_search(self, query: str, k: int = 4, **kwargs: Any) -> List[Document]:
        """Search for the k documents most similar to the query."""
        pass

    @abstractmethod
    def similarity_search_with_score(self, query: str, k: int = 4, **kwargs: Any) -> List[Tuple[Document, float]]:
        """Search for similar documents, returning (document, score) pairs.

        Fixed: LangChain's ``similarity_search_with_score`` yields
        ``(Document, score)`` tuples; the previous ``List[Document]``
        return annotation was inaccurate.
        """
        pass

    @abstractmethod
    def delete_collection(self) -> None:
        """Delete the entire collection from the backend."""
        pass
\ No newline at end of file
diff --git a/rag-web-ui/backend/app/services/vector_store/chroma.py b/rag-web-ui/backend/app/services/vector_store/chroma.py
new file mode 100644
index 0000000..59e0616
--- /dev/null
+++ b/rag-web-ui/backend/app/services/vector_store/chroma.py
@@ -0,0 +1,47 @@
+from typing import List, Any
+from langchain_core.documents import Document
+from langchain_core.embeddings import Embeddings
+from langchain_chroma import Chroma
+import chromadb
+from app.core.config import settings
+
+from .base import BaseVectorStore
+
class ChromaVectorStore(BaseVectorStore):
    """Vector store backed by a remote Chroma server (see app settings)."""

    def __init__(self, collection_name: str, embedding_function: Embeddings, **kwargs):
        """Connect to the configured Chroma server and bind the collection."""
        client = chromadb.HttpClient(
            host=settings.CHROMA_DB_HOST,
            port=settings.CHROMA_DB_PORT,
        )
        self._store = Chroma(
            client=client,
            collection_name=collection_name,
            embedding_function=embedding_function,
        )

    def add_documents(self, documents: List[Document]) -> None:
        """Insert the given documents into the Chroma collection."""
        self._store.add_documents(documents)

    def delete(self, ids: List[str]) -> None:
        """Remove documents from the collection by id."""
        self._store.delete(ids)

    def as_retriever(self, **kwargs: Any):
        """Expose the underlying store as a LangChain retriever."""
        return self._store.as_retriever(**kwargs)

    def similarity_search(self, query: str, k: int = 4, **kwargs: Any) -> List[Document]:
        """Return the k documents closest to the query."""
        return self._store.similarity_search(query, k=k, **kwargs)

    def similarity_search_with_score(self, query: str, k: int = 4, **kwargs: Any) -> List[Document]:
        """Return the k closest documents together with their scores."""
        return self._store.similarity_search_with_score(query, k=k, **kwargs)

    def delete_collection(self) -> None:
        """Drop the whole collection on the Chroma server."""
        # Reaches into LangChain private attributes; there is no public API
        # on the wrapper for dropping the backing collection.
        self._store._client.delete_collection(self._store._collection.name)
\ No newline at end of file
diff --git a/rag-web-ui/backend/app/services/vector_store/factory.py b/rag-web-ui/backend/app/services/vector_store/factory.py
new file mode 100644
index 0000000..9a1bee2
--- /dev/null
+++ b/rag-web-ui/backend/app/services/vector_store/factory.py
@@ -0,0 +1,59 @@
+from typing import Dict, Type, Any
+from langchain_core.embeddings import Embeddings
+
+from .base import BaseVectorStore
+from .chroma import ChromaVectorStore
+from .qdrant import QdrantStore
+
class VectorStoreFactory:
    """Factory mapping a backend name to a vector-store implementation."""

    # Registry of known backends, keyed by lower-case name.
    _stores: Dict[str, Type[BaseVectorStore]] = {
        'chroma': ChromaVectorStore,
        'qdrant': QdrantStore
    }

    @classmethod
    def create(
        cls,
        store_type: str,
        collection_name: str,
        embedding_function: Embeddings,
        **kwargs: Any
    ) -> BaseVectorStore:
        """Build a vector store of the requested type.

        Args:
            store_type: Backend key, e.g. ``'chroma'`` or ``'qdrant'``
                (matched case-insensitively).
            collection_name: Name of the collection to open.
            embedding_function: Embedding function used by the store.
            **kwargs: Extra backend-specific constructor arguments.

        Returns:
            A ready-to-use vector store instance.

        Raises:
            ValueError: If ``store_type`` is not a registered backend.
        """
        key = store_type.lower()
        if key not in cls._stores:
            raise ValueError(
                f"Unsupported vector store type: {store_type}. "
                f"Supported types are: {', '.join(cls._stores.keys())}"
            )
        backend = cls._stores[key]
        return backend(
            collection_name=collection_name,
            embedding_function=embedding_function,
            **kwargs
        )

    @classmethod
    def register_store(cls, name: str, store_class: Type[BaseVectorStore]) -> None:
        """Register (or replace) a backend implementation under *name*.

        Args:
            name: Backend key; stored lower-cased.
            store_class: Vector store class to associate with the key.
        """
        cls._stores[name.lower()] = store_class
\ No newline at end of file
diff --git a/rag-web-ui/backend/app/services/vector_store/qdrant.py b/rag-web-ui/backend/app/services/vector_store/qdrant.py
new file mode 100644
index 0000000..fddba1c
--- /dev/null
+++ b/rag-web-ui/backend/app/services/vector_store/qdrant.py
@@ -0,0 +1,43 @@
+from typing import List, Any
+from langchain_core.documents import Document
+from langchain_core.embeddings import Embeddings
+from langchain_community.vectorstores import Qdrant
+from app.core.config import settings
+
+from .base import BaseVectorStore
+
class QdrantStore(BaseVectorStore):
    """Vector store backed by a Qdrant deployment (URL from app settings)."""

    def __init__(self, collection_name: str, embedding_function: Embeddings, **kwargs):
        """Bind to the configured Qdrant server and collection."""
        self._store = Qdrant(
            collection_name=collection_name,
            embeddings=embedding_function,
            url=settings.QDRANT_URL,
            prefer_grpc=settings.QDRANT_PREFER_GRPC
        )

    def add_documents(self, documents: List[Document]) -> None:
        """Insert the given documents into the Qdrant collection."""
        self._store.add_documents(documents)

    def delete(self, ids: List[str]) -> None:
        """Remove documents from the collection by id."""
        self._store.delete(ids)

    def as_retriever(self, **kwargs: Any):
        """Expose the underlying store as a LangChain retriever."""
        return self._store.as_retriever(**kwargs)

    def similarity_search(self, query: str, k: int = 4, **kwargs: Any) -> List[Document]:
        """Return the k documents closest to the query."""
        return self._store.similarity_search(query, k=k, **kwargs)

    def similarity_search_with_score(self, query: str, k: int = 4, **kwargs: Any) -> List[Document]:
        """Return the k closest documents together with their scores."""
        return self._store.similarity_search_with_score(query, k=k, **kwargs)

    def delete_collection(self) -> None:
        """Drop the whole collection on the Qdrant server."""
        # Uses LangChain private attributes; no public drop API on the wrapper.
        self._store._client.delete_collection(self._store._collection_name)
\ No newline at end of file
diff --git a/rag-web-ui/backend/app/startup/migarate.py b/rag-web-ui/backend/app/startup/migarate.py
new file mode 100644
index 0000000..02e20a4
--- /dev/null
+++ b/rag-web-ui/backend/app/startup/migarate.py
@@ -0,0 +1,100 @@
+import logging
+from contextlib import contextmanager
+from pathlib import Path
+from typing import Generator, Tuple
+
+from alembic.config import Config
+from alembic.config import main as alembic_main
+from alembic.migration import MigrationContext
+from sqlalchemy import create_engine
+from sqlalchemy.engine import Connection
+
+logger = logging.getLogger(__name__)
+
+
class DatabaseMigrator:
    """Runs Alembic migrations for the configured database.

    Compares the revision currently applied to the database with the head
    revision of the migration *scripts* and upgrades when they differ.
    """

    def __init__(self, db_url: str):
        self.db_url = db_url
        self.alembic_cfg = self._get_alembic_config()

    @contextmanager
    def database_connection(self) -> Generator[Connection, None, None]:
        """
        Context manager for database connections with timeout

        Yields:
            SQLAlchemy connection object
        """
        engine = create_engine(
            self.db_url, connect_args={"connect_timeout": 3}  # 3-second connect timeout
        )
        try:
            with engine.connect() as connection:
                yield connection
        except Exception as e:
            logger.error(f"Database connection error: {e}")
            raise
        finally:
            # Fixed: the per-call Engine was never disposed, leaking its
            # connection pool on every check.
            engine.dispose()

    def check_migration_needed(self) -> Tuple[bool, str, str]:
        """
        Check if database migration is needed

        Returns:
            Tuple containing:
            - bool: Whether migration is needed
            - str: Current revision
            - str: Head revision
        """
        # Fixed: the old code compared the DB revision against
        # MigrationContext.get_current_heads(), which ALSO reads the DB —
        # both sides were always equal, so pending migrations were never
        # detected. The head must come from the migration scripts instead.
        from alembic.script import ScriptDirectory  # local import keeps module deps unchanged

        script_head = ScriptDirectory.from_config(self.alembic_cfg).get_current_head()

        with self.database_connection() as connection:
            context = MigrationContext.configure(connection)
            current_rev = context.get_current_revision()

        if script_head is None:
            logger.warning("No migration scripts found. Nothing to upgrade.")
            return False, current_rev or "None", "None"

        return current_rev != script_head, current_rev or "None", script_head

    def _get_alembic_config(self) -> Config:
        """
        Create and configure Alembic config

        Returns:
            Alembic config object
        """
        # parents[2] == the backend project root (…/app/startup -> app -> backend),
        # where alembic.ini lives.
        project_root = Path(__file__).resolve().parents[2]
        alembic_cfg = Config(project_root / "alembic.ini")
        alembic_cfg.set_main_option("sqlalchemy.url", self.db_url)
        return alembic_cfg

    def run_migrations(self) -> None:
        """
        Run database migrations if needed

        Raises:
            Exception: If migration fails
        """
        try:
            # Check if migration is needed
            needs_migration, current_rev, head_rev = self.check_migration_needed()

            if needs_migration:
                logger.info(f"Current revision: {current_rev}, upgrading to: {head_rev}")
                # Fixed: alembic.config.main() does not accept a config object,
                # so the programmatically configured URL was being ignored.
                # command.upgrade() is the documented programmatic API.
                from alembic import command  # local import keeps module deps unchanged

                command.upgrade(self.alembic_cfg, "head")

                logger.info("Database migrations completed successfully")
            else:
                logger.info(f"Database is already at the latest version: {current_rev}")

        except Exception as e:
            logger.error(f"Error during database migration: {e}")
            raise
diff --git a/rag-web-ui/backend/app/tools/__init__.py b/rag-web-ui/backend/app/tools/__init__.py
new file mode 100644
index 0000000..8e9d4dc
--- /dev/null
+++ b/rag-web-ui/backend/app/tools/__init__.py
@@ -0,0 +1,4 @@
+from app.tools.base import ToolDefinition
+from app.tools.registry import ToolRegistry
+
+__all__ = ["ToolDefinition", "ToolRegistry"]
diff --git a/rag-web-ui/backend/app/tools/base.py b/rag-web-ui/backend/app/tools/base.py
new file mode 100644
index 0000000..67e2b68
--- /dev/null
+++ b/rag-web-ui/backend/app/tools/base.py
@@ -0,0 +1,11 @@
+from dataclasses import dataclass
+from typing import Any, Dict
+
+
@dataclass(frozen=True)
class ToolDefinition:
    """Immutable descriptor of a pluggable tool.

    Registered in ``ToolRegistry`` keyed by ``name``.
    """

    # Unique tool name; used as the registry key.
    name: str
    # Tool version string.
    version: str
    # Human-readable description of what the tool does.
    description: str
    # Dict describing the tool's accepted input (schema convention defined by callers).
    input_schema: Dict[str, Any]
    # Dict describing the tool's produced output (schema convention defined by callers).
    output_schema: Dict[str, Any]
diff --git a/rag-web-ui/backend/app/tools/registry.py b/rag-web-ui/backend/app/tools/registry.py
new file mode 100644
index 0000000..adb0df6
--- /dev/null
+++ b/rag-web-ui/backend/app/tools/registry.py
@@ -0,0 +1,19 @@
+from typing import Dict, List
+
+from app.tools.base import ToolDefinition
+
+
class ToolRegistry:
    """In-process, class-level registry of tool definitions, keyed by name."""

    # Shared across the process; mutated only through the classmethods below.
    _tools: Dict[str, ToolDefinition] = {}

    @classmethod
    def register(cls, definition: ToolDefinition) -> None:
        """Add *definition* to the registry, replacing any same-named entry."""
        cls._tools[definition.name] = definition

    @classmethod
    def get(cls, name: str) -> ToolDefinition:
        """Return the definition registered under *name* (raises KeyError if absent)."""
        return cls._tools[name]

    @classmethod
    def list(cls) -> List[ToolDefinition]:
        """Return all registered definitions as a new list, in registration order."""
        return [*cls._tools.values()]
diff --git a/rag-web-ui/backend/app/tools/srs_reqs_qwen/__init__.py b/rag-web-ui/backend/app/tools/srs_reqs_qwen/__init__.py
new file mode 100644
index 0000000..2c426c3
--- /dev/null
+++ b/rag-web-ui/backend/app/tools/srs_reqs_qwen/__init__.py
@@ -0,0 +1,3 @@
+from app.tools.srs_reqs_qwen.tool import SRSTool, get_srs_tool
+
+__all__ = ["SRSTool", "get_srs_tool"]
diff --git a/rag-web-ui/backend/app/tools/srs_reqs_qwen/default_config.yaml b/rag-web-ui/backend/app/tools/srs_reqs_qwen/default_config.yaml
new file mode 100644
index 0000000..eec6cc3
--- /dev/null
+++ b/rag-web-ui/backend/app/tools/srs_reqs_qwen/default_config.yaml
@@ -0,0 +1,102 @@
+# 配置文件 - SRS 需求文档解析工具 (LLM增强版)
+# Configuration file for SRS Requirement Document Parser (LLM Enhanced Version)
+
+# LLM配置 - 阿里云千问
+llm:
+ # 是否启用LLM(设为false则使用纯规则提取)
+ enabled: true
+ # LLM提供商:qwen(阿里云千问)
+ provider: "qwen"
+ # 模型名称
+ model: "qwen3-max"
+ # API密钥统一由 rag-web-ui 的环境变量提供
+ api_key: ""
+ # 可选参数
+ temperature: 0.3
+ max_tokens: 1024
+
+# 文档解析配置
+document:
+ supported_formats:
+ - ".pdf"
+ - ".docx"
+ # 标题识别的样式列表
+ heading_styles:
+ - "Heading 1"
+ - "Heading 2"
+ - "Heading 3"
+ - "Heading 4"
+ - "Heading 5"
+ # 需要过滤的非需求章节(GJB438B标准)
+ non_requirement_sections:
+ - "标识"
+ - "系统概述"
+ - "文档概述"
+ - "引用文档"
+ - "合格性规定"
+ - "需求可追踪性"
+ - "注释"
+ - "附录"
+
+# 需求提取配置
+extraction:
+ # 需求类型关键字(用于自动判断需求类型)
+ requirement_types:
+ 功能需求:
+ prefix: "FR"
+ keywords: ["功能", "feature", "requirement", "CSCI组成", "控制", "处理", "监测", "显示"]
+ priority: 1
+ 接口需求:
+ prefix: "IR"
+ keywords: ["接口", "interface", "api", "外部接口", "内部接口", "CAN", "以太网", "通信"]
+ priority: 2
+ 性能需求:
+ prefix: "PR"
+ keywords: ["性能", "performance", "速度", "响应时间", "吞吐量"]
+ priority: 3
+ 安全需求:
+ prefix: "SR"
+ keywords: ["安全", "security", "安全性", "报警"]
+ priority: 4
+ 可靠性需求:
+ prefix: "RR"
+ keywords: ["可靠", "reliability", "容错", "恢复", "冗余"]
+ priority: 5
+ 其他需求:
+ prefix: "OR"
+ keywords: ["约束", "资源", "适应性", "保密", "环境", "计算机", "质量", "设计", "人员", "培训", "保障", "验收", "交付"]
+ priority: 6
+ splitter:
+ enabled: true
+ max_sentence_len: 120
+ min_clause_len: 12
+ semantic_guard:
+ enabled: true
+ preserve_condition_action_chain: true
+ preserve_alarm_chain: true
+ table_strategy:
+ llm_semantic_enabled: true
+ sequence_table_merge: "single_requirement"
+ merge_time_series_rows_min: 3
+ rewrite_policy:
+ llm_light_rewrite_enabled: true
+ preserve_ratio_min: 0.65
+ max_length_growth_ratio: 1.25
+ renumber_policy:
+ enabled: true
+ mode: "section_continuous"
+
+# 输出配置
+output:
+ format: "json"
+ indent: 2
+ # 是否美化输出(格式化)
+ pretty_print: true
+ # 是否包含元数据
+ include_metadata: true
+
+# 日志配置
+logging:
+ level: "INFO" # DEBUG, INFO, WARNING, ERROR
+ format: "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
+ file: "srs_parser.log"
diff --git a/rag-web-ui/backend/app/tools/srs_reqs_qwen/src/__init__.py b/rag-web-ui/backend/app/tools/srs_reqs_qwen/src/__init__.py
new file mode 100644
index 0000000..4eb5ca7
--- /dev/null
+++ b/rag-web-ui/backend/app/tools/srs_reqs_qwen/src/__init__.py
@@ -0,0 +1,26 @@
+# src/__init__.py
+"""
+SRS 需求文档解析工具包
+"""
+
+__version__ = "1.0.0"
+__author__ = "SRS Parser Team"
+
+from .document_parser import DocumentParser
+from .llm_interface import LLMInterface, QwenLLM
+from .requirement_extractor import RequirementExtractor
+from .json_generator import JSONGenerator
+from .settings import AppSettings
+from .requirement_splitter import RequirementSplitter
+from .requirement_id_generator import RequirementIDGenerator
+
+__all__ = [
+ 'DocumentParser',
+ 'LLMInterface',
+ 'QwenLLM',
+ 'RequirementExtractor',
+ 'JSONGenerator',
+ 'AppSettings',
+ 'RequirementSplitter',
+ 'RequirementIDGenerator',
+]
diff --git a/rag-web-ui/backend/app/tools/srs_reqs_qwen/src/document_parser.py b/rag-web-ui/backend/app/tools/srs_reqs_qwen/src/document_parser.py
new file mode 100644
index 0000000..859029a
--- /dev/null
+++ b/rag-web-ui/backend/app/tools/srs_reqs_qwen/src/document_parser.py
@@ -0,0 +1,709 @@
+# -*- coding: utf-8 -*-
+"""
+文档解析模块 - LLM增强版
+支持PDF和Docx格式,针对GJB438B标准SRS文档优化
+"""
+
+import os
+import re
+import logging
+import importlib
+from abc import ABC, abstractmethod
+from typing import List, Dict, Tuple, Optional, Any
+from pathlib import Path
+
+try:
+ from docx import Document
+ HAS_DOCX = True
+except ImportError:
+ HAS_DOCX = False
+
+try:
+ import PyPDF2
+ HAS_PDF = True
+except ImportError:
+ HAS_PDF = False
+
+HAS_PDF_TABLE = importlib.util.find_spec("pdfplumber") is not None
+
+logger = logging.getLogger(__name__)
+
+
class Section:
    """A single section (heading plus body) of the parsed document tree."""

    def __init__(self, level: int, title: str, number: Optional[str] = None, content: str = "", uid: str = ""):
        """
        Args:
            level: Heading depth (1 = top level).
            title: Section title text.
            number: Section number such as "3.1"; None when not parsed.
                (Fixed annotation: was ``str = None``.)
            content: Initial body text.
            uid: Parser-assigned unique id (e.g. "sec-7").
        """
        self.level = level
        self.title = title
        self.number = number
        self.content = content
        self.uid = uid
        self.parent: Optional['Section'] = None
        self.children: List['Section'] = []
        self.tables: List[List[List[str]]] = []
        # Ordered mix of {"type": "text"|"table", ...} entries, preserving document order.
        self.blocks: List[Dict[str, Any]] = []

    def add_child(self, child: 'Section') -> None:
        """Attach *child* beneath this section and set its parent pointer."""
        self.children.append(child)
        child.parent = self

    def add_content(self, text: str) -> None:
        """Append a stripped, non-empty text block to the section body."""
        text = (text or "").strip()
        if not text:
            return
        if self.content:
            self.content += "\n" + text
        else:
            self.content = text
        self.blocks.append({"type": "text", "text": text})

    def add_table(self, table_data: List[List[str]]) -> None:
        """Record a non-empty table (list of rows) and its position among the blocks."""
        if not table_data:
            return
        self.tables.append(table_data)
        table_index = len(self.tables) - 1
        self.blocks.append({"type": "table", "table_index": table_index, "table": table_data})

    def generate_auto_number(self, parent_number: str = "", sibling_index: int = 1) -> None:
        """
        Assign an automatic section number when none was parsed.

        Args:
            parent_number: Number of the parent section ("" at top level).
            sibling_index: 1-based position among siblings.
        """
        if not self.number:
            if parent_number:
                self.number = f"{parent_number}.{sibling_index}"
            else:
                self.number = str(sibling_index)

    def __repr__(self) -> str:
        return f"Section(level={self.level}, number='{self.number}', title='{self.title}')"
+
+
class DocumentParser(ABC):
    """Base class for format-specific document parsers.

    Subclasses implement :meth:`parse` and fill ``self.sections`` with a
    tree of ``Section`` objects.
    """

    def __init__(self, file_path: str):
        self.file_path = file_path
        self.sections: List["Section"] = []
        self.document_title = ""
        self.raw_text = ""
        self.llm = None  # optional LLM instance, attached via set_llm()
        self._uid_counter = 0  # monotonically increasing source for _next_uid()

    def set_llm(self, llm) -> None:
        """Attach an LLM instance for parsers that support LLM-based validation."""
        self.llm = llm

    @abstractmethod
    def parse(self) -> List["Section"]:
        """Parse the document and return its top-level sections."""
        pass

    def get_document_title(self) -> str:
        """Return the document title discovered during parsing ("" before parse)."""
        return self.document_title

    def _next_uid(self) -> str:
        """Return the next unique section id: "sec-1", "sec-2", ..."""
        self._uid_counter += 1
        return f"sec-{self._uid_counter}"

    def _auto_number_sections(self, sections: List["Section"], parent_number: str = "") -> None:
        """
        Generate numbers for sections that have none.

        Top level: skip front-matter sections (left unnumbered) and start
        numbering from the first level-1 section whose title contains a
        body-content keyword. Deeper levels: number children sequentially
        under the parent number.

        Args:
            sections: Sections at one tree level, in document order.
            parent_number: Number of the enclosing section ("" at top level).

        Fixed: removed the dead local ``skip_keywords`` list (assigned but
        never used by the original implementation).
        """
        # Renumber only at the top level
        if not parent_number:
            # Keywords marking the start of the document body; everything
            # before the first level-1 match is treated as front matter.
            content_keywords = ['外部接口', '接口', '软件需求', '需求', '功能', '性能', '设计', '概述', '标识', '引言']

            start_index = 0
            for idx, section in enumerate(sections):
                # Prefer the first level-1 section that looks like body content
                if section.level == 1 and any(kw in section.title for kw in content_keywords):
                    start_index = idx
                    break

            # Renumber all top-level sections
            counter = 1
            for i, section in enumerate(sections):
                if i < start_index:
                    # Front matter stays unnumbered
                    section.number = ""
                else:
                    # Body: level-1 sections are numbered from 1 upward
                    if section.level == 1:
                        section.number = str(counter)
                        counter += 1

                    # Recurse into children (body sections only, as before)
                    if section.children:
                        self._auto_number_sections(section.children, section.number)
        else:
            # Child sections: number sequentially under the parent
            for i, section in enumerate(sections, 1):
                if not section.number or self._is_chinese_number(section.number):
                    section.generate_auto_number(parent_number, i)
                if section.children:
                    self._auto_number_sections(section.children, section.number)

    def _is_chinese_number(self, text: str) -> bool:
        """Return True when *text* is non-empty and consists solely of Chinese numerals."""
        chinese_numbers = '一二三四五六七八九十百千万'
        # bool(...) so the annotated return type holds even for empty input
        # (the original returned '' for "", a falsy str rather than a bool).
        return bool(text) and all(c in chinese_numbers for c in text)
+
+
class DocxParser(DocumentParser):
    """DOCX document parser (python-docx based).

    Walks paragraphs and tables in document order; heading paragraphs open
    new sections, other paragraphs and tables attach to the deepest
    currently-open section.
    """

    def __init__(self, file_path: str):
        if not HAS_DOCX:
            raise ImportError("python-docx库未安装,请运行: pip install python-docx")
        super().__init__(file_path)
        self.document = None

    def parse(self) -> List[Section]:
        """Parse the .docx file into a tree of Sections.

        Returns:
            The list of top-level sections (also stored on ``self.sections``).

        Raises:
            Exception: re-raised after logging if python-docx fails on the file.
        """
        try:
            self.document = Document(self.file_path)
            self.document_title = self.document.core_properties.title or "SRS Document"

            # Maps heading level -> most recent open section at that level.
            section_stack = {}

            for block in self._iter_block_items(self.document):
                from docx.text.paragraph import Paragraph
                from docx.table import Table
                if isinstance(block, Paragraph):
                    text = block.text.strip()
                    if not text:
                        continue

                    heading_info = self._parse_heading(block, text)
                    if heading_info:
                        number, title, level = heading_info
                        section = Section(level=level, title=title, number=number, uid=self._next_uid())

                        if level == 1 or not section_stack:
                            self.sections.append(section)
                            section_stack = {1: section}
                        else:
                            # Attach under the nearest open ancestor level.
                            parent_level = level - 1
                            while parent_level >= 1 and parent_level not in section_stack:
                                parent_level -= 1

                            if parent_level >= 1 and parent_level in section_stack:
                                section_stack[parent_level].add_child(section)
                            elif self.sections:
                                self.sections[-1].add_child(section)

                            section_stack[level] = section
                            # Close deeper sections; they can no longer receive content.
                            for l in list(section_stack.keys()):
                                if l > level:
                                    del section_stack[l]
                    else:
                        # Body paragraph: append to the deepest open section.
                        if section_stack:
                            max_level = max(section_stack.keys())
                            section_stack[max_level].add_content(text)
                        else:
                            # No heading seen yet: collect under a default section.
                            default_section = Section(level=1, title="未命名章节", number="", uid=self._next_uid())
                            default_section.add_content(text)
                            self.sections.append(default_section)
                            section_stack = {1: default_section}
                elif isinstance(block, Table):
                    # Table handling: attach to the deepest open section.
                    table_data = self._extract_table_data(block)
                    if table_data:
                        if section_stack:
                            max_level = max(section_stack.keys())
                            section_stack[max_level].add_table(table_data)
                        else:
                            default_section = Section(level=1, title="未命名章节", number="", uid=self._next_uid())
                            default_section.add_table(table_data)
                            self.sections.append(default_section)
                            section_stack = {1: default_section}

            # Auto-number sections that were parsed without a number.
            self._auto_number_sections(self.sections)

            logger.info(f"完成Docx解析,提取{len(self.sections)}个顶级章节")
            return self.sections

        except Exception as e:
            logger.error(f"解析Docx文档失败: {e}")
            raise

    def _is_valid_heading(self, text: str) -> bool:
        """Return True if *text* looks like a real heading (not TOC noise)."""
        if len(text) > 120 or '...' in text:
            return False
        # A heading should contain Chinese characters or letters.
        if not re.search(r'[\u4e00-\u9fa5A-Za-z]', text):
            return False
        # Filter table-of-contents entries (title followed by a page number,
        # e.g. "概述 2" or "概述 . . . . 2").
        if re.search(r'\s{2,}\d+$', text):  # multiple spaces then trailing digits
            return False
        if re.search(r'[\.。\s]+\d+$', text):  # dots/spaces then trailing digits
            return False
        return True

    def _parse_heading(self, paragraph, text: str) -> Optional[Tuple[str, str, int]]:
        """Parse a heading paragraph; return (number, title, level) or None."""
        style_name = paragraph.style.name if paragraph.style else ""
        is_heading_style = style_name.lower().startswith('heading') if style_name else False

        # Numeric heading, e.g. "3.1 功能需求" — level equals the dot depth.
        match = re.match(r'^(\d+(?:\.\d+)*)\s*[\.、]?\s*(.+)$', text)
        if match and self._is_valid_heading(match.group(2)):
            number = match.group(1)
            title = match.group(2).strip()
            level = len(number.split('.'))
            return number, title, level

        # Chinese-numeral heading, e.g. "一、概述" — always treated as level 1.
        match = re.match(r'^([一二三四五六七八九十]+)[、\.]+\s*(.+)$', text)
        if match and self._is_valid_heading(match.group(2)):
            number = match.group(1)
            title = match.group(2).strip()
            level = 1
            return number, title, level

        # Style-based heading (Word "Heading N" styles); level from the style name.
        if is_heading_style and self._is_valid_heading(text):
            level = 1
            level_match = re.search(r'(\d+)', style_name)
            if level_match:
                level = int(level_match.group(1))
            return "", text, level

        return None

    def _iter_block_items(self, parent):
        """Yield Paragraph and Table objects in document order."""
        from docx.text.paragraph import Paragraph
        from docx.table import Table
        from docx.oxml.text.paragraph import CT_P
        from docx.oxml.table import CT_Tbl

        for child in parent.element.body.iterchildren():
            if isinstance(child, CT_P):
                yield Paragraph(child, parent)
            elif isinstance(child, CT_Tbl):
                yield Table(child, parent)

    def _extract_table_data(self, table) -> List[List[str]]:
        """Extract table cell text as rows, whitespace-normalized; skip empty rows."""
        table_data = []
        for row in table.rows:
            row_data = []
            for cell in row.cells:
                text = cell.text.replace('\n', ' ').strip()
                text = re.sub(r'\s+', ' ', text)
                row_data.append(text)
            if any(cell for cell in row_data):
                table_data.append(row_data)
        return table_data
+
+
class PDFParser(DocumentParser):
    """PDF document parser — LLM-enhanced.

    Extracts text with PyPDF2, reconstructs the section tree heuristically,
    optionally validates sections with an LLM, and (when pdfplumber is
    available) attaches extracted tables to their matching sections.
    """

    # Keywords a valid GJB438B SRS section title is expected to contain.
    # NOTE(review): this list is not referenced anywhere in this class as
    # written — confirm whether it is used elsewhere or is dead data.
    VALID_TITLE_KEYWORDS = [
        '范围', '标识', '概述', '引用', '文档',
        '需求', '功能', '接口', '性能', '安全', '保密',
        '环境', '资源', '质量', '设计', '约束',
        '人员', '培训', '保障', '验收', '交付', '包装',
        '优先', '关键', '合格', '追踪', '注释',
        'CSCI', '计算机', '软件', '硬件', '通信', '通讯',
        '数据', '适应', '可靠', '内部', '外部',
        '描述', '要求', '规定', '说明', '定义',
        '电场', '防护', '装置', '控制', '监控', '显控'
    ]

    # Title fragments that indicate extraction noise rather than real headings.
    INVALID_TITLE_PATTERNS = [
        '本文档可作为', '参比电位', '补偿电流', '以太网',
        '电源', '软件接', '功能\\', '性能 \\', '输入/输出 \\',
        '数据处理要求 \\', '固件 \\', '质量控制要求',
        '信安科技', '浙江', '公司'
    ]

    def __init__(self, file_path: str):
        if not HAS_PDF:
            raise ImportError("PyPDF2库未安装,请运行: pip install PyPDF2")
        super().__init__(file_path)
        self.document_title = "SRS Document"
        # Per-page raw text, kept for matching extracted tables to pages.
        self._page_texts: List[str] = []

    def parse(self) -> List[Section]:
        """Parse the PDF into a tree of Sections.

        Pipeline: extract text → clean it → heuristically detect sections →
        optionally LLM-validate → fall back to a single default section →
        attach pdfplumber tables → auto-number.
        """
        try:
            # 1. Extract all text
            self.raw_text = self._extract_all_text()

            # 2. Clean the text
            cleaned_text = self._clean_text(self.raw_text)

            # 3. Detect the section structure
            self.sections = self._parse_sections(cleaned_text)

            # 4. Validate/clean sections with the LLM, if one is attached
            if self.llm:
                self.sections = self._llm_validate_sections(self.sections)

            # If section detection failed, create a fallback section so that
            # subsequently extracted table data is not lost.
            if not self.sections:
                fallback = Section(level=1, title="未命名章节", number="1", uid=self._next_uid())
                if cleaned_text:
                    fallback.add_content(cleaned_text)
                self.sections = [fallback]

            # 5. Extract PDF tables and attach them to sections (if pdfplumber is available)
            pdf_tables = self._extract_pdf_tables()
            if pdf_tables:
                self._attach_pdf_tables_to_sections(pdf_tables)

            # 6. Auto-number sections that were parsed without a number
            self._auto_number_sections(self.sections)

            logger.info(f"完成PDF解析,提取{len(self.sections)}个顶级章节")
            return self.sections

        except Exception as e:
            logger.error(f"解析PDF文档失败: {e}")
            raise

    def _extract_all_text(self) -> str:
        """Extract all page text from the PDF; also caches per-page text."""
        all_text = []
        with open(self.file_path, 'rb') as f:
            pdf_reader = PyPDF2.PdfReader(f)
            for page in pdf_reader.pages:
                text = page.extract_text()
                if text:
                    all_text.append(text)
        self._page_texts = all_text
        return '\n'.join(all_text)

    def _extract_pdf_tables(self) -> List[Dict[str, Any]]:
        """Extract tables from the PDF via pdfplumber; [] when unavailable or on error."""
        if not HAS_PDF_TABLE:
            logger.warning("未安装pdfplumber,跳过PDF表格提取。可执行: pip install pdfplumber")
            return []

        tables: List[Dict[str, Any]] = []
        try:
            pdfplumber = importlib.import_module("pdfplumber")
            with pdfplumber.open(self.file_path) as pdf:
                for page_idx, page in enumerate(pdf.pages):
                    # Page text from the PyPDF2 pass, used later for section matching.
                    page_text = ""
                    if page_idx < len(self._page_texts):
                        page_text = self._page_texts[page_idx]

                    extracted_tables = page.extract_tables() or []
                    for table_idx, table in enumerate(extracted_tables):
                        # Normalize cells and drop fully-empty rows.
                        cleaned_table: List[List[str]] = []
                        for row in table or []:
                            cells = [re.sub(r'\s+', ' ', str(cell or '')).strip() for cell in row]
                            if any(cells):
                                cleaned_table.append(cells)

                        if cleaned_table:
                            tables.append(
                                {
                                    "page_idx": page_idx,
                                    "table_idx": table_idx,
                                    "page_text": page_text,
                                    "data": cleaned_table,
                                }
                            )
        except Exception as e:
            # Table extraction is best-effort; fall back to the text-only flow.
            logger.warning(f"PDF表格提取失败,继续纯文本流程: {e}")
            return []

        logger.info(f"PDF表格提取完成,共{len(tables)}个表格")
        return tables

    def _attach_pdf_tables_to_sections(self, tables: List[Dict[str, Any]]) -> None:
        """Attach each extracted PDF table to its best-matching section.

        Falls back to the previous table's section, then the first section.
        """
        flat_sections = self._flatten_sections(self.sections)
        if not flat_sections:
            return

        last_section: Optional[Section] = None
        for table in tables:
            matched = self._match_table_section(table.get("page_text", ""), flat_sections)
            target = matched or last_section or flat_sections[0]
            target.add_table(table["data"])
            last_section = target

    def _flatten_sections(self, sections: List[Section]) -> List[Section]:
        """Flatten the section tree into a list in document order."""
        result: List[Section] = []
        for section in sections:
            result.append(section)
            if section.children:
                result.extend(self._flatten_sections(section.children))
        return result

    def _match_table_section(self, page_text: str, sections: List[Section]) -> Optional[Section]:
        """Match a table to a section by searching section titles in the page text.

        The longest matching (number+)title wins; returns None when nothing matches.
        """
        normalized_page = re.sub(r"\s+", "", (page_text or "")).lower()
        if not normalized_page:
            return None

        matched: Optional[Section] = None
        matched_score = -1
        for section in sections:
            title = (section.title or "").strip()
            if not title:
                continue

            number = (section.number or "").strip()
            candidates = [title]
            if number:
                candidates.append(f"{number}{title}")
                candidates.append(f"{number} {title}")

            for candidate in candidates:
                normalized_candidate = re.sub(r"\s+", "", candidate).lower()
                if normalized_candidate and normalized_candidate in normalized_page:
                    score = len(normalized_candidate)
                    if score > matched_score:
                        matched = section
                        matched_score = score

        return matched

    def _clean_text(self, text: str) -> str:
        """Clean PDF-extracted text: drop blank lines, page numbers and TOC lines."""
        lines = text.split('\n')
        cleaned_lines = []

        for line in lines:
            line = line.strip()
            if not line:
                continue
            # Skip page numbers (typically 1-3 digits alone on a line)
            if re.match(r'^\d{1,3}$', line):
                continue
            # Skip table-of-contents lines (long runs of dots)
            if line.count('.') > 10 and '...' in line:
                continue

            cleaned_lines.append(line)

        return '\n'.join(cleaned_lines)

    def _parse_sections(self, text: str) -> List[Section]:
        """Rebuild the section tree from cleaned text, line by line."""
        sections = []
        section_stack = {}
        lines = text.split('\n')
        current_section = None
        content_buffer = []
        found_sections = set()

        for line in lines:
            line = line.strip()
            if not line:
                continue

            # Try to interpret the line as a section heading
            section_info = self._match_section_header(line, found_sections)

            if section_info:
                number, title = section_info
                level = len(number.split('.'))

                # Flush buffered content into the previous section
                if current_section and content_buffer:
                    current_section.add_content('\n'.join(content_buffer))
                    content_buffer = []

                # Create the new section
                section = Section(level=level, title=title, number=number, uid=self._next_uid())
                found_sections.add(number)

                # Build the hierarchy
                if level == 1:
                    sections.append(section)
                    section_stack = {1: section}
                else:
                    # Attach under the nearest open ancestor level
                    parent_level = level - 1
                    while parent_level >= 1 and parent_level not in section_stack:
                        parent_level -= 1

                    if parent_level >= 1 and parent_level in section_stack:
                        section_stack[parent_level].add_child(section)
                    elif sections:
                        sections[-1].add_child(section)
                    else:
                        sections.append(section)
                        section_stack = {1: section}

                    section_stack[level] = section
                    # Close deeper sections
                    for l in list(section_stack.keys()):
                        if l > level:
                            del section_stack[l]

                current_section = section
            else:
                # Accumulate body content (noise lines filtered out)
                if line and not self._is_noise(line):
                    content_buffer.append(line)

        # Flush content of the final section
        if current_section and content_buffer:
            current_section.add_content('\n'.join(content_buffer))

        return sections

    def _match_section_header(self, line: str, found_sections: set) -> Optional[Tuple[str, str]]:
        """
        Match a section heading line.

        Returns:
            (section number, section title) or None
        """
        # Pattern: "3.1功能需求" or "3.1 功能需求"
        match = re.match(r'^(\d+(?:\.\d+)*)\s*(.+)$', line)
        if not match:
            return None

        number = match.group(1)
        title = match.group(2).strip()

        # Exclude table-of-contents lines
        if '...' in title or title.count('.') > 5:
            return None

        # Validate the section number
        parts = number.split('.')
        first_part = int(parts[0])

        # Relaxed top-level number range (non-strict GJB structure)
        if first_part < 1 or first_part > 30:
            return None

        # Sub-part numbers must be plausible
        for part in parts[1:]:
            if int(part) > 20:
                return None

        # Avoid duplicates
        if number in found_sections:
            return None

        # Title length check
        if len(title) > 60 or len(title) < 2:
            return None

        # Relaxed character requirement (tolerates PDFs whose fonts garble
        # Chinese extraction): must contain Chinese characters or letters.
        if not re.search(r'[\u4e00-\u9fa5A-Za-z]', title):
            return None

        # Reject known-invalid title fragments
        for invalid_pattern in self.INVALID_TITLE_PATTERNS:
            if invalid_pattern in title:
                return None

        # Title must not start with a digit
        if title[0].isdigit():
            return None

        # Digit-ratio check (mostly-numeric "titles" are table noise)
        digit_ratio = sum(c.isdigit() for c in title) / max(len(title), 1)
        if digit_ratio > 0.3:
            return None

        # Backslashes in a title are usually table noise
        if '\\' in title and '需求' not in title:
            return None

        return (number, title)

    def _is_noise(self, line: str) -> bool:
        """Return True for lines that are extraction noise."""
        # Purely numeric/punctuation lines
        if re.match(r'^[\d\s,.]+$', line):
            return True
        # Very short lines
        if len(line) < 3:
            return True
        # Roman numerals (front-matter page numbers)
        if re.match(r'^[ivxIVX]+$', line):
            return True
        return False

    def _llm_validate_sections(self, sections: List[Section]) -> List[Section]:
        """Validate the section tree with the attached LLM; drop invalid ones."""
        if not self.llm:
            return sections

        validated_sections = []

        for section in sections:
            # Validate each top-level section
            if self._is_valid_section_with_llm(section):
                # Recursively validate its children
                section.children = self._validate_children(section.children)
                validated_sections.append(section)

        return validated_sections

    def _validate_children(self, children: List[Section]) -> List[Section]:
        """Recursively validate child sections."""
        validated = []
        for child in children:
            if self._is_valid_section_with_llm(child):
                child.children = self._validate_children(child.children)
                validated.append(child)
        return validated

    def _is_valid_section_with_llm(self, section: Section) -> bool:
        """Decide whether a section is valid.

        NOTE(review): despite the name, the visible logic is rule-based only
        and never calls ``self.llm`` — confirm whether an LLM call is intended.
        """
        # Fast rule-based filter for obviously invalid sections
        invalid_titles = [
            '本文档可作为', '故障', '实时', '输入/输出',
            '固件', '功能\\', '\\4.', '\\3.'
        ]
        for invalid in invalid_titles:
            if invalid in section.title:
                logger.debug(f"过滤无效章节: {section.number} {section.title}")
                return False

        # Extra validation for requirement sections (chapter 3)
        if section.number and section.number.startswith('3'):
            # A valid requirement section title should be a clean Chinese phrase;
            # backslashes/slashes usually indicate table noise.
            if '\\' in section.title or '/' in section.title:
                if not any(kw in section.title for kw in ['输入', '输出', '接口']):
                    return False

        return True
+
+
def create_parser(file_path: str) -> DocumentParser:
    """
    Factory: choose a parser implementation from the file extension.
    """
    suffix = Path(file_path).suffix.lower()
    if suffix == '.docx':
        return DocxParser(file_path)
    if suffix == '.pdf':
        return PDFParser(file_path)
    raise ValueError(f"不支持的文件格式: {suffix}")
diff --git a/rag-web-ui/backend/app/tools/srs_reqs_qwen/src/json_generator.py b/rag-web-ui/backend/app/tools/srs_reqs_qwen/src/json_generator.py
new file mode 100644
index 0000000..1bc46a3
--- /dev/null
+++ b/rag-web-ui/backend/app/tools/srs_reqs_qwen/src/json_generator.py
@@ -0,0 +1,198 @@
+# -*- coding: utf-8 -*-
+"""
+JSON生成器模块 - LLM增强版
+将提取的需求和章节结构转换为结构化JSON输出
+"""
+
+import json
+import logging
+from datetime import datetime
+from typing import List, Dict, Any, Optional
+from .document_parser import Section
+from .requirement_extractor import Requirement
+from .settings import AppSettings
+
+logger = logging.getLogger(__name__)
+
+
class JSONGenerator:
    """Assemble sections and extracted requirements into the output JSON.

    Output layout: a metadata block ("文档元数据") plus a nested,
    per-section requirement tree ("需求内容").
    """

    def __init__(self, config: Dict = None):
        """Create a generator.

        Args:
            config: raw configuration dict; feeds AppSettings and the
                "output" options used by save_to_file. May be None.
        """
        self.config = config or {}
        self.settings = AppSettings(self.config)

    def generate(self, sections: List[Section], requirements: List[Requirement],
                 document_title: str = "SRS Document") -> Dict[str, Any]:
        """Build the full JSON dict.

        Args:
            sections: parsed top-level section tree.
            requirements: flat list of extracted requirements.
            document_title: title recorded in the metadata block.

        Returns:
            Structured dict ready for json.dump.
        """
        reqs_by_section = self._group_requirements_by_section(requirements)
        type_stats = self._calculate_type_statistics(requirements)

        output = {
            "文档元数据": {
                "标题": document_title,
                "生成时间": datetime.now().isoformat(),
                "总需求数": len(requirements),
                "需求类型统计": type_stats
            },
            "需求内容": self._build_requirement_content(sections, reqs_by_section)
        }

        logger.info(f"生成JSON输出,共{len(requirements)}个需求")
        return output

    def _group_requirements_by_section(self, requirements: List[Requirement]) -> Dict[str, List[Requirement]]:
        """Group requirements by section uid, falling back to section number, then 'unknown'."""
        grouped: Dict[str, List[Requirement]] = {}
        for req in requirements:
            section_key = req.section_uid or req.section_number or 'unknown'
            grouped.setdefault(section_key, []).append(req)
        return grouped

    def _calculate_type_statistics(self, requirements: List[Requirement]) -> Dict[str, int]:
        """Count requirements per Chinese type label.

        NOTE(review): unknown types default to '其他需求' here but to
        '功能需求' in _build_section_content_recursive — confirm which
        default is intended so the statistics agree with the listing.
        """
        stats: Dict[str, int] = {}
        for req in requirements:
            type_chinese = self.settings.type_chinese.get(req.type, '其他需求')
            stats[type_chinese] = stats.get(type_chinese, 0) + 1
        return stats

    def _should_include_section(self, section: Section) -> bool:
        """True when the section may carry requirements (not boilerplate)."""
        return not self.settings.is_non_requirement_section(section.title)

    def _build_requirement_content(self, sections: List[Section],
                                   reqs_by_section: Dict[str, List[Requirement]]) -> Dict[str, Any]:
        """Build the nested requirement tree for all top-level sections."""
        content: Dict[str, Any] = {}

        for section in sections:
            if not self._should_include_section(section):
                # Skip the section itself but still surface its children,
                # promoted one level up.
                for child in section.children:
                    child_content = self._build_section_content_recursive(child, reqs_by_section)
                    if child_content:
                        key = f"{child.number} {child.title}" if child.number else child.title
                        content[key] = child_content
                continue

            section_content = self._build_section_content_recursive(section, reqs_by_section)
            if section_content:
                key = f"{section.number} {section.title}" if section.number else section.title
                content[key] = section_content

        return content

    def _build_section_content_recursive(self, section: Section,
                                         reqs_by_section: Dict[str, List[Requirement]]) -> Optional[Dict[str, Any]]:
        """Build one section's subtree; None when it has neither requirements nor children."""
        if not self._should_include_section(section):
            return None

        result: Dict[str, Any] = {
            "章节信息": {
                "章节编号": section.number or "",
                "章节标题": section.title,
                "章节级别": section.level
            }
        }

        # Recurse into children first so empty subtrees can be dropped.
        subsections: Dict[str, Any] = {}
        for child in section.children:
            child_content = self._build_section_content_recursive(child, reqs_by_section)
            if child_content:
                key = f"{child.number} {child.title}" if child.number else child.title
                subsections[key] = child_content

        # This section's own requirements, in document order.
        reqs = reqs_by_section.get(section.uid or section.number or 'unknown', [])
        reqs = sorted(reqs, key=lambda r: getattr(r, 'source_order', 0))
        if reqs:
            req_list = []
            for req in reqs:
                # Type label first so it leads each serialized entry.
                type_chinese = self.settings.type_chinese.get(req.type, '功能需求')
                req_dict = {
                    "需求类型": type_chinese,
                    "需求编号": req.id,
                    "需求描述": req.description
                }
                # Interface requirements carry extra routing fields.
                if req.type == 'interface':
                    req_dict["接口名称"] = req.interface_name
                    req_dict["接口类型"] = req.interface_type
                    req_dict["来源"] = req.source
                    req_dict["目的地"] = req.destination
                req_list.append(req_dict)
            result["需求列表"] = req_list

        if subsections:
            result["子章节"] = subsections

        # Prune sections that contribute nothing.
        if "需求列表" not in result and "子章节" not in result:
            return None

        return result

    def save_to_file(self, output: Dict[str, Any], file_path: str) -> None:
        """Write the output dict to file_path as UTF-8 JSON.

        Honors config["output"]: 'indent' (default 2) and 'pretty_print'
        (default True; when False the JSON is written compact).

        Raises:
            Exception: re-raised after logging on any write/serialization failure.
        """
        try:
            output_cfg = self.config.get("output", {})
            indent = output_cfg.get("indent", 2)
            pretty = output_cfg.get("pretty_print", True)
            with open(file_path, 'w', encoding='utf-8') as f:
                json.dump(output, f, ensure_ascii=False, indent=indent if pretty else None)
            logger.info(f"成功保存JSON到: {file_path}")
        except Exception as e:
            logger.error(f"保存JSON文件失败: {e}")
            raise

    def generate_and_save(self, sections: List[Section], requirements: List[Requirement],
                          document_title: str, file_path: str) -> Dict[str, Any]:
        """Generate the JSON dict and persist it in one call.

        Returns:
            The generated output dict.
        """
        output = self.generate(sections, requirements, document_title)
        self.save_to_file(output, file_path)
        return output
diff --git a/rag-web-ui/backend/app/tools/srs_reqs_qwen/src/llm_interface.py b/rag-web-ui/backend/app/tools/srs_reqs_qwen/src/llm_interface.py
new file mode 100644
index 0000000..b2db801
--- /dev/null
+++ b/rag-web-ui/backend/app/tools/srs_reqs_qwen/src/llm_interface.py
@@ -0,0 +1,197 @@
+# src/llm_interface.py
+"""
+LLM接口模块 - 支持多个LLM提供商
+"""
+
+import logging
+import json
+from abc import ABC, abstractmethod
+from typing import Dict, List, Optional, Any
+
+from .utils import get_env_or_config
+
+logger = logging.getLogger(__name__)
+
+
class LLMInterface(ABC):
    """Abstract base class for LLM providers.

    Concrete providers implement ``call`` (plain-text completion) and
    ``call_json`` (completion parsed into a JSON dict).
    """

    def __init__(self, api_key: str = None, model: str = None, **kwargs):
        """Store credentials, model name and provider-specific options.

        Args:
            api_key: provider API key.
            model: model identifier.
            **kwargs: extra parameters (e.g. temperature, max_tokens).
        """
        self.api_key = api_key
        self.model = model
        self.extra_params = kwargs

    @abstractmethod
    def call(self, prompt: str) -> str:
        """Send ``prompt`` to the provider and return the raw text reply."""

    @abstractmethod
    def call_json(self, prompt: str) -> Dict[str, Any]:
        """Send ``prompt`` and return the reply parsed as a JSON dict."""

    def validate_config(self) -> bool:
        """Report whether both an API key and a model name are configured."""
        return bool(self.api_key) and bool(self.model)
+
+
class QwenLLM(LLMInterface):
    """Alibaba Cloud Qwen (DashScope) implementation of LLMInterface."""

    def __init__(self, api_key: str = None, model: str = "qwen-plus",
                 api_endpoint: str = None, **kwargs):
        """
        Initialize the Qwen client.

        Args:
            api_key: Alibaba Cloud API key
            model: model name (e.g. qwen-plus, qwen-turbo)
            api_endpoint: API endpoint URL (defaults to the OpenAI-compatible mode)
            **kwargs: extra generation parameters
        """
        super().__init__(api_key, model, **kwargs)
        self.api_endpoint = api_endpoint or "https://dashscope.aliyuncs.com/compatible-mode/v1"
        self._check_dashscope_import()

    def _check_dashscope_import(self) -> None:
        """Verify the dashscope package is importable; re-raise ImportError if not."""
        try:
            import dashscope
            self.dashscope = dashscope
        except ImportError:
            logger.error("dashscope库未安装,请运行: pip install dashscope")
            raise

    def call(self, prompt: str) -> str:
        """
        Call the Qwen model with a plain-text prompt.

        Args:
            prompt: prompt text

        Returns:
            The model's text reply.

        Raises:
            ValueError: when api_key or model is not configured.
            Exception: when the API reports a non-200 status or the call fails.
        """
        if not self.validate_config():
            raise ValueError("LLM配置不完整(api_key或model未设置)")

        try:
            from dashscope import Generation

            # Set the API key on the dashscope module (module-global in the SDK).
            self.dashscope.api_key = self.api_key

            # Build the request - dashscope 1.7.0 style chat call.
            response = Generation.call(
                model=self.model,
                messages=[
                    {'role': 'user', 'content': prompt}
                ],
                result_format='message'  # request message-shaped output
            )

            # Debug traces of the raw response.
            logger.debug(f"API响应类型: {type(response)}")
            logger.debug(f"API响应内容: {response}")

            # The SDK may hand back either a plain dict or a response object;
            # both shapes are handled below.
            if isinstance(response, dict):
                # dict-shaped response
                status_code = response.get('status_code', 200)
                if status_code == 200:
                    output = response.get('output', {})
                    if 'choices' in output:
                        return output['choices'][0]['message']['content']
                    elif 'text' in output:
                        return output['text']
                    else:
                        # Fall back to whatever 'text' holds (or the dict repr).
                        return output.get('text', str(output))
                else:
                    error_msg = response.get('message', response.get('code', 'Unknown error'))
                    logger.error(f"千问API返回错误: {error_msg}")
                    raise Exception(f"API调用失败: {error_msg}")
            else:
                # object-shaped response
                if hasattr(response, 'status_code') and response.status_code == 200:
                    output = response.output
                    if hasattr(output, 'choices'):
                        return output.choices[0].message.content
                    elif hasattr(output, 'text'):
                        return output.text
                    else:
                        return str(output)
                elif hasattr(response, 'status_code'):
                    error_msg = getattr(response, 'message', str(response))
                    raise Exception(f"API调用失败: {error_msg}")
                else:
                    # Unknown shape: best-effort stringification.
                    return str(response)

        except Exception as e:
            logger.error(f"调用千问LLM失败: {e}")
            raise

    def call_json(self, prompt: str) -> Dict[str, Any]:
        """
        Call Qwen and parse the reply as JSON.

        Args:
            prompt: prompt text

        Returns:
            Parsed JSON dict; on parse failure, a dict with 'error' and
            'raw_response' keys so the caller can inspect the raw reply.
        """
        # Append the JSON-format instruction to the prompt.
        json_prompt = prompt + "\n\n请确保响应是有效的JSON格式。"

        response = self.call(json_prompt)

        try:
            # First try parsing the whole reply directly.
            return json.loads(response)
        except json.JSONDecodeError:
            # Then try a fenced ```json ... ``` block.
            try:
                import re
                # Look for a fenced JSON code block.
                json_match = re.search(r'```json\s*(.*?)\s*```', response, re.DOTALL)
                if json_match:
                    return json.loads(json_match.group(1))

                # Finally try any brace-delimited object in the reply.
                json_match = re.search(r'\{.*\}', response, re.DOTALL)
                if json_match:
                    return json.loads(json_match.group(0))

            except Exception as e:
                logger.warning(f"无法从响应中提取JSON: {e}")

        # Everything failed: surface the raw reply for debugging.
        logger.error(f"无法解析LLM响应为JSON: {response}")
        return {"error": "Failed to parse response as JSON", "raw_response": response}
diff --git a/rag-web-ui/backend/app/tools/srs_reqs_qwen/src/requirement_extractor.py b/rag-web-ui/backend/app/tools/srs_reqs_qwen/src/requirement_extractor.py
new file mode 100644
index 0000000..dbfef14
--- /dev/null
+++ b/rag-web-ui/backend/app/tools/srs_reqs_qwen/src/requirement_extractor.py
@@ -0,0 +1,1008 @@
+# -*- coding: utf-8 -*-
+"""
+需求提取器模块 - LLM增强版
+使用阿里云千问大模型智能提取和分类需求
+"""
+
+import re
+import json
+import logging
+from typing import List, Dict, Optional, Tuple, Any
+from .document_parser import Section
+from .settings import AppSettings
+from .requirement_id_generator import RequirementIDGenerator
+from .requirement_splitter import RequirementSplitter
+
+logger = logging.getLogger(__name__)
+
+
class Requirement:
    """A single extracted requirement item."""

    def __init__(self, req_id: str, description: str, req_type: str = "functional",
                 section_number: str = "", section_title: str = "",
                 interface_name: str = "", interface_type: str = "",
                 section_uid: str = "",
                 source: str = "", destination: str = "",
                 source_type: str = "text", source_order: int = 0,
                 source_table_index: int = -1, source_row_span: str = ""):
        # Identity and classification
        self.id = req_id
        self.description = description
        self.type = req_type
        # Originating section
        self.section_number = section_number
        self.section_title = section_title
        self.section_uid = section_uid
        # Interface-specific attributes (meaningful when type == 'interface')
        self.interface_name = interface_name
        self.interface_type = interface_type
        self.source = source
        self.destination = destination
        # Provenance within the document (text vs table, global ordering)
        self.source_type = source_type
        self.source_order = source_order
        self.source_table_index = source_table_index
        self.source_row_span = source_row_span

    def to_dict(self) -> Dict:
        """Serialize to the Chinese-keyed dict used in the JSON output."""
        data = {
            "需求编号": self.id,
            "需求描述": self.description
        }
        # Interface requirements carry extra routing fields.
        if self.type == 'interface':
            data["接口名称"] = self.interface_name
            data["接口类型"] = self.interface_type
            data["来源"] = self.source
            data["目的地"] = self.destination
        return data

    def __repr__(self) -> str:
        return f"Requirement(id='{self.id}', type='{self.type}')"
+
+
+class RequirementExtractor:
+ """需求提取器 - LLM增强版"""
+
+ def __init__(self, config: Dict = None, llm=None):
+ self.config = config or {}
+ self.llm = llm
+ self.settings = AppSettings(self.config)
+ self.id_generator = RequirementIDGenerator(self.settings.type_prefix)
+ self.splitter = None
+ if self.settings.splitter_enabled:
+ self.splitter = RequirementSplitter(
+ max_sentence_len=self.settings.splitter_max_sentence_len,
+ min_clause_len=self.settings.splitter_min_clause_len,
+ )
+ self.requirements: List[Requirement] = []
+ self._req_counters: Dict[str, Dict[str, int]] = {} # {section_number: {type: count}}
+ self._global_order = 0
+
+ def extract_from_sections(self, sections: List[Section]) -> List[Requirement]:
+ """
+ 从章节列表中提取需求
+
+ Args:
+ sections: 解析后的章节列表
+
+ Returns:
+ 需求列表
+ """
+ self.requirements = []
+ self._req_counters = {}
+ self._global_order = 0
+
+ for section in sections:
+ self._process_section(section)
+
+ # 去重后统一连续重编号,避免出现跳号。
+ if self.settings.renumber_enabled:
+ self.requirements = self._renumber_requirements_continuous(self.requirements)
+
+ logger.info(f"共提取 {len(self.requirements)} 个需求项")
+ return self.requirements
+
+ def _process_section(self, section: Section, depth: int = 0) -> None:
+ """递归处理章节,提取需求"""
+ # 检查是否应该跳过此章节
+ if self._should_skip_section(section):
+ logger.debug(f"跳过非需求章节: {section.number} {section.title}")
+ for child in section.children:
+ self._process_section(child, depth + 1)
+ return
+
+ # 先提取当前章节需求(包含表格)
+ reqs = self._extract_requirements_from_section(section)
+ self.requirements.extend(reqs)
+
+ # 再递归处理子章节
+ for child in section.children:
+ self._process_section(child, depth + 1)
+
+ def _should_skip_section(self, section: Section) -> bool:
+ """判断是否应该跳过此章节"""
+ if self.settings.is_non_requirement_section(section.title):
+ return True
+
+ # 检查是否是系统描述章节(如3.1.1通常是系统描述)
+ if self._is_system_description(section):
+ return True
+
+ return False
+
+ def _is_system_description(self, section: Section) -> bool:
+ """判断是否是系统描述章节(应该跳过)"""
+ # 检查标题
+ desc_keywords = ['系统描述', '功能描述', '概述', '示意图', '组成']
+ for kw in desc_keywords:
+ if kw in section.title:
+ return True
+
+ # 使用LLM判断
+ if self.llm and section.content:
+ try:
+ result = self._llm_check_system_description(section)
+ return result
+ except Exception as e:
+ logger.warning(f"LLM判断失败,使用规则判断: {e}")
+
+ return False
+
    def _llm_check_system_description(self, section: Section) -> bool:
        """Ask the LLM whether this section is a system-level overview.

        Returns True when the model's reply contains "是" — the caller then
        skips the section as non-extractable overview content.
        """
        prompt = f"""请判断以下章节是否是对系统的整体描述(而不是具体的功能需求)。

章节编号:{section.number}
章节标题:{section.title}
章节内容(前500字符):
{section.content[:500] if section.content else '无'}

请只回答"是"或"否":
- "是":这是系统整体描述、功能模块组成介绍、系统架构说明等概述性内容
- "否":这是具体的功能需求、接口需求、性能要求等可提取的需求内容

回答(只需要回答"是"或"否"):"""

        # NOTE(review): '是' in response also matches replies like "不是" —
        # consider response.startswith('是') if false positives are observed.
        response = self.llm.call(prompt).strip()
        return '是' in response
+
    def _extract_requirements_from_section(self, section: Section) -> List[Requirement]:
        """Extract requirements from one section, in document order.

        Each content block (text or table) is wrapped in a throwaway Section
        so the per-block extractors only see that block; provenance fields
        (source_type / source_order / source_table_index / source_row_span)
        are stamped afterwards, then the list is semantically re-merged and
        deduplicated.
        """
        requirements: List[Requirement] = []
        req_type = self._identify_requirement_type(section.title, section.content)

        blocks = self._iter_section_blocks(section)
        for block in blocks:
            block_type = block.get("type", "text")
            # NOTE(review): block_order is computed but never used — intended?
            block_order = int(block.get("order", 0))

            # Shallow per-block clone of the section; content/tables set below.
            temp_section = Section(
                level=section.level,
                title=section.title,
                number=section.number,
                content="",
                uid=section.uid,
            )

            if block_type == "text":
                temp_section.content = block.get("text", "")
                if self.llm:
                    block_reqs = self._llm_extract_requirements(temp_section, req_type)
                else:
                    block_reqs = self._rule_extract_requirements(temp_section, req_type)
                table_index = -1
            else:
                table_data = block.get("table", [])
                temp_section.tables = [table_data] if table_data else []
                table_index = int(block.get("table_index", -1))
                if self.llm and self.settings.table_llm_semantic_enabled:
                    block_reqs = self._llm_extract_table_requirements(temp_section, req_type)
                else:
                    block_reqs = self._rule_extract_requirements(temp_section, req_type)

            # Stamp provenance and optionally smooth the wording.
            for req in block_reqs:
                self._global_order += 1
                req.source_type = block_type
                req.source_order = self._global_order
                req.source_table_index = table_index
                req.source_row_span = block.get("row_span", "")
                req.description = self._maybe_light_rewrite(req.description, block_type)
                requirements.append(req)

        requirements = self._semantic_integrity_postprocess(requirements)
        return self._deduplicate_requirements(requirements)
+
+ def _iter_section_blocks(self, section: Section) -> List[Dict[str, Any]]:
+ """返回章节中的顺序块(文本/表格)。"""
+ blocks: List[Dict[str, Any]] = []
+ if getattr(section, "blocks", None):
+ for idx, block in enumerate(section.blocks, 1):
+ block_type = block.get("type")
+ if block_type == "text":
+ text = (block.get("text") or "").strip()
+ if text:
+ blocks.append({"type": "text", "text": text, "order": idx})
+ elif block_type == "table":
+ table = block.get("table")
+ table_index = int(block.get("table_index", -1))
+ if table_index >= 0 and table_index < len(section.tables):
+ table = section.tables[table_index]
+ if table:
+ blocks.append(
+ {
+ "type": "table",
+ "table": table,
+ "table_index": table_index,
+ "order": idx,
+ }
+ )
+
+ if blocks:
+ return blocks
+
+ # 兼容旧解析结果:无顺序块时退化为文本后表格。
+ fallback_order = 1
+ text = (section.content or "").strip()
+ if text:
+ blocks.append({"type": "text", "text": text, "order": fallback_order})
+ fallback_order += 1
+ for table_index, table in enumerate(section.tables):
+ blocks.append(
+ {
+ "type": "table",
+ "table": table,
+ "table_index": table_index,
+ "order": fallback_order,
+ }
+ )
+ fallback_order += 1
+ return blocks
+
+ def _llm_extract_requirements(self, section: Section, req_type: str) -> List[Requirement]:
+ """使用LLM提取需求"""
+ requirements = []
+
+ content_text = section.content or ""
+ table_text = self._format_tables_for_prompt(section.tables)
+ if len(content_text.strip()) < 8 and not table_text:
+ return requirements
+
+ # 根据需求类型构建不同的提示词
+ if req_type == 'interface':
+ # 接口需求:允许改写润色,并提取接口详细信息
+ prompt = f"""请从以下SRS文档章节中提取具体的接口需求,并对需求描述进行改写润色。同时智能识别每个接口的详细信息。
+
+章节编号:{section.number}
+章节标题:{section.title}
+章节内容:
+{content_text}
+
+章节内表格(若有):
+{table_text if table_text else '无'}
+
+提取要求:
+1. 只提取具体的、可验证的接口需求
+2. 不要提取系统描述、背景说明等非需求内容
+3. 去除原文中的换行符、表格格式噪声
+4. 对提取的需求描述进行改写润色,使其更加清晰完整
+5. 每条需求应该是完整的句子,描述清楚接口规范
+6. 如果有多条需求,请分别列出
+7. 对于每条接口需求,请智能识别以下信息:
+ - interface_name: 接口名称
+ - interface_type: 接口类型 (如:CAN接口、以太网接口、串口等)
+ - source: 来源/发送方(数据或信号从哪里来)
+ - destination: 目的地/接收方(数据或信号发送到哪里)
+8. 如果某个字段无法从文本中识别,请填写"未知"
+9. 若原文给出需求编号,请优先使用原文编号(req_id)
+
+请以JSON格式输出,格式如下:
+{{
+ "requirements": [
+ {{
+ "req_id": "需求编号(如有)",
+ "description": "接口需求描述",
+ "interface_name": "接口名称",
+ "interface_type": "接口类型",
+ "source": "来源",
+ "destination": "目的地"
+ }}
+ ]
+}}
+
+如果该章节没有可提取的需求,返回空数组:
+{{"requirements": []}}
+
+JSON输出:"""
+ else:
+ # 功能需求、其他需求:以原文为主,允许轻微扩写补全
+ prompt = f"""请从以下SRS文档章节中提取具体的软件需求。以原文为主,允许轻微扩写补全语义。
+
+章节编号:{section.number}
+章节标题:{section.title}
+章节内容:
+{content_text}
+
+章节内表格(若有):
+{table_text if table_text else '无'}
+
+提取要求:
+1. 同时提取正文与表格中的具体、可验证的软件需求
+2. 不要提取系统描述、背景说明等非需求内容
+3. 需求描述应保留原文大部分词语(建议保留率>=70%),仅做轻微补充以增强语义完整性
+4. 严禁改变任何数值、阈值、状态名、信号名和逻辑条件
+5. 去除原文中的多余换行符和表格格式符号,但保留语句内容
+5. 每条需求应该是完整的句子
+6. 如果有多条需求,请分别列出
+7. 如果一段需求描述内有多条需求点,必须拆分成多个独立需求项
+8. 拆分判定:出现“并/并且/同时/然后/且/以及”,或一条句子中出现多个动作(如判断+监测+发送)时必须拆分
+9. 每条需求尽量满足“单一动作、可单独验证”
+8. 过滤重复或过于相似的需求,只保留独特的需求
+9. 若原文给出需求编号,请优先使用原文编号(req_id)
+
+请以JSON格式输出,格式如下:
+{{
+ "requirements": [
+ {{"req_id": "需求编号(如有)", "description": "需求描述1"}},
+ {{"req_id": "需求编号(如有)", "description": "需求描述2"}}
+ ]
+}}
+
+如果该章节没有可提取的需求,返回空数组:
+{{"requirements": []}}
+
+JSON输出:"""
+
+ try:
+ response = self.llm.call(prompt)
+ data = self._parse_llm_json_response(response)
+
+ if data and 'requirements' in data:
+ # 查找父需求编号(第一个合法完整编号的需求)
+ parent_req_id = ""
+ complete_id_pattern = r'^[A-Za-z0-9]{2,10}[-_].+$'
+ for req_data in data['requirements']:
+ temp_id = self._normalize_req_id(req_data.get('req_id', '') or req_data.get('id', ''))
+ if not temp_id:
+ temp_desc = req_data.get('description', '').strip()
+ temp_id, _ = self._extract_requirement_id_from_text(temp_desc)
+ # 验证是否为合法的完整编号格式
+ if temp_id and re.match(complete_id_pattern, temp_id):
+ parent_req_id = temp_id.replace('_', '-')
+ break
+
+ for i, req_data in enumerate(data['requirements'], 1):
+ desc = req_data.get('description', '').strip()
+ if desc and len(desc) > 5:
+ # 清理描述中的多余换行符和表格符号
+ desc = self._clean_description(desc)
+ split_descs = self._split_requirement_description(desc)
+ if not split_descs:
+ split_descs = [desc]
+
+ # 需求ID优先使用文档给出的编号
+ doc_req_id = self._normalize_req_id(req_data.get('req_id', '') or req_data.get('id', ''))
+ if not doc_req_id:
+ doc_req_id, desc = self._extract_requirement_id_from_text(desc)
+
+ for split_idx, split_desc in enumerate(split_descs, 1):
+ # 生成最终的需求ID(支持拆分后后缀)
+ req_id = self._generate_requirement_id(
+ req_type,
+ section.number,
+ i,
+ doc_req_id,
+ parent_req_id,
+ split_idx,
+ len(split_descs),
+ )
+
+ # 接口需求提取额外字段
+ interface_name = ""
+ interface_type = ""
+ source = ""
+ destination = ""
+ if req_type == 'interface':
+ interface_name = req_data.get('interface_name', '未知').strip()
+ interface_type = req_data.get('interface_type', '未知').strip()
+ source = req_data.get('source', '未知').strip()
+ destination = req_data.get('destination', '未知').strip()
+
+ req = Requirement(
+ req_id=req_id,
+ description=split_desc,
+ req_type=req_type,
+ section_number=section.number,
+ section_title=section.title,
+ section_uid=section.uid,
+ interface_name=interface_name,
+ interface_type=interface_type,
+ source=source,
+ destination=destination
+ )
+ requirements.append(req)
+ except Exception as e:
+ logger.warning(f"LLM提取需求失败: {e},使用规则提取")
+ return self._rule_extract_requirements(section, req_type)
+
+ return requirements
+
    def _build_table_requirements_rule(self, section: Section, req_type: str, start_index: int) -> List[Requirement]:
        """Build rule-based requirements from this section's tables only.

        Used in LLM mode as a recall supplement: table rows the model missed
        are still captured by the deterministic table extractor. Numbering
        continues from ``start_index``.
        """
        requirements: List[Requirement] = []
        table_requirements = self._extract_requirements_from_tables_rule(section.tables)
        if not table_requirements:
            return requirements

        # Parent ID = first well-formed full ID found in the table rows.
        parent_req_id = ""
        complete_id_pattern = r'^[A-Za-z0-9]{2,10}[-_].+$'
        for temp_id, _ in table_requirements:
            if temp_id and re.match(complete_id_pattern, temp_id):
                parent_req_id = temp_id.replace('_', '-')
                break

        index = start_index
        for doc_req_id, desc in table_requirements:
            split_descs = self._split_requirement_description(desc)
            if not split_descs:
                split_descs = [desc]

            for split_idx, split_desc in enumerate(split_descs, 1):
                req_id = self._generate_requirement_id(
                    req_type=req_type,
                    section_number=section.number,
                    index=index,
                    doc_req_id=doc_req_id,
                    parent_req_id=parent_req_id,
                    split_index=split_idx,
                    split_total=len(split_descs),
                )
                requirements.append(
                    Requirement(
                        req_id=req_id,
                        description=split_desc,
                        req_type=req_type,
                        section_number=section.number,
                        section_title=section.title,
                        section_uid=section.uid,
                    )
                )
            index += 1

        return requirements
+
    def _llm_extract_table_requirements(self, section: Section, req_type: str) -> List[Requirement]:
        """Extract table requirements semantically via the LLM.

        Only the first table of the (per-block) section is used. Time-series
        command tables may be merged into a single requirement depending on
        settings. Falls back to rule-based extraction when there is no LLM,
        the model returns nothing usable, or the call fails.
        """
        if not self.llm or not section.tables:
            return self._rule_extract_requirements(section, req_type)

        table = section.tables[0]
        is_sequence_table = self._is_time_series_table(table)
        table_text = self._format_tables_for_prompt([table])
        # Hint the model whether to merge the whole table into one requirement.
        merge_hint = "是" if is_sequence_table and self.settings.sequence_table_merge == "single_requirement" else "否"

        prompt = f"""请从下列表格中提取并组织软件需求,要求以语义完整的需求句输出。

章节编号:{section.number}
章节标题:{section.title}
需求类型:{req_type}
该表是否按时间序列指令组织:{merge_hint}

表格内容:
{table_text}

提取规则:
1. 不是简单逐字抄表格,请结合列含义组织成完整需求句。
2. 保留原文大部分关键词、阈值、数值、状态名,不得改变逻辑和数值。
3. 允许轻微补充主语或上下文,使语义更完整。
4. 若为时间序列指令表,优先合并为1条需求,描述完整执行序列。
5. 若有明显独立语义点,可输出多条需求。

请输出JSON:
{{
  "requirements": [
    {{"req_id": "可为空", "description": "完整需求描述"}}
  ]
}}"""

        try:
            response = self.llm.call(prompt)
            data = self._parse_llm_json_response(response)
            requirements: List[Requirement] = []
            if data and isinstance(data.get("requirements"), list):
                for i, req_data in enumerate(data["requirements"], 1):
                    desc = self._clean_description(req_data.get("description", ""))
                    if not desc:
                        continue
                    doc_req_id = self._normalize_req_id(req_data.get("req_id", ""))
                    req_id = self._generate_requirement_id(req_type, section.number, i, doc_req_id, "")
                    requirements.append(
                        Requirement(
                            req_id=req_id,
                            description=desc,
                            req_type=req_type,
                            section_number=section.number,
                            section_title=section.title,
                            section_uid=section.uid,
                            source_type="table",
                        )
                    )

            # Empty model output: fall back to the deterministic extractor.
            if not requirements:
                return self._rule_extract_requirements(section, req_type)
            return requirements
        except Exception as e:
            logger.warning(f"LLM表格语义化提取失败,回退规则模式: {e}")
            return self._rule_extract_requirements(section, req_type)
+
    def _maybe_light_rewrite(self, description: str, source_type: str) -> str:
        """Lightly expand a description via the LLM, guarded by fidelity checks.

        Only runs in LLM mode with rewriting enabled, and only for table-born
        or very short descriptions. The rewrite is discarded unless it keeps
        enough of the original wording, stays within the length budget and
        preserves every numeric token.
        """
        description = self._clean_description(description)
        if not description:
            return description
        if not self.llm or not self.settings.llm_light_rewrite_enabled:
            return description

        # Table rows and very short sentences usually need context added.
        need_rewrite = source_type == "table" or len(description) < 28
        if not need_rewrite:
            return description

        prompt = f"""请对下面需求做轻微扩写,使语义更完整。

原文:{description}

要求:
1. 保留原文大部分表述,不改变核心语义。
2. 不得修改任何数值、阈值、状态名称、信号名称。
3. 只允许补充必要主语/宾语,长度尽量控制在原文的1.25倍以内。
4. 仅返回改写后的单句文本。"""

        try:
            rewritten = self._clean_description(self.llm.call(prompt))
            if not rewritten:
                return description

            # Fidelity gates: keep the original unless the rewrite passes all.
            preserve_ratio = self._calculate_preserve_ratio(description, rewritten)
            growth_ratio = len(rewritten) / max(len(description), 1)
            if preserve_ratio < self.settings.preserve_ratio_min:
                return description
            if growth_ratio > self.settings.max_length_growth_ratio:
                return description
            if not self._numbers_consistent(description, rewritten):
                return description
            return rewritten
        except Exception:
            # Best-effort polish: any LLM failure falls back to the original.
            return description
+
+ def _calculate_preserve_ratio(self, original: str, rewritten: str) -> float:
+ original_tokens = [c for c in re.sub(r"\s+", "", original) if c]
+ rewritten_tokens = set(c for c in re.sub(r"\s+", "", rewritten) if c)
+ if not original_tokens:
+ return 1.0
+ hit = sum(1 for c in original_tokens if c in rewritten_tokens)
+ return hit / max(len(original_tokens), 1)
+
+ def _numbers_consistent(self, original: str, rewritten: str) -> bool:
+ pattern = r"[<>≤≥]?\d+(?:\.\d+)?(?:[A-Za-z%]*)"
+ orig_nums = set(re.findall(pattern, original))
+ rewrite_nums = set(re.findall(pattern, rewritten))
+ return orig_nums.issubset(rewrite_nums)
+
+ def _semantic_integrity_postprocess(self, requirements: List[Requirement]) -> List[Requirement]:
+ """语义完整性后处理:合并被误拆的紧耦合需求链。"""
+ if not self.settings.semantic_guard_enabled or not requirements:
+ return requirements
+
+ merged: List[Requirement] = [requirements[0]]
+ for req in requirements[1:]:
+ prev = merged[-1]
+ if self._should_merge_semantic(prev, req):
+ prev.description = self._clean_description(
+ f"{prev.description.rstrip(';;。')};{req.description.lstrip(';;。')}"
+ )
+ else:
+ merged.append(req)
+ return merged
+
+ def _should_merge_semantic(self, prev: Requirement, curr: Requirement) -> bool:
+ if prev.section_uid != curr.section_uid or prev.type != curr.type:
+ return False
+
+ prev_desc = prev.description
+ curr_desc = curr.description
+
+ if curr_desc.startswith(("该", "其", "上述", "此", "该报警", "该信号")):
+ return True
+ if self.settings.preserve_alarm_chain and ("报警" in prev_desc and "持续" in curr_desc):
+ return True
+ if self.settings.preserve_condition_action_chain:
+ if "进入整星安全模式" in prev_desc and ("过放电模式" in curr_desc or "发送" in curr_desc):
+ return True
+ if "若蓄电池充电" in prev_desc and (
+ "退出低功耗模式" in curr_desc or "热控" in curr_desc or "姿控" in curr_desc
+ ):
+ return True
+ if ("产生" in prev_desc and "报警" in prev_desc and "持续" in curr_desc):
+ return True
+ return False
+
+ def _renumber_requirements_continuous(self, requirements: List[Requirement]) -> List[Requirement]:
+ """按文档顺序对去重后的需求重新连续编号。"""
+ if not requirements:
+ return requirements
+
+ ordered = sorted(requirements, key=lambda r: (r.source_order, r.section_number or ""))
+ counters: Dict[Tuple[str, str], int] = {}
+
+ for req in ordered:
+ section_key = req.section_uid or req.section_number or "NA"
+ prefix = self.settings.type_prefix.get(req.type, "FR")
+ counter_key = (section_key, prefix)
+ counters[counter_key] = counters.get(counter_key, 0) + 1
+ section_part = req.section_number if req.section_number else "NA"
+ req.id = f"{prefix}-{section_part}-{counters[counter_key]}"
+
+ return ordered
+
    def _rule_extract_requirements(self, section: Section, req_type: str) -> List[Requirement]:
        """Rule-based requirement extraction (fallback when no LLM is used).

        Pulls enumerated items (a)/1)/①) from the body text — or the whole
        body as a single requirement when no list markers exist and there is
        no table — then appends the deterministic table-derived requirements.
        """
        requirements = []
        content = section.content

        # Body-text requirements.
        descriptions = []
        if content and len(content.strip()) >= 8:
            descriptions = self._extract_list_items(content)

        if not descriptions:
            # No list items: treat the whole body as one requirement
            # (only when the section also has no tables).
            desc = self._clean_description(content)
            if len(desc) > 5 and not section.tables:
                descriptions = [f"{section.title}:{desc}"]

        # Table-derived requirements.
        table_requirements = self._extract_requirements_from_tables_rule(section.tables)

        # Parent ID = first well-formed full ID found in text, then tables.
        parent_req_id = ""
        complete_id_pattern = r'^[A-Za-z0-9]{2,10}[-_].+$'
        for desc in descriptions:
            temp_id, _ = self._extract_requirement_id_from_text(desc)
            # Accept only a well-formed full ID (e.g. NY01-01).
            if temp_id and re.match(complete_id_pattern, temp_id):
                parent_req_id = temp_id.replace('_', '-')
                break
        if not parent_req_id:
            for temp_id, _ in table_requirements:
                # Accept only a well-formed full ID.
                if temp_id and re.match(complete_id_pattern, temp_id):
                    parent_req_id = temp_id.replace('_', '-')
                    break

        index = 1
        for desc in descriptions:
            desc = self._clean_description(desc)
            if len(desc) > 5:
                doc_req_id, cleaned_desc = self._extract_requirement_id_from_text(desc)
                split_descs = self._split_requirement_description(cleaned_desc)
                if not split_descs:
                    split_descs = [cleaned_desc]

                for split_idx, split_desc in enumerate(split_descs, 1):
                    req_id = self._generate_requirement_id(
                        req_type,
                        section.number,
                        index,
                        doc_req_id,
                        parent_req_id,
                        split_idx,
                        len(split_descs),
                    )
                    req = Requirement(
                        req_id=req_id,
                        description=split_desc,
                        req_type=req_type,
                        section_number=section.number,
                        section_title=section.title,
                        section_uid=section.uid
                    )
                    requirements.append(req)
                index += 1

        for doc_req_id, desc in table_requirements:
            split_descs = self._split_requirement_description(desc)
            if not split_descs:
                split_descs = [desc]

            for split_idx, split_desc in enumerate(split_descs, 1):
                req_id = self._generate_requirement_id(
                    req_type,
                    section.number,
                    index,
                    doc_req_id,
                    parent_req_id,
                    split_idx,
                    len(split_descs),
                )
                req = Requirement(
                    req_id=req_id,
                    description=split_desc,
                    req_type=req_type,
                    section_number=section.number,
                    section_title=section.title,
                    section_uid=section.uid
                )
                requirements.append(req)
            index += 1

        return requirements
+
+ def _extract_list_items(self, content: str) -> List[str]:
+ """提取列表项"""
+ items = []
+
+ # 模式1: a) b) c) 或 1) 2) 3)
+ patterns = [
+ r'([a-z][\))])\s*(.+?)(?=[a-z][\))]|$)',
+ r'(\d+[\))])\s*(.+?)(?=\d+[\))]|$)',
+ r'([①②③④⑤⑥⑦⑧⑨⑩])\s*(.+?)(?=[①②③④⑤⑥⑦⑧⑨⑩]|$)'
+ ]
+
+ for pattern in patterns:
+ matches = re.findall(pattern, content, re.DOTALL)
+ if matches:
+ for marker, text in matches:
+ text = text.strip()
+ if text and len(text) > 5:
+ items.append(text)
+ break
+
+ return items
+
+ def _identify_requirement_type(self, title: str, content: str) -> str:
+ """
+ 通过标题和内容识别需求类型
+
+ 根据章节标题和内容判断需求类型:
+ - 标题或内容中包含"接口"相关词汇 -> 接口需求
+ - 其他情况 -> 功能需求(默认)
+
+ 注意:不能仅靠标题判断是否为功能需求,若无法识别具体类型,默认为功能需求
+ """
+ return self.settings.detect_requirement_type(title, content)
+
    def _generate_requirement_id(self, req_type: str, section_number: str, index: int,
                                 doc_req_id: str = "", parent_req_id: str = "",
                                 split_index: int = 1, split_total: int = 1) -> str:
        """
        Generate a requirement id (three-level priority), delegating to the
        id generator.

        Priority rules:
        1. If doc_req_id is a legal complete id (2-10 leading letters or
           digits followed by a separator), use it directly.
           E.g. NY01-01, FR-3.1.2-1, AIRSAT07-GD03-04.
        2. If doc_req_id is a short code/sequence number and parent_req_id
           is available, combine them as {parent_req_id}-{doc_req_id},
           e.g. NY01-01-K101.
        3. Otherwise auto-generate {PREFIX}-{section_number}-{index},
           e.g. IR-4.1.1-1 (dots in the section number are preserved).

        Args:
            req_type: Requirement type key.
            section_number: Section number of the source section.
            index: Running index within the section.
            doc_req_id: Id/code extracted from the document, if any.
            parent_req_id: Parent requirement id (for sub-requirements).
            split_index: 1-based position among split fragments.
            split_total: Total number of fragments the description was
                split into; when > 1 an "-S<k>" suffix is appended.
        """
        return self.id_generator.generate(
            req_type=req_type,
            section_number=section_number,
            index=index,
            doc_req_id=doc_req_id,
            parent_req_id=parent_req_id,
            split_index=split_index,
            split_total=split_total,
        )
+
    def _normalize_req_id(self, req_id: str) -> str:
        """Normalise a requirement id (whitespace trim; "" for empty input)."""
        return self.id_generator.normalize(req_id)
+
+ def _clean_description(self, text: str) -> str:
+ """清理需求描述"""
+ # 替换换行符为空格
+ text = re.sub(r'\n+', ' ', text)
+ # 替换多个空格为单个空格
+ text = re.sub(r'\s+', ' ', text)
+ # 去除表格噪声
+ text = re.sub(r'[\|│┃]+', ' ', text)
+ # 去除首尾空白
+ text = text.strip()
+ # 限制长度
+ if len(text) > 1000:
+ text = text[:1000] + '...'
+ return text
+
+ def _format_tables_for_prompt(self, tables: List[List[List[str]]]) -> str:
+ """格式化表格内容用于LLM提示词"""
+ if not tables:
+ return ""
+ lines = []
+ for idx, table in enumerate(tables, 1):
+ lines.append(f"表格{idx}:")
+ for row in table:
+ row_text = " | ".join(self._clean_description(cell) for cell in row if cell)
+ if row_text:
+ lines.append(row_text)
+ return "\n".join(lines)
+
    def _extract_requirement_id_from_text(self, text: str) -> Tuple[Optional[str], str]:
        """
        Extract a leading requirement id from *text* via the id generator.

        Supported formats:
        1. Complete ids: NY01-01, FR-3.1.2-1
        2. Short codes / list markers: K101, D61, a), 1)

        Returns:
            (id, remaining_text); id is None when nothing is recognised.
        """
        return self.id_generator.extract_from_text(text)
+
+ def _split_requirement_description(self, text: str) -> List[str]:
+ if not text:
+ return []
+ if "时间序列" in text and "执行指令" in text:
+ return [text]
+ if not self.splitter:
+ return [text]
+ return self.splitter.split(text)
+
+ def _deduplicate_requirements(self, requirements: List[Requirement]) -> List[Requirement]:
+ seen = set()
+ deduped: List[Requirement] = []
+ for req in requirements:
+ normalized_desc = re.sub(r'\s+', ' ', req.description).strip().lower()
+ key = (req.type, normalized_desc)
+ if key in seen:
+ continue
+ seen.add(key)
+ deduped.append(req)
+ return deduped
+
    def _extract_requirements_from_tables_rule(self, tables: List[List[List[str]]]) -> List[Tuple[Optional[str], str]]:
        """Extract (requirement-id, description) pairs from tables by rules.

        For each table:
        - a recognised time-sequence table may be collapsed into a single
          merged requirement (controlled by settings.sequence_table_merge);
        - otherwise the header row is scanned for an id column and a
          description column, and each data row yields one pair. The id
          may be None when the row carries no recognisable number.
        """
        results = []
        if not tables:
            return results

        # Header keywords used to locate the id / description columns.
        id_keywords = ['需求编号', '编号', '序号', 'id', 'ID']
        desc_keywords = ['需求', '描述', '内容', '说明', '要求']

        for table in tables:
            if not table:
                continue

            # Time-sequence tables become one merged requirement instead
            # of one requirement per row.
            if self._is_time_series_table(table) and self.settings.sequence_table_merge == "single_requirement":
                merged_desc = self._build_sequence_table_requirement(table)
                if merged_desc:
                    results.append((None, merged_desc))
                continue

            header = table[0] if table else []
            header_lower = [h.lower() for h in header]
            id_idx = None
            desc_idx = None
            for i, h in enumerate(header_lower):
                if any(k.lower() in h for k in id_keywords):
                    id_idx = i
                if any(k.lower() in h for k in desc_keywords):
                    desc_idx = i

            # Skip the header row only when it was actually recognised.
            start_row = 1 if (id_idx is not None or desc_idx is not None) else 0
            for row in table[start_row:]:
                if not row:
                    continue
                row = [self._clean_description(cell) for cell in row]
                if not any(row):
                    continue

                req_id = None
                desc = ""
                if id_idx is not None and id_idx < len(row):
                    req_id = self._normalize_req_id(row[id_idx])
                if desc_idx is not None and desc_idx < len(row):
                    desc = row[desc_idx]
                if not desc:
                    # No explicit description column: join the whole row.
                    desc = " | ".join([cell for cell in row if cell])

                # The description may itself start with an id; retry.
                if not req_id:
                    req_id, desc = self._extract_requirement_id_from_text(desc)

                if desc and len(desc) > 5:
                    results.append((req_id, desc))

        return results
+
    def _is_time_series_table(self, table: List[List[str]]) -> bool:
        """Heuristically decide whether *table* is a time-sequence table.

        True when the header mentions both a time column and an
        action/command column, or when enough data rows start with a
        "T0"/"T0+Ns"-style timestamp (threshold from settings).
        """
        if not table:
            return False

        header = " ".join(cell for cell in table[0] if cell)
        header_has_time = any(k in header for k in ["时间", "时刻", "time", "TIME", "T0"])
        header_has_action = any(k in header for k in ["指令", "动作", "行为", "操作", "名称"])

        # Matches "T0" optionally followed by an offset such as "+5s".
        # NOTE(review): if both pluses in [++-] are ASCII, the class also
        # admits ',' via the '+'-'-' range — confirm the second '+' is the
        # intended full-width sign.
        time_pattern = re.compile(r"^T\s*0(?:\s*[++-]\s*\d+\s*[sS秒]?)?$")
        data_rows = table[1:] if len(table) > 1 else []
        time_like_rows = 0
        for row in data_rows:
            if not row:
                continue
            first_cell = (row[0] or "").strip() if row else ""
            if time_pattern.match(first_cell):
                time_like_rows += 1

        return (header_has_time and header_has_action) or (time_like_rows >= self.settings.merge_time_series_rows_min)
+
+ def _build_sequence_table_requirement(self, table: List[List[str]]) -> str:
+ if not table or len(table) < 2:
+ return ""
+
+ header = table[0]
+ time_idx = 0
+ action_idx = 1 if len(header) > 1 else 0
+ for i, col in enumerate(header):
+ col_text = (col or "")
+ if any(k in col_text for k in ["时间", "时刻", "time", "TIME"]):
+ time_idx = i
+ if any(k in col_text for k in ["指令", "动作", "行为", "操作", "名称"]):
+ action_idx = i
+
+ sequence_parts = []
+ for row in table[1:]:
+ if not row:
+ continue
+ row = [self._clean_description(c) for c in row]
+ if not any(row):
+ continue
+ t = row[time_idx] if time_idx < len(row) else ""
+ a = row[action_idx] if action_idx < len(row) else ""
+ if t and a:
+ sequence_parts.append(f"{t}执行{a}")
+ elif a:
+ sequence_parts.append(a)
+
+ if not sequence_parts:
+ return ""
+ return "系统应按以下时间序列依次执行指令:" + ";".join(sequence_parts)
+
+ def _parse_llm_json_response(self, response: str) -> Optional[Dict]:
+ """解析LLM的JSON响应"""
+ try:
+ return json.loads(response)
+ except json.JSONDecodeError:
+ # 尝试提取JSON代码块
+ try:
+ json_match = re.search(r'```(?:json)?\s*(.*?)\s*```', response, re.DOTALL)
+ if json_match:
+ return json.loads(json_match.group(1))
+
+ # 尝试查找JSON对象
+ json_match = re.search(r'\{.*\}', response, re.DOTALL)
+ if json_match:
+ return json.loads(json_match.group(0))
+ except Exception:
+ pass
+
+ logger.warning(f"无法解析LLM响应为JSON: {response[:200]}")
+ return None
+
+ def get_statistics(self) -> Dict:
+ """获取需求统计信息"""
+ stats = {
+ 'total': len(self.requirements),
+ 'by_type': {}
+ }
+
+ for req in self.requirements:
+ req_type = req.type
+ if req_type not in stats['by_type']:
+ stats['by_type'][req_type] = 0
+ stats['by_type'][req_type] += 1
+
+ return stats
diff --git a/rag-web-ui/backend/app/tools/srs_reqs_qwen/src/requirement_id_generator.py b/rag-web-ui/backend/app/tools/srs_reqs_qwen/src/requirement_id_generator.py
new file mode 100644
index 0000000..564caf1
--- /dev/null
+++ b/rag-web-ui/backend/app/tools/srs_reqs_qwen/src/requirement_id_generator.py
@@ -0,0 +1,74 @@
+# -*- coding: utf-8 -*-
+"""
+需求编号生成与提取工具。
+"""
+
+import re
+from typing import Optional, Tuple, Dict
+
+
class RequirementIDGenerator:
    """Generate and extract requirement identifiers.

    Three-level priority when generating an id:
    1. a complete document id (e.g. "NY01-01") is used verbatim
       (underscores canonicalised to dashes);
    2. a short code plus a parent id are combined ("NY01-01-K101");
    3. otherwise an id is synthesised from the type prefix, section
       number and running index ("FR-3.1-2").
    """

    def __init__(self, type_prefix: Dict[str, str]):
        # Maps requirement-type key (e.g. "functional") to id prefix ("FR").
        self.type_prefix = type_prefix

    def normalize(self, req_id: str) -> str:
        """Trim surrounding whitespace; empty/None input normalises to ""."""
        if not req_id:
            return ""
        return str(req_id).strip()

    def extract_from_text(self, text: str) -> Tuple[Optional[str], str]:
        """Split a leading requirement id off *text*.

        Returns (id, remaining_text); id is None when no leading id is
        recognised. Supported forms:
        1. complete ids: "NY01-01", "FR-3.1.2-1", "AIRSAT07-GD03-04";
        2. short codes: "K101", "D61";
        3. list markers: "a)", "1)".
        """
        if not text:
            return None, text

        # Complete id: letter-led alphanumeric prefix followed by dash/
        # underscore-separated alphanumeric segments.
        # Fix: the prefix (and later segments) may contain digits/letters,
        # so "NY01-01" and "AIRSAT07-GD03-04" are now recognised — this
        # matches the completeness check used in _generate_base below.
        pattern1 = r"^\s*([A-Za-z][A-Za-z0-9]{1,9}[-_][A-Za-z0-9]+(?:[-.][A-Za-z0-9]+)*)\s*[::\)\]】]?\s*(.+)$"
        match = re.match(pattern1, text)
        if match:
            return match.group(1).strip(), match.group(2).strip()

        # Short code: a single letter followed by digits, e.g. "K101".
        pattern2 = r"^\s*([A-Za-z]\d+)\s*[::\)\]】]?\s*(.+)$"
        match = re.match(pattern2, text)
        if match:
            return match.group(1).strip(), match.group(2).strip()

        # List marker such as "a)" or "1)"; the closing bracket is dropped.
        pattern3 = r"^\s*([a-z0-9]{1,2}[\))])\s*(.+)$"
        match = re.match(pattern3, text)
        if match:
            code = match.group(1).strip().rstrip("))")
            return code, match.group(2).strip()

        return None, text

    def generate(
        self,
        req_type: str,
        section_number: str,
        index: int,
        doc_req_id: str = "",
        parent_req_id: str = "",
        split_index: int = 1,
        split_total: int = 1,
    ) -> str:
        """Build the final id, appending "-S<k>" when the requirement was
        split into *split_total* > 1 fragments."""
        base_id = self._generate_base(req_type, section_number, index, doc_req_id, parent_req_id)
        if split_total > 1:
            return f"{base_id}-S{split_index}"
        return base_id

    def _generate_base(
        self,
        req_type: str,
        section_number: str,
        index: int,
        doc_req_id: str,
        parent_req_id: str,
    ) -> str:
        """Apply the three-level priority described on the class."""
        if doc_req_id:
            # A complete id starts with 2-10 alphanumerics followed by a
            # separator; use it as-is, canonicalising "_" to "-".
            complete_id_pattern = r"^[A-Za-z0-9]{2,10}[-_].+$"
            if re.match(complete_id_pattern, doc_req_id):
                return doc_req_id.replace("_", "-")

        if doc_req_id and parent_req_id:
            # Short code under a known parent: combine them.
            return f"{parent_req_id}-{doc_req_id}"

        # Fallback: synthesise from type prefix, section number and index.
        prefix = self.type_prefix.get(req_type, "FR")
        section_part = section_number if section_number else "NA"
        return f"{prefix}-{section_part}-{index}"
diff --git a/rag-web-ui/backend/app/tools/srs_reqs_qwen/src/requirement_splitter.py b/rag-web-ui/backend/app/tools/srs_reqs_qwen/src/requirement_splitter.py
new file mode 100644
index 0000000..b062082
--- /dev/null
+++ b/rag-web-ui/backend/app/tools/srs_reqs_qwen/src/requirement_splitter.py
@@ -0,0 +1,188 @@
+# -*- coding: utf-8 -*-
+"""
+需求长句拆分器。
+将复合长句拆分为可验证的原子需求片段。
+"""
+
+import re
+from typing import List
+
+
class RequirementSplitter:
    """Split long compound requirement sentences into atomic fragments.

    The splitter first cuts on strong punctuation, then refines over-long
    clauses on commas and connector words, and finally re-merges fragments
    that form one semantic chain (condition->action, anaphora, alarm
    follow-ups) or are too short to stand alone.
    """

    # Verbs that mark an actionable requirement fragment.
    ACTION_HINTS = [
        "产生",
        "发送",
        "设置",
        "进入",
        "退出",
        "关闭",
        "开启",
        "监测",
        "判断",
        "记录",
        "上传",
        "重启",
        "恢复",
        "关断",
        "断电",
        "加电",
        "执行",
        "进行",
    ]

    # Conjunctions that join two actions ("and", "then", "meanwhile", ...).
    CONNECTOR_HINTS = ["并", "并且", "同时", "然后", "且", "以及", "及"]
    # Words that open a condition ("if", "when", "where", ...).
    CONDITIONAL_HINTS = ["如果", "当", "若", "在", "其中", "此时", "满足"]
    # Anaphoric pronouns ("this", "the above", ...) marking continuation.
    CONTEXT_PRONOUN_HINTS = ["该", "其", "上述", "此", "这些", "那些"]

    def __init__(self, max_sentence_len: int = 120, min_clause_len: int = 12):
        # Fragments longer than this get refined further.
        self.max_sentence_len = max_sentence_len
        # Fragments shorter than this are merged into their predecessor.
        self.min_clause_len = min_clause_len

    def split(self, text: str) -> List[str]:
        """Return the atomic fragments of *text* (possibly just [text])."""
        cleaned = self._clean(text)
        if not cleaned:
            return []

        if self._contains_strong_semantic_chain(cleaned):
            return [cleaned]

        # First split into primary fragments on strong punctuation.
        base_parts = self._split_by_strong_punctuation(cleaned)

        result: List[str] = []
        for part in base_parts:
            if len(part) <= self.max_sentence_len:
                result.append(part)
                continue

            # Refine over-long fragments on commas and connector words.
            refined = self._split_long_clause(part)
            result.extend(refined)

        result = self._merge_semantic_chain(result)
        result = self._merge_too_short(result)
        return self._deduplicate(result)

    def _contains_strong_semantic_chain(self, text: str) -> bool:
        """True when *text* is a complete condition-action chain that
        should not be force-split."""
        # A complete condition-action chain must stay in one piece.
        has_conditional = any(h in text for h in ["如果", "若", "当"])
        has_result = "则" in text or "时" in text
        action_count = sum(1 for h in self.ACTION_HINTS if h in text)
        if has_conditional and has_result and action_count >= 2:
            return True
        return False

    def _clean(self, text: str) -> str:
        """Collapse whitespace and strip trailing/leading separators."""
        text = re.sub(r"\s+", " ", text or "")
        return text.strip(" ;;。")

    def _split_by_strong_punctuation(self, text: str) -> List[str]:
        """Cut on semicolons / full stops; drop empty fragments."""
        chunks = re.split(r"[;;。]", text)
        return [c.strip(" ,,") for c in chunks if c and c.strip(" ,,")]

    def _split_long_clause(self, clause: str) -> List[str]:
        """Refine one over-long clause on commas, re-assembling fragments
        that belong together per _should_split."""
        if self._contains_strong_semantic_chain(clause):
            return [clause]

        raw_parts = [x.strip() for x in re.split(r"[,,]", clause) if x.strip()]
        if len(raw_parts) <= 1:
            return [clause]

        assembled: List[str] = []
        current = raw_parts[0]

        for fragment in raw_parts[1:]:
            if self._should_split(current, fragment):
                assembled.append(current.strip())
                current = fragment
            else:
                current = f"{current},{fragment}"

        if current.strip():
            assembled.append(current.strip())

        return assembled

    def _should_split(self, current: str, fragment: str) -> bool:
        """Decide whether *fragment* starts a new requirement after
        *current* (True) or continues it (False)."""
        if len(current) < self.min_clause_len:
            return False

        # Fragments opening with an anaphoric pronoun continue the
        # previous meaning and must not be cut off.
        if any(fragment.startswith(h) for h in self.CONTEXT_PRONOUN_HINTS):
            return False

        # Successor fragments inside a condition chain ("则/并/同时")
        # preferably stay within the same requirement.
        if self._contains_strong_semantic_chain(current + "," + fragment):
            return False

        frag_starts_with_condition = any(fragment.startswith(h) for h in self.CONDITIONAL_HINTS)
        if frag_starts_with_condition:
            return False

        has_connector = any(fragment.startswith(h) for h in self.CONNECTOR_HINTS)
        has_action = any(h in fragment for h in self.ACTION_HINTS)
        current_has_action = any(h in current for h in self.ACTION_HINTS)

        # Connector + action word, with an action already in the current
        # fragment: prefer splitting.
        if has_connector and has_action and current_has_action:
            return True

        # No connector, but a new action appears and the whole is getting
        # long: split as well.
        if has_action and current_has_action and len(current) >= self.max_sentence_len // 2:
            return True

        return False

    def _merge_semantic_chain(self, parts: List[str]) -> List[str]:
        """Re-join adjacent fragments that form one semantic chain."""
        if not parts:
            return []

        merged: List[str] = [parts[0]]
        for part in parts[1:]:
            prev = merged[-1]
            if self._should_merge(prev, part):
                merged[-1] = f"{prev};{part}"
            else:
                merged.append(part)
        return merged

    def _should_merge(self, prev: str, current: str) -> bool:
        """Decide whether *current* continues *prev* and must be re-joined."""
        # Anaphoric opening, e.g. "该报警信号...".
        if any(current.startswith(h) for h in self.CONTEXT_PRONOUN_HINTS):
            return True

        # A persistence condition/action after an alarm belongs to the
        # same chain as the alarm itself.
        if ("报警" in prev and "持续" in current) or ("产生" in prev and "报警" in prev and "持续" in current):
            return True

        # State transition + follow-up control action stay merged.
        if ("进入" in prev or "设置" in prev or "发送" in prev) and ("则" in current or "连续" in current):
            return True

        # Re-merge fragments of a split condition chain.
        if self._contains_strong_semantic_chain(prev + "," + current):
            return True

        return False

    def _merge_too_short(self, parts: List[str]) -> List[str]:
        """Append fragments shorter than min_clause_len to their
        predecessor instead of emitting them standalone."""
        if not parts:
            return []

        merged: List[str] = []
        for part in parts:
            if merged and len(part) < self.min_clause_len:
                merged[-1] = f"{merged[-1]},{part}"
            else:
                merged.append(part)
        return merged

    def _deduplicate(self, parts: List[str]) -> List[str]:
        """Drop fragments that are identical modulo whitespace,
        preserving first-seen order."""
        seen = set()
        result = []
        for part in parts:
            key = re.sub(r"\s+", "", part)
            if key and key not in seen:
                seen.add(key)
                result.append(part)
        return result
diff --git a/rag-web-ui/backend/app/tools/srs_reqs_qwen/src/settings.py b/rag-web-ui/backend/app/tools/srs_reqs_qwen/src/settings.py
new file mode 100644
index 0000000..55e7fc0
--- /dev/null
+++ b/rag-web-ui/backend/app/tools/srs_reqs_qwen/src/settings.py
@@ -0,0 +1,162 @@
+# -*- coding: utf-8 -*-
+"""
+统一配置与映射模块。
+将需求类型、章节过滤、输出映射和拆分参数收敛到单一入口。
+"""
+
from dataclasses import dataclass
from typing import Any, Dict, List, Optional
+
+
@dataclass
class RequirementTypeRule:
    """One classification rule: keyword hits assign a requirement type."""

    key: str             # internal type key, e.g. "functional"
    chinese_name: str    # display name from the config, e.g. "功能需求"
    prefix: str          # requirement-id prefix, e.g. "FR"
    keywords: List[str]  # keywords whose presence selects this type
    priority: int        # lower value = matched first


class AppSettings:
    """Read the tool config and expose a single, unified access interface.

    Collects requirement-type rules, section filtering, output mappings
    and splitter parameters from the (optional) config dict, falling back
    to the class-level defaults for anything missing.
    """

    # Chinese type name (as used in config files) -> internal type key.
    TYPE_NAME_MAP = {
        "功能需求": "functional",
        "接口需求": "interface",
        "性能需求": "performance",
        "安全需求": "security",
        "可靠性需求": "reliability",
        "其他需求": "other",
    }

    # Section-title keywords that mark non-requirement sections.
    DEFAULT_NON_REQUIREMENT_SECTIONS = [
        "标识",
        "系统概述",
        "文档概述",
        "引用文档",
        "合格性规定",
        "需求可追踪性",
        "注释",
        "附录",
        "范围",
        "概述",
    ]

    # Internal type key -> Chinese display name used in the output.
    DEFAULT_TYPE_CHINESE = {
        "functional": "功能需求",
        "interface": "接口需求",
        "performance": "其他需求",
        "security": "其他需求",
        "reliability": "其他需求",
        "other": "其他需求",
    }

    # Internal type key -> requirement-id prefix.
    DEFAULT_PREFIX = {
        "functional": "FR",
        "interface": "IR",
        "performance": "PR",
        "security": "SR",
        "reliability": "RR",
        "other": "OR",
    }

    def __init__(self, config: Optional[Dict[str, Any]] = None):
        # Fix: the parameter defaults to None, so it is Optional — the
        # old annotation (Dict[str, Any] = None) was misleading.
        self.config = config or {}

        document_cfg = self.config.get("document", {})
        self.non_requirement_sections = document_cfg.get(
            "non_requirement_sections", self.DEFAULT_NON_REQUIREMENT_SECTIONS
        )

        extraction_cfg = self.config.get("extraction", {})
        req_types_cfg = extraction_cfg.get("requirement_types", {})

        self.requirement_rules = self._build_rules(req_types_cfg)
        self.type_prefix = self._build_type_prefix(req_types_cfg)
        self.type_chinese = self._build_type_chinese(req_types_cfg)

        # Sentence-splitter tuning.
        splitter_cfg = extraction_cfg.get("splitter", {})
        self.splitter_max_sentence_len = int(splitter_cfg.get("max_sentence_len", 120))
        self.splitter_min_clause_len = int(splitter_cfg.get("min_clause_len", 12))
        self.splitter_enabled = bool(splitter_cfg.get("enabled", True))

        # Guards that keep condition-action / alarm chains unsplit.
        semantic_cfg = extraction_cfg.get("semantic_guard", {})
        self.semantic_guard_enabled = bool(semantic_cfg.get("enabled", True))
        self.preserve_condition_action_chain = bool(
            semantic_cfg.get("preserve_condition_action_chain", True)
        )
        self.preserve_alarm_chain = bool(semantic_cfg.get("preserve_alarm_chain", True))

        # Table handling: LLM-assisted parsing and time-series merging.
        table_cfg = extraction_cfg.get("table_strategy", {})
        self.table_llm_semantic_enabled = bool(table_cfg.get("llm_semantic_enabled", True))
        self.sequence_table_merge = table_cfg.get("sequence_table_merge", "single_requirement")
        self.merge_time_series_rows_min = int(table_cfg.get("merge_time_series_rows_min", 3))

        # LLM light-rewrite policy (how much the text may change).
        rewrite_cfg = extraction_cfg.get("rewrite_policy", {})
        self.llm_light_rewrite_enabled = bool(rewrite_cfg.get("llm_light_rewrite_enabled", True))
        self.preserve_ratio_min = float(rewrite_cfg.get("preserve_ratio_min", 0.65))
        self.max_length_growth_ratio = float(rewrite_cfg.get("max_length_growth_ratio", 1.25))

        # Requirement renumbering policy.
        renumber_cfg = extraction_cfg.get("renumber_policy", {})
        self.renumber_enabled = bool(renumber_cfg.get("enabled", True))
        self.renumber_mode = renumber_cfg.get("mode", "section_continuous")

    def _build_rules(self, req_types_cfg: Dict[str, Dict[str, Any]]) -> List[RequirementTypeRule]:
        """Build classification rules from config, sorted by priority."""
        rules: List[RequirementTypeRule] = []
        if not req_types_cfg:
            # Default to two rule types to stay compatible with the old
            # behaviour (interface first, then functional).
            return [
                RequirementTypeRule(
                    key="interface",
                    chinese_name="接口需求",
                    prefix="IR",
                    keywords=["接口", "interface", "api", "串口", "通信", "CAN", "以太网"],
                    priority=1,
                ),
                RequirementTypeRule(
                    key="functional",
                    chinese_name="功能需求",
                    prefix="FR",
                    keywords=["功能", "控制", "处理", "监测", "显示"],
                    priority=2,
                ),
            ]

        for zh_name, item in req_types_cfg.items():
            key = self.TYPE_NAME_MAP.get(zh_name, "other")
            rules.append(
                RequirementTypeRule(
                    key=key,
                    chinese_name=zh_name,
                    prefix=item.get("prefix", self.DEFAULT_PREFIX.get(key, "FR")),
                    keywords=item.get("keywords", []),
                    priority=int(item.get("priority", 99)),
                )
            )

        return sorted(rules, key=lambda x: x.priority)

    def _build_type_prefix(self, req_types_cfg: Dict[str, Dict[str, Any]]) -> Dict[str, str]:
        """Merge configured id prefixes over the defaults."""
        mapping = dict(self.DEFAULT_PREFIX)
        for zh_name, key in self.TYPE_NAME_MAP.items():
            if zh_name in req_types_cfg:
                mapping[key] = req_types_cfg[zh_name].get("prefix", mapping[key])
        return mapping

    def _build_type_chinese(self, req_types_cfg: Dict[str, Dict[str, Any]]) -> Dict[str, str]:
        """Merge configured Chinese display names over the defaults."""
        mapping = dict(self.DEFAULT_TYPE_CHINESE)
        for zh_name, key in self.TYPE_NAME_MAP.items():
            if zh_name in req_types_cfg:
                mapping[key] = zh_name
        return mapping

    def is_non_requirement_section(self, title: str) -> bool:
        """True when *title* contains any non-requirement-section keyword."""
        return any(keyword in title for keyword in self.non_requirement_sections)

    def detect_requirement_type(self, title: str, content: str) -> str:
        """Classify by keyword rules over title + first 500 chars of
        content (case-insensitive); defaults to "functional"."""
        combined_text = f"{title} {(content or '')[:500]}".lower()
        for rule in self.requirement_rules:
            for keyword in rule.keywords:
                if keyword.lower() in combined_text:
                    return rule.key
        return "functional"
diff --git a/rag-web-ui/backend/app/tools/srs_reqs_qwen/src/utils.py b/rag-web-ui/backend/app/tools/srs_reqs_qwen/src/utils.py
new file mode 100644
index 0000000..53e5a65
--- /dev/null
+++ b/rag-web-ui/backend/app/tools/srs_reqs_qwen/src/utils.py
@@ -0,0 +1,134 @@
+# src/utils.py
+"""
+工具函数模块 - 提供各种辅助功能
+"""
+
+import os
+import logging
+from pathlib import Path
+from typing import Dict, Any, List, Optional
+import yaml
+
+logger = logging.getLogger(__name__)
+
+
def load_config(config_path: Optional[str] = None) -> Dict[str, Any]:
    """
    Load the YAML configuration file.

    Args:
        config_path: Path to the config file; when None, defaults to
            ../config.yaml relative to this module.

    Returns:
        The parsed configuration dict; {} when the file is missing,
        empty, unreadable or invalid YAML.
    """
    if config_path is None:
        config_path = os.path.join(os.path.dirname(__file__), '..', 'config.yaml')

    if not os.path.exists(config_path):
        logger.warning(f"配置文件不存在: {config_path}")
        return {}

    try:
        with open(config_path, 'r', encoding='utf-8') as f:
            config = yaml.safe_load(f)
    except (OSError, yaml.YAMLError) as e:
        # Narrowed from a bare Exception: only I/O and YAML parse errors
        # are expected here; anything else should surface to the caller.
        logger.error(f"加载配置文件失败: {e}")
        return {}

    logger.info(f"成功加载配置文件: {config_path}")
    # safe_load returns None for an empty file; normalise to {}.
    return config or {}
+
+
def setup_logging(config: Dict[str, Any]) -> None:
    """
    Configure the root logger from the "logging" section of *config*.

    Recognised keys: level (name, case-insensitive), format, file
    (optional log file path).
    """
    logging_config = config.get('logging', {})
    level = logging_config.get('level', 'INFO')
    log_format = logging_config.get('format', '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    log_file = logging_config.get('file', None)

    # Fix: tolerate lowercase or unknown level names instead of raising
    # (getattr(logging, 'debug') previously resolved to the *function*
    # logging.debug and broke basicConfig).
    numeric_level = getattr(logging, str(level).upper(), logging.INFO)
    if not isinstance(numeric_level, int):
        numeric_level = logging.INFO

    logging.basicConfig(
        level=numeric_level,
        format=log_format,
        handlers=[
            logging.StreamHandler(),
            # NullHandler keeps the handler list well-formed when no log
            # file is configured.
            logging.FileHandler(log_file) if log_file else logging.NullHandler(),
        ],
    )
+
+
def validate_file_path(file_path: str, allowed_extensions: Optional[List[str]] = None) -> bool:
    """
    Validate that *file_path* points at an existing regular file.

    Args:
        file_path: Path to check.
        allowed_extensions: Optional list of acceptable suffixes
            (e.g. ['.pdf', '.docx']); compared case-insensitively.
            (Fix: the default is None, so the annotation is Optional.)

    Returns:
        True when the path is acceptable; False otherwise, with the
        reason logged.
    """
    if not os.path.exists(file_path):
        logger.error(f"文件不存在: {file_path}")
        return False

    if not os.path.isfile(file_path):
        logger.error(f"路径不是文件: {file_path}")
        return False

    if allowed_extensions:
        ext = Path(file_path).suffix.lower()
        if ext not in allowed_extensions:
            logger.error(f"不支持的文件格式: {ext}")
            return False

    return True
+
+
def ensure_directory_exists(directory: str) -> bool:
    """
    Create *directory* (including missing parents) when it does not exist.

    Args:
        directory: Directory path.

    Returns:
        True when the directory exists or was created; False when
        creation failed (the error is logged).
    """
    try:
        Path(directory).mkdir(parents=True, exist_ok=True)
    except Exception as e:
        logger.error(f"创建目录失败: {e}")
        return False
    return True
+
+
def get_env_or_config(env_var: str, config_dict: Dict[str, Any],
                      default: Any = None) -> Any:
    """
    Resolve a setting, preferring the environment over the config dict.

    Args:
        env_var: Environment variable name (also used as the config key).
        config_dict: Configuration mapping to fall back to.
        default: Returned when neither source provides a usable value.

    Returns:
        The environment value if set and non-empty; else the config value
        (unless it is an unexpanded "${...}" placeholder); else *default*.
    """
    # Environment wins; note an empty string is treated as unset.
    env_value = os.environ.get(env_var)
    if env_value:
        return env_value

    config_value = config_dict.get(env_var)
    if config_value:
        # Fix: only strings can carry "${...}" placeholders; non-string
        # values (ints, bools, lists) previously crashed on .startswith.
        if isinstance(config_value, str) and config_value.startswith('${'):
            return default
        return config_value

    return default
diff --git a/rag-web-ui/backend/app/tools/srs_reqs_qwen/tool.py b/rag-web-ui/backend/app/tools/srs_reqs_qwen/tool.py
new file mode 100644
index 0000000..70acaf7
--- /dev/null
+++ b/rag-web-ui/backend/app/tools/srs_reqs_qwen/tool.py
@@ -0,0 +1,148 @@
+from __future__ import annotations
+
+import os
+from pathlib import Path
+from typing import Any, Dict, List
+
+import yaml
+
+from app.core.config import settings
+from app.tools.base import ToolDefinition
+from app.tools.registry import ToolRegistry
+from app.tools.srs_reqs_qwen.src.document_parser import create_parser
+from app.tools.srs_reqs_qwen.src.json_generator import JSONGenerator
+from app.tools.srs_reqs_qwen.src.llm_interface import QwenLLM
+from app.tools.srs_reqs_qwen.src.requirement_extractor import RequirementExtractor
+
+
class SRSTool:
    """Tool wrapper that extracts structured requirements from SRS documents.

    Registers itself with the ToolRegistry on construction and exposes a
    single ``run`` entry point that parses a document, extracts
    requirements and returns both a normalised requirement list and the
    raw generator output.
    """

    TOOL_NAME = "srs.requirement_extractor"

    # Static tool contract published to the registry.
    DEFINITION = ToolDefinition(
        name=TOOL_NAME,
        version="1.0.0",
        description="Extract structured requirements from SRS documents.",
        input_schema={
            "type": "object",
            "properties": {
                "file_path": {"type": "string"},
                "enable_llm": {"type": "boolean"},
            },
            "required": ["file_path"],
        },
        output_schema={
            "type": "object",
            "properties": {
                "document_name": {"type": "string"},
                "generated_at": {"type": "string"},
                "requirements": {"type": "array"},
                "statistics": {"type": "object"},
                "raw_output": {"type": "object"},
            },
        },
    )

    # Default priority label per requirement type (高/中/低 = high/medium/low).
    PRIORITY_BY_TYPE = {
        "functional": "中",
        "interface": "高",
        "performance": "中",
        "security": "高",
        "reliability": "中",
        "other": "低",
    }

    def __init__(self) -> None:
        # Register the tool contract; the module-level singleton below
        # ensures this runs only once per process.
        ToolRegistry.register(self.DEFINITION)

    def run(self, file_path: str, enable_llm: bool = True) -> Dict[str, Any]:
        """Parse *file_path*, extract requirements and return the results.

        Returns a dict with the document name/title, generation timestamp,
        the normalised requirement list, per-type statistics and the raw
        JSON-generator output.
        """
        config = self._load_config()
        llm = self._build_llm(config, enable_llm=enable_llm)

        parser = create_parser(file_path)
        if llm is not None:
            parser.set_llm(llm)

        sections = parser.parse()
        document_title = parser.get_document_title() or Path(file_path).name

        extractor = RequirementExtractor(config, llm=llm)
        extracted = extractor.extract_from_sections(sections)
        stats = extractor.get_statistics()

        generator = JSONGenerator(config)
        raw_output = generator.generate(sections, extracted, document_title)

        requirements = self._normalize_requirements(extracted)

        return {
            "document_name": Path(file_path).name,
            "document_title": document_title,
            # Keys are the Chinese field names emitted by JSONGenerator
            # ("文档元数据" = document metadata, "生成时间" = generated at).
            "generated_at": raw_output.get("文档元数据", {}).get("生成时间"),
            "requirements": requirements,
            "statistics": stats,
            "raw_output": raw_output,
        }

    def _normalize_requirements(self, extracted: List[Any]) -> List[Dict[str, Any]]:
        """Convert extracted requirement objects into plain output dicts."""
        normalized: List[Dict[str, Any]] = []
        for index, req in enumerate(extracted, start=1):
            description = (req.description or "").strip()
            # Titles are the first 40 chars of the description, with a
            # numbered fallback for empty descriptions.
            title = description[:40] if description else f"需求项 {index}"
            source_field = f"{req.section_number} {req.section_title}".strip() or "文档解析"
            normalized.append(
                {
                    "id": req.id,
                    "title": title,
                    "description": description,
                    "priority": self.PRIORITY_BY_TYPE.get(req.type, "中"),
                    "acceptance_criteria": [description] if description else ["待补充验收标准"],
                    "source_field": source_field,
                    "section_number": req.section_number,
                    "section_title": req.section_title,
                    "requirement_type": req.type,
                    "sort_order": index,
                }
            )
        return normalized

    def _load_config(self) -> Dict[str, Any]:
        """Load default_config.yaml (if present) and overlay LLM settings
        from the application settings / environment."""
        config_path = Path(__file__).with_name("default_config.yaml")
        if config_path.exists():
            with config_path.open("r", encoding="utf-8") as handle:
                config = yaml.safe_load(handle) or {}
        else:
            config = {}

        # App settings win over whatever the YAML file specified.
        config.setdefault("llm", {})
        config["llm"]["model"] = settings.DASH_SCOPE_CHAT_MODEL or settings.OPENAI_MODEL
        config["llm"]["api_key"] = settings.DASH_SCOPE_API_KEY or os.getenv("DASHSCOPE_API_KEY", "")
        config["llm"]["api_base"] = settings.DASH_SCOPE_API_BASE
        # The LLM is only considered enabled when an API key is available.
        config["llm"]["enabled"] = bool(config["llm"].get("api_key"))
        return config

    def _build_llm(self, config: Dict[str, Any], enable_llm: bool) -> QwenLLM | None:
        """Build the Qwen LLM client, or return None when disabled or no
        API key is configured."""
        if not enable_llm:
            return None

        llm_cfg = config.get("llm", {})
        api_key = llm_cfg.get("api_key")
        if not api_key:
            return None

        return QwenLLM(
            api_key=api_key,
            model=llm_cfg.get("model", "qwen3-max"),
            api_endpoint=llm_cfg.get("api_base") or settings.DASH_SCOPE_API_BASE,
            temperature=llm_cfg.get("temperature", 0.3),
            max_tokens=llm_cfg.get("max_tokens", 1024),
        )
+
+
# Lazily-created module-level singleton; SRSTool registers its tool
# definition on construction, so only one instance is ever built.
_SRS_TOOL_SINGLETON: SRSTool | None = None


def get_srs_tool() -> SRSTool:
    """Return the shared SRSTool instance, creating it on first call."""
    global _SRS_TOOL_SINGLETON
    if _SRS_TOOL_SINGLETON is None:
        _SRS_TOOL_SINGLETON = SRSTool()
    return _SRS_TOOL_SINGLETON
diff --git a/rag-web-ui/backend/clean_db.py b/rag-web-ui/backend/clean_db.py
new file mode 100644
index 0000000..4af12e6
--- /dev/null
+++ b/rag-web-ui/backend/clean_db.py
@@ -0,0 +1,23 @@
+from sqlalchemy import create_engine, text
+from app.core.config import settings
+
# Tables are dropped in one pass with FK checks disabled, so the order of
# this list does not matter.
_TABLES_TO_DROP = (
    "processing_tasks",
    "alembic_version",
    "document_chunks",
    "chat_knowledge_bases",
    "documents",
    "knowledge_bases",
    "messages",
    "chats",
    "users",
)


def clean_database():
    """Drop all application tables from the configured MySQL database.

    Foreign-key checks are disabled for the duration of the drops (and
    always restored, even when a drop fails) so the drop order is
    irrelevant.
    """
    engine = create_engine(settings.get_database_url)
    with engine.connect() as conn:
        conn.execute(text("SET FOREIGN_KEY_CHECKS = 0"))
        try:
            for table in _TABLES_TO_DROP:
                # Table names come from the fixed tuple above, never from
                # user input, so string interpolation is safe here.
                conn.execute(text(f"DROP TABLE IF EXISTS {table}"))
        finally:
            # Fix: previously FK enforcement stayed off if a drop raised.
            conn.execute(text("SET FOREIGN_KEY_CHECKS = 1"))
        conn.commit()
+
if __name__ == "__main__":
    # Destructive maintenance script: only run against a database you
    # intend to wipe.
    clean_database()
    print("Database cleaned successfully")
\ No newline at end of file
diff --git a/rag-web-ui/backend/diff.py b/rag-web-ui/backend/diff.py
new file mode 100644
index 0000000..5ee8961
--- /dev/null
+++ b/rag-web-ui/backend/diff.py
@@ -0,0 +1,166 @@
+# 同步算法的实现和验证
+# 算法说明:
+# 1. 使用哈希表(defaultdict)建立content_hash到chunks的映射,时间复杂度O(n)
+# 2. 使用集合操作找到相同位置的chunks,时间复杂度O(n)
+# 3. 使用双指针法进行剩余chunks的匹配,时间复杂度O(n)
+# 总体时间复杂度: O(n),其中n为chunks的总数
+# 空间复杂度: O(n),主要用于存储哈希表
+
from collections import defaultdict
from dataclasses import dataclass
from typing import Dict, List, Optional, Set, TypedDict
+
@dataclass
class Chunk:
    """One synchronisable document chunk.

    Mirrors the dict payloads consumed by synchronize_chunks; ``uuid`` is
    only set for chunks that already exist in the backend.
    """

    index: int                  # position of the chunk within the document
    content_hash: str           # hash of chunk_content, used for matching
    chunk_content: str          # the chunk text itself
    # Fix: the default is None, so the field is Optional — the old
    # annotation (uuid: str = None) was misleading.
    uuid: Optional[str] = None  # backend id; None for freshly generated chunks
+
class SyncResult(TypedDict):
    """Outcome of one chunk synchronisation pass."""

    to_create: List[Dict]  # new chunks to insert (no uuid yet)
    to_update: List[Dict]  # existing chunks whose index/content moved
    to_delete: List[str]   # uuids of chunks no longer present
+
# Sample "old" chunks as the backend would hold them (with uuids).
old_chunks = [
    {'uuid': 'uuid_1', 'index': 0, 'content_hash': 'hash_A', 'chunk_content': '这是第一段。'},
    {'uuid': 'uuid_2', 'index': 1, 'content_hash': 'hash_B', 'chunk_content': '这是第二段。'},
    {'uuid': 'uuid_3', 'index': 2, 'content_hash': 'hash_C', 'chunk_content': '这是第三段。'},
    {'uuid': 'uuid_4', 'index': 3, 'content_hash': 'hash_D', 'chunk_content': '这是第四段。'},
    {'uuid': 'uuid_5', 'index': 4, 'content_hash': 'hash_E', 'chunk_content': '这是第五段。'},
]

# Sample "new" chunks as produced by GitHub Actions (no uuids yet);
# note the duplicated hash_D entries exercising repeated-content matching.
new_chunks = [
    {'index': 0, 'content_hash': 'hash_A', 'chunk_content': '这是第一段。'},
    {'index': 1, 'content_hash': 'hash_C', 'chunk_content': '这是第三段。'},
    {'index': 2, 'content_hash': 'hash_D', 'chunk_content': '这是第四段。'},
    {'index': 3, 'content_hash': 'hash_D', 'chunk_content': '这是第四段。'},
    {'index': 4, 'content_hash': 'hash_D', 'chunk_content': '这是第四段。'},
    {'index': 5, 'content_hash': 'hash_D', 'chunk_content': '这是第四段。'},
    {'index': 6, 'content_hash': 'hash_D', 'chunk_content': '这是第四段。'},
]
+
def synchronize_chunks(old_chunks: List[Dict], new_chunks: List[Dict]) -> SyncResult:
    """
    Diff two chunk lists and report what to create, update and delete.

    Chunks are grouped by content_hash; within each group the old and new
    entries are sorted by index and matched pairwise with two cursors. A
    pair whose index distance is within the threshold counts as an update
    (the old chunk's uuid is kept); unmatched old chunks are deleted and
    unmatched new chunks are created.
    """
    # ---- input validation -------------------------------------------------
    if not isinstance(old_chunks, list) or not isinstance(new_chunks, list):
        raise TypeError("输入参数必须是列表类型")

    required_fields = {'index', 'content_hash', 'chunk_content'}
    old_required = required_fields | {'uuid'}
    for chunk in old_chunks:
        if not old_required.issubset(chunk.keys()):
            raise ValueError("旧chunks缺少必要字段")
    for chunk in new_chunks:
        if not required_fields.issubset(chunk.keys()):
            raise ValueError("新chunks缺少必要字段")

    # ---- group both sides by content hash ---------------------------------
    grouped_old = defaultdict(list)
    grouped_new = defaultdict(list)
    for entry in old_chunks:
        grouped_old[entry['content_hash']].append(entry)
    for entry in new_chunks:
        grouped_new[entry['content_hash']].append(entry)

    to_create = []
    to_update = []
    to_delete = []

    # Maximum index drift for two chunks to still count as "the same".
    threshold = 10

    # ---- per-hash two-cursor matching -------------------------------------
    for content_hash in set(grouped_old) | set(grouped_new):
        olds = sorted(grouped_old[content_hash], key=lambda c: c['index'])
        news = sorted(grouped_new[content_hash], key=lambda c: c['index'])

        oi = ni = 0
        while oi < len(olds) and ni < len(news):
            old_entry, new_entry = olds[oi], news[ni]

            if abs(old_entry['index'] - new_entry['index']) <= threshold:
                # Close enough: same content at (almost) the same place.
                to_update.append({
                    'uuid': old_entry['uuid'],
                    'index': new_entry['index'],
                    'content_hash': content_hash,
                    'chunk_content': new_entry['chunk_content'],
                })
                oi += 1
                ni += 1
            elif old_entry['index'] < new_entry['index']:
                # Old chunk has no plausible partner left -> delete it.
                to_delete.append(old_entry['uuid'])
                oi += 1
            else:
                # New chunk appears before any remaining old one -> create.
                to_create.append({
                    'index': new_entry['index'],
                    'content_hash': content_hash,
                    'chunk_content': new_entry['chunk_content'],
                })
                ni += 1

        # Leftovers on either side.
        to_delete.extend(entry['uuid'] for entry in olds[oi:])
        to_create.extend(
            {
                'index': entry['index'],
                'content_hash': content_hash,
                'chunk_content': entry['chunk_content'],
            }
            for entry in news[ni:]
        )

    return {
        'to_create': to_create,
        'to_update': to_update,
        'to_delete': to_delete,
    }
+
if __name__ == '__main__':
    # Smoke-test the sync algorithm against the sample data above and
    # print each action bucket ("null" marks an empty bucket).
    result = synchronize_chunks(old_chunks, new_chunks)

    print("需要创建的 chunks:")
    if result['to_create']:
        for chunk in result['to_create']:
            print(chunk)
    else:
        print("null")

    print("\n需要更新的 chunks:")
    if result['to_update']:
        for chunk in result['to_update']:
            print(chunk)
    else:
        print("null")

    print("\n需要删除的 chunks:")
    if result['to_delete']:
        for uuid in result['to_delete']:
            print(uuid)
    else:
        print("null")
\ No newline at end of file
diff --git a/rag-web-ui/backend/entrypoint.sh b/rag-web-ui/backend/entrypoint.sh
new file mode 100644
index 0000000..b0807b4
--- /dev/null
+++ b/rag-web-ui/backend/entrypoint.sh
@@ -0,0 +1,25 @@
#!/bin/sh

# Container entrypoint: wait for the database, apply migrations, start the API.

# exit on error
set -e

echo "Waiting for MySQL..."
# Poll until the `db` host accepts TCP connections on the MySQL port.
while ! nc -z db 3306; do
  sleep 1
done
echo "MySQL started"

echo "Running migrations..."
if alembic upgrade head; then
  echo "Migrations completed successfully"
else
  echo "Migration failed"
  exit 1
fi

echo "Starting application..."
# `exec` replaces the shell so uvicorn becomes PID 1 and receives container
# stop signals (SIGTERM) directly, enabling graceful shutdown.
if [ "$ENVIRONMENT" = "development" ]; then
  exec uvicorn app.main:app --host 0.0.0.0 --port 8000 --reload
else
  exec uvicorn app.main:app --host 0.0.0.0 --port 8000
fi
diff --git a/rag-web-ui/backend/nano_graphrag/__init__.py b/rag-web-ui/backend/nano_graphrag/__init__.py
new file mode 100644
index 0000000..fe20e4b
--- /dev/null
+++ b/rag-web-ui/backend/nano_graphrag/__init__.py
@@ -0,0 +1,7 @@
+from .graphrag import GraphRAG, QueryParam
+
+__version__ = "0.0.8.2"
+__author__ = "Jianbai Ye"
+__url__ = "https://github.com/gusye1234/nano-graphrag"
+
+# dp stands for data pack
diff --git a/rag-web-ui/backend/nano_graphrag/_llm.py b/rag-web-ui/backend/nano_graphrag/_llm.py
new file mode 100644
index 0000000..05cced9
--- /dev/null
+++ b/rag-web-ui/backend/nano_graphrag/_llm.py
@@ -0,0 +1,301 @@
+import json
+import numpy as np
+from typing import Optional, List, Any, Callable
+
+try:
+ import aioboto3
+except ImportError:
+ aioboto3 = None
+from openai import AsyncOpenAI, AsyncAzureOpenAI, APIConnectionError, RateLimitError
+
+from tenacity import (
+ retry,
+ stop_after_attempt,
+ wait_exponential,
+ retry_if_exception_type,
+)
+import os
+
+from ._utils import compute_args_hash, wrap_embedding_func_with_attrs
+from .base import BaseKVStorage
+
# Module-level singleton clients: created lazily by the getter functions
# below and reused for every subsequent call within this process.
global_openai_async_client = None
global_azure_openai_async_client = None
global_amazon_bedrock_async_client = None
+
+
def get_openai_async_client_instance():
    """Return the process-wide AsyncOpenAI client, creating it on first use."""
    global global_openai_async_client
    if global_openai_async_client is not None:
        return global_openai_async_client
    global_openai_async_client = AsyncOpenAI()
    return global_openai_async_client
+
+
def get_azure_openai_async_client_instance():
    """Return the process-wide AsyncAzureOpenAI client, creating it on first use."""
    global global_azure_openai_async_client
    if global_azure_openai_async_client is not None:
        return global_azure_openai_async_client
    global_azure_openai_async_client = AsyncAzureOpenAI()
    return global_azure_openai_async_client
+
+
def get_amazon_bedrock_async_client_instance():
    """Return the shared aioboto3 Session, creating it on first use.

    Raises:
        ImportError: if the optional aioboto3 dependency is not installed.
    """
    global global_amazon_bedrock_async_client
    if aioboto3 is None:
        raise ImportError(
            "aioboto3 is required for Amazon Bedrock support. Install it to use Bedrock providers."
        )
    if global_amazon_bedrock_async_client is not None:
        return global_amazon_bedrock_async_client
    global_amazon_bedrock_async_client = aioboto3.Session()
    return global_amazon_bedrock_async_client
+
+
@retry(
    stop=stop_after_attempt(5),
    wait=wait_exponential(multiplier=1, min=4, max=10),
    retry=retry_if_exception_type((RateLimitError, APIConnectionError)),
)
async def openai_complete_if_cache(
    model, prompt, system_prompt=None, history_messages=None, **kwargs
) -> str:
    """Call the OpenAI chat API, consulting an optional KV cache first.

    Args:
        model: Chat model name passed to the OpenAI API.
        prompt: Current user message.
        system_prompt: Optional system message prepended to the conversation.
        history_messages: Optional prior messages (OpenAI message dicts).
        **kwargs: Extra API options; ``hashing_kv`` (a BaseKVStorage) is
            popped out and used as a response cache keyed on a hash of
            (model, messages).

    Returns:
        The assistant's reply text (cached or fresh).
    """
    openai_async_client = get_openai_async_client_instance()
    hashing_kv: BaseKVStorage = kwargs.pop("hashing_kv", None)
    # Fresh list per call; `history_messages=None` avoids the shared
    # mutable-default-argument pitfall.
    messages = []
    if system_prompt:
        messages.append({"role": "system", "content": system_prompt})
    messages.extend(history_messages or [])
    messages.append({"role": "user", "content": prompt})
    if hashing_kv is not None:
        args_hash = compute_args_hash(model, messages)
        if_cache_return = await hashing_kv.get_by_id(args_hash)
        if if_cache_return is not None:
            return if_cache_return["return"]

    response = await openai_async_client.chat.completions.create(
        model=model, messages=messages, **kwargs
    )

    if hashing_kv is not None:
        await hashing_kv.upsert(
            {args_hash: {"return": response.choices[0].message.content, "model": model}}
        )
        await hashing_kv.index_done_callback()
    return response.choices[0].message.content
+
+
@retry(
    stop=stop_after_attempt(5),
    wait=wait_exponential(multiplier=1, min=4, max=10),
    retry=retry_if_exception_type((RateLimitError, APIConnectionError)),
)
async def amazon_bedrock_complete_if_cache(
    model, prompt, system_prompt=None, history_messages=[], **kwargs
) -> str:
    """Call Amazon Bedrock's `converse` API, consulting an optional KV cache.

    ``hashing_kv`` (a BaseKVStorage) may be supplied via kwargs; when present
    it caches replies keyed on a hash of (model, messages). Returns the first
    text block of the model's reply.
    """
    amazon_bedrock_async_client = get_amazon_bedrock_async_client_instance()
    hashing_kv: BaseKVStorage = kwargs.pop("hashing_kv", None)
    messages = []
    messages.extend(history_messages)
    # converse() expects message content as a list of blocks, not a string.
    messages.append({"role": "user", "content": [{"text": prompt}]})
    if hashing_kv is not None:
        args_hash = compute_args_hash(model, messages)
        if_cache_return = await hashing_kv.get_by_id(args_hash)
        if if_cache_return is not None:
            return if_cache_return["return"]

    inference_config = {
        "temperature": 0,
        "maxTokens": 4096 if "max_tokens" not in kwargs else kwargs["max_tokens"],
    }

    # The system prompt goes in the separate `system` argument rather than the
    # message list, per the converse API contract.
    async with amazon_bedrock_async_client.client(
        "bedrock-runtime",
        region_name=os.getenv("AWS_REGION", "us-east-1")
    ) as bedrock_runtime:
        if system_prompt:
            response = await bedrock_runtime.converse(
                modelId=model, messages=messages, inferenceConfig=inference_config,
                system=[{"text": system_prompt}]
            )
        else:
            response = await bedrock_runtime.converse(
                modelId=model, messages=messages, inferenceConfig=inference_config,
            )

    if hashing_kv is not None:
        await hashing_kv.upsert(
            {args_hash: {"return": response["output"]["message"]["content"][0]["text"], "model": model}}
        )
        await hashing_kv.index_done_callback()
    return response["output"]["message"]["content"][0]["text"]
+
+
def create_amazon_bedrock_complete_function(model_id: str) -> Callable:
    """
    Factory that builds a completion coroutine bound to one Bedrock model.

    Args:
        model_id (str): Amazon Bedrock model identifier (e.g., "us.anthropic.claude-3-sonnet-20240229-v1:0")

    Returns:
        Callable: Async completion function that delegates to
        ``amazon_bedrock_complete_if_cache`` with ``model_id`` pre-bound.
    """
    async def bedrock_complete(
        prompt: str,
        system_prompt: Optional[str] = None,
        history_messages: List[Any] = [],
        **kwargs
    ) -> str:
        return await amazon_bedrock_complete_if_cache(
            model_id,
            prompt,
            system_prompt=system_prompt,
            history_messages=history_messages,
            **kwargs
        )

    # Name the closure after its model so logs and tracebacks identify it.
    bedrock_complete.__name__ = f"{model_id}_complete"
    return bedrock_complete
+
+
async def gpt_4o_complete(
    prompt, system_prompt=None, history_messages=[], **kwargs
) -> str:
    """Convenience wrapper: complete `prompt` with the "gpt-4o" chat model."""
    reply = await openai_complete_if_cache(
        "gpt-4o",
        prompt,
        system_prompt=system_prompt,
        history_messages=history_messages,
        **kwargs,
    )
    return reply
+
+
async def gpt_4o_mini_complete(
    prompt, system_prompt=None, history_messages=[], **kwargs
) -> str:
    """Convenience wrapper: complete `prompt` with the "gpt-4o-mini" chat model."""
    reply = await openai_complete_if_cache(
        "gpt-4o-mini",
        prompt,
        system_prompt=system_prompt,
        history_messages=history_messages,
        **kwargs,
    )
    return reply
+
+
@wrap_embedding_func_with_attrs(embedding_dim=1024, max_token_size=8192)
@retry(
    stop=stop_after_attempt(5),
    wait=wait_exponential(multiplier=1, min=4, max=10),
    retry=retry_if_exception_type((RateLimitError, APIConnectionError)),
)
async def amazon_bedrock_embedding(texts: list[str]) -> np.ndarray:
    """Embed `texts` with Titan text-embed v2 (1024 dims); returns an (n, 1024) array."""
    session = get_amazon_bedrock_async_client_instance()

    async with session.client(
        "bedrock-runtime",
        region_name=os.getenv("AWS_REGION", "us-east-1")
    ) as bedrock_runtime:
        parsed_responses = []
        # The Titan embedding endpoint accepts one input text per invocation.
        for text in texts:
            payload = json.dumps({"inputText": text, "dimensions": 1024})
            response = await bedrock_runtime.invoke_model(
                modelId="amazon.titan-embed-text-v2:0", body=payload,
            )
            raw_body = await response.get("body").read()
            parsed_responses.append(json.loads(raw_body))
    return np.array([item["embedding"] for item in parsed_responses])
+
+
@wrap_embedding_func_with_attrs(embedding_dim=1536, max_token_size=8192)
@retry(
    stop=stop_after_attempt(5),
    wait=wait_exponential(multiplier=1, min=4, max=10),
    retry=retry_if_exception_type((RateLimitError, APIConnectionError)),
)
async def openai_embedding(texts: list[str]) -> np.ndarray:
    """Embed `texts` with OpenAI text-embedding-3-small; returns an (n, 1536) array."""
    client = get_openai_async_client_instance()
    response = await client.embeddings.create(
        model="text-embedding-3-small", input=texts, encoding_format="float"
    )
    vectors = [item.embedding for item in response.data]
    return np.array(vectors)
+
+
@retry(
    stop=stop_after_attempt(3),
    wait=wait_exponential(multiplier=1, min=4, max=10),
    retry=retry_if_exception_type((RateLimitError, APIConnectionError)),
)
async def azure_openai_complete_if_cache(
    deployment_name, prompt, system_prompt=None, history_messages=None, **kwargs
) -> str:
    """Call an Azure OpenAI chat deployment, consulting an optional KV cache.

    Args:
        deployment_name: Azure deployment name (used as the model id).
        prompt: Current user message.
        system_prompt: Optional system message prepended to the conversation.
        history_messages: Optional prior messages (OpenAI message dicts).
        **kwargs: Extra API options; ``hashing_kv`` (a BaseKVStorage) is
            popped out and used as a response cache keyed on a hash of
            (deployment_name, messages).

    Returns:
        The assistant's reply text (cached or fresh).
    """
    azure_openai_client = get_azure_openai_async_client_instance()
    hashing_kv: BaseKVStorage = kwargs.pop("hashing_kv", None)
    # Fresh list per call; `history_messages=None` avoids the shared
    # mutable-default-argument pitfall.
    messages = []
    if system_prompt:
        messages.append({"role": "system", "content": system_prompt})
    messages.extend(history_messages or [])
    messages.append({"role": "user", "content": prompt})
    if hashing_kv is not None:
        args_hash = compute_args_hash(deployment_name, messages)
        if_cache_return = await hashing_kv.get_by_id(args_hash)
        if if_cache_return is not None:
            return if_cache_return["return"]

    response = await azure_openai_client.chat.completions.create(
        model=deployment_name, messages=messages, **kwargs
    )

    if hashing_kv is not None:
        await hashing_kv.upsert(
            {
                args_hash: {
                    "return": response.choices[0].message.content,
                    "model": deployment_name,
                }
            }
        )
        await hashing_kv.index_done_callback()
    return response.choices[0].message.content
+
+
async def azure_gpt_4o_complete(
    prompt, system_prompt=None, history_messages=[], **kwargs
) -> str:
    """Convenience wrapper: complete `prompt` via the Azure "gpt-4o" deployment."""
    reply = await azure_openai_complete_if_cache(
        "gpt-4o",
        prompt,
        system_prompt=system_prompt,
        history_messages=history_messages,
        **kwargs,
    )
    return reply
+
+
async def azure_gpt_4o_mini_complete(
    prompt, system_prompt=None, history_messages=[], **kwargs
) -> str:
    """Convenience wrapper: complete `prompt` via the Azure "gpt-4o-mini" deployment."""
    reply = await azure_openai_complete_if_cache(
        "gpt-4o-mini",
        prompt,
        system_prompt=system_prompt,
        history_messages=history_messages,
        **kwargs,
    )
    return reply
+
+
@wrap_embedding_func_with_attrs(embedding_dim=1536, max_token_size=8192)
@retry(
    stop=stop_after_attempt(3),
    wait=wait_exponential(multiplier=1, min=4, max=10),
    retry=retry_if_exception_type((RateLimitError, APIConnectionError)),
)
async def azure_openai_embedding(texts: list[str]) -> np.ndarray:
    """Embed `texts` via the Azure text-embedding-3-small deployment; (n, 1536) array."""
    client = get_azure_openai_async_client_instance()
    response = await client.embeddings.create(
        model="text-embedding-3-small", input=texts, encoding_format="float"
    )
    vectors = [item.embedding for item in response.data]
    return np.array(vectors)
diff --git a/rag-web-ui/backend/nano_graphrag/_op.py b/rag-web-ui/backend/nano_graphrag/_op.py
new file mode 100644
index 0000000..b8cef88
--- /dev/null
+++ b/rag-web-ui/backend/nano_graphrag/_op.py
@@ -0,0 +1,1140 @@
+import re
+import json
+import asyncio
+from typing import Union
+from collections import Counter, defaultdict
+from ._splitter import SeparatorSplitter
+from ._utils import (
+ logger,
+ clean_str,
+ compute_mdhash_id,
+ is_float_regex,
+ list_of_list_to_csv,
+ pack_user_ass_to_openai_messages,
+ split_string_by_multi_markers,
+ truncate_list_by_token_size,
+
+ TokenizerWrapper
+)
+from .base import (
+ BaseGraphStorage,
+ BaseKVStorage,
+ BaseVectorStorage,
+ SingleCommunitySchema,
+ CommunitySchema,
+ TextChunkSchema,
+ QueryParam,
+)
+from .prompt import GRAPH_FIELD_SEP, PROMPTS
+
+
def chunking_by_token_size(
    tokens_list: list[list[int]],
    doc_keys,
    tokenizer_wrapper: "TokenizerWrapper",
    overlap_token_size=128,
    max_token_size=1024,
):
    """Split each token sequence into fixed-size, overlapping windows.

    Returns a list of dicts with the chunk's token count, decoded (stripped)
    text, per-document chunk index, and the owning document key.
    """
    results = []
    step = max_token_size - overlap_token_size
    for doc_pos, tokens in enumerate(tokens_list):
        starts = list(range(0, len(tokens), step))
        windows = [tokens[s : s + max_token_size] for s in starts]
        sizes = [min(max_token_size, len(tokens) - s) for s in starts]

        for chunk_idx, text in enumerate(tokenizer_wrapper.decode_batch(windows)):
            results.append(
                {
                    "tokens": sizes[chunk_idx],
                    "content": text.strip(),
                    "chunk_order_index": chunk_idx,
                    "full_doc_id": doc_keys[doc_pos],
                }
            )
    return results
+
+
def chunking_by_seperators(
    tokens_list: list[list[int]],
    doc_keys,
    tokenizer_wrapper: TokenizerWrapper,
    overlap_token_size=128,
    max_token_size=1024,
):
    """Split token sequences on semantic separators using SeparatorSplitter.

    Produces the same record shape as `chunking_by_token_size`: token count,
    decoded (stripped) text, per-document chunk index, and document key.
    """
    from .prompt import PROMPTS
    # Encode each textual separator with the same tokenizer used for the docs.
    encoded_separators = [tokenizer_wrapper.encode(sep) for sep in PROMPTS["default_text_separator"]]
    splitter = SeparatorSplitter(
        separators=encoded_separators,
        chunk_size=max_token_size,
        chunk_overlap=overlap_token_size,
    )
    results = []
    for doc_pos, tokens in enumerate(tokens_list):
        pieces = splitter.split_tokens(tokens)
        decoded = tokenizer_wrapper.decode_batch(pieces)
        for chunk_idx, (piece, text) in enumerate(zip(pieces, decoded)):
            results.append(
                {
                    "tokens": len(piece),
                    "content": text.strip(),
                    "chunk_order_index": chunk_idx,
                    "full_doc_id": doc_keys[doc_pos],
                }
            )
    return results
+
+
def get_chunks(new_docs, chunk_func=chunking_by_token_size, tokenizer_wrapper: TokenizerWrapper = None, **chunk_func_params):
    """Tokenize `new_docs` and return {content-hash id: chunk record}.

    Keys are md5-based ids of the chunk text ("chunk-" prefix), so identical
    chunk contents de-duplicate naturally.
    """
    doc_keys = list(new_docs.keys())
    contents = [new_docs[k]["content"] for k in doc_keys]

    token_lists = [tokenizer_wrapper.encode(text) for text in contents]
    chunks = chunk_func(
        token_lists,
        doc_keys=doc_keys,
        tokenizer_wrapper=tokenizer_wrapper,
        overlap_token_size=chunk_func_params.get("overlap_token_size", 128),
        max_token_size=chunk_func_params.get("max_token_size", 1024),
    )
    return {
        compute_mdhash_id(chunk["content"], prefix="chunk-"): chunk
        for chunk in chunks
    }
+
+
async def _handle_entity_relation_summary(
    entity_or_relation_name: str,
    description: str,
    global_config: dict,
    tokenizer_wrapper: TokenizerWrapper,
) -> str:
    """Condense `description` with the cheap LLM when it exceeds the token cap.

    Descriptions under `entity_summary_to_max_tokens` are returned unchanged;
    longer ones are truncated to the model's input limit and summarized.
    """
    use_llm_func: callable = global_config["cheap_model_func"]
    llm_max_tokens = global_config["cheap_model_max_token_size"]
    summary_max_tokens = global_config["entity_summary_to_max_tokens"]

    tokens = tokenizer_wrapper.encode(description)
    if len(tokens) < summary_max_tokens:
        # Short enough already — skip the LLM call.
        return description

    truncated_description = tokenizer_wrapper.decode(tokens[:llm_max_tokens])
    use_prompt = PROMPTS["summarize_entity_descriptions"].format(
        entity_name=entity_or_relation_name,
        description_list=truncated_description.split(GRAPH_FIELD_SEP),
    )
    logger.debug(f"Trigger summary: {entity_or_relation_name}")
    return await use_llm_func(use_prompt, max_tokens=summary_max_tokens)
+
+
async def _handle_single_entity_extraction(
    record_attributes: list[str],
    chunk_key: str,
):
    """Parse one delimited record into an entity dict, or None if malformed.

    Expects ["entity", name, type, description, ...]; name and type are
    upper-cased and cleaned before use. `chunk_key` becomes the source id.
    """
    if len(record_attributes) < 4 or record_attributes[0] != '"entity"':
        return None
    entity_name = clean_str(record_attributes[1].upper())
    if not entity_name.strip():
        # A blank name would create an unusable graph node.
        return None
    return dict(
        entity_name=entity_name,
        entity_type=clean_str(record_attributes[2].upper()),
        description=clean_str(record_attributes[3]),
        source_id=chunk_key,
    )
+
+
async def _handle_single_relationship_extraction(
    record_attributes: list[str],
    chunk_key: str,
):
    """Parse one delimited record into an edge dict, or None if malformed.

    Expects ["relationship", src, tgt, description, ..., weight]; the last
    field is used as the edge weight when it parses as a float, else 1.0.
    """
    if len(record_attributes) < 5 or record_attributes[0] != '"relationship"':
        return None
    weight_field = record_attributes[-1]
    return dict(
        src_id=clean_str(record_attributes[1].upper()),
        tgt_id=clean_str(record_attributes[2].upper()),
        weight=float(weight_field) if is_float_regex(weight_field) else 1.0,
        description=clean_str(record_attributes[3]),
        source_id=chunk_key,
    )
+
+
async def _merge_nodes_then_upsert(
    entity_name: str,
    nodes_data: list[dict],
    knwoledge_graph_inst: BaseGraphStorage,
    global_config: dict,
    tokenizer_wrapper,
):
    """Merge freshly-extracted entity records with any existing graph node.

    The merged node takes the most frequent entity type, the union of
    descriptions (joined on GRAPH_FIELD_SEP and LLM-summarized if too long),
    and the union of source chunk ids; it is then upserted into the graph.
    Returns the merged node data with `entity_name` added.
    """
    already_entitiy_types = []
    already_source_ids = []
    already_description = []

    # Fold in the attributes of a pre-existing node with the same name, if any.
    already_node = await knwoledge_graph_inst.get_node(entity_name)
    if already_node is not None:
        already_entitiy_types.append(already_node["entity_type"])
        already_source_ids.extend(
            split_string_by_multi_markers(already_node["source_id"], [GRAPH_FIELD_SEP])
        )
        already_description.append(already_node["description"])

    # Majority vote over all observed entity types.
    entity_type = sorted(
        Counter(
            [dp["entity_type"] for dp in nodes_data] + already_entitiy_types
        ).items(),
        key=lambda x: x[1],
        reverse=True,
    )[0][0]
    # De-duplicate and deterministically order descriptions before joining.
    description = GRAPH_FIELD_SEP.join(
        sorted(set([dp["description"] for dp in nodes_data] + already_description))
    )
    source_id = GRAPH_FIELD_SEP.join(
        set([dp["source_id"] for dp in nodes_data] + already_source_ids)
    )
    # Condense the joined description via the cheap LLM when it is too long.
    description = await _handle_entity_relation_summary(
        entity_name, description, global_config, tokenizer_wrapper
    )
    node_data = dict(
        entity_type=entity_type,
        description=description,
        source_id=source_id,
    )
    await knwoledge_graph_inst.upsert_node(
        entity_name,
        node_data=node_data,
    )
    node_data["entity_name"] = entity_name
    return node_data
+
+
async def _merge_edges_then_upsert(
    src_id: str,
    tgt_id: str,
    edges_data: list[dict],
    knwoledge_graph_inst: BaseGraphStorage,
    global_config: dict,
    tokenizer_wrapper,
):
    """Merge freshly-extracted relationship records with any existing edge.

    Weights are summed, descriptions de-duplicated and joined (then
    LLM-summarized if too long), and source chunk ids unioned. Endpoint nodes
    missing from the graph are created as placeholder "UNKNOWN" nodes before
    the edge is upserted.
    """
    already_weights = []
    already_source_ids = []
    already_description = []
    already_order = []
    if await knwoledge_graph_inst.has_edge(src_id, tgt_id):
        already_edge = await knwoledge_graph_inst.get_edge(src_id, tgt_id)
        already_weights.append(already_edge["weight"])
        already_source_ids.extend(
            split_string_by_multi_markers(already_edge["source_id"], [GRAPH_FIELD_SEP])
        )
        already_description.append(already_edge["description"])
        already_order.append(already_edge.get("order", 1))

    # [numberchiffre]: `Relationship.order` is only returned from DSPy's predictions
    order = min([dp.get("order", 1) for dp in edges_data] + already_order)
    weight = sum([dp["weight"] for dp in edges_data] + already_weights)
    description = GRAPH_FIELD_SEP.join(
        sorted(set([dp["description"] for dp in edges_data] + already_description))
    )
    source_id = GRAPH_FIELD_SEP.join(
        set([dp["source_id"] for dp in edges_data] + already_source_ids)
    )
    # Ensure both endpoints exist; unseen ones get a placeholder node so the
    # edge upsert cannot dangle.
    for need_insert_id in [src_id, tgt_id]:
        if not (await knwoledge_graph_inst.has_node(need_insert_id)):
            await knwoledge_graph_inst.upsert_node(
                need_insert_id,
                node_data={
                    "source_id": source_id,
                    "description": description,
                    "entity_type": '"UNKNOWN"',
                },
            )
    description = await _handle_entity_relation_summary(
        (src_id, tgt_id), description, global_config, tokenizer_wrapper
    )
    await knwoledge_graph_inst.upsert_edge(
        src_id,
        tgt_id,
        edge_data=dict(
            weight=weight, description=description, source_id=source_id, order=order
        ),
    )
+
+
async def extract_entities(
    chunks: dict[str, TextChunkSchema],
    knwoledge_graph_inst: BaseGraphStorage,
    entity_vdb: BaseVectorStorage,
    tokenizer_wrapper,
    global_config: dict,
    using_amazon_bedrock: bool=False,
) -> Union[BaseGraphStorage, None]:
    """Extract entities/relations from text chunks and upsert them into the graph.

    Each chunk is sent through the "best" LLM with the entity-extraction
    prompt, optionally re-prompted ("gleaning") up to
    `entity_extract_max_gleaning` times, then the delimited records are parsed
    into nodes and edges, merged per name/pair, and upserted. Entity vectors
    are also upserted into `entity_vdb` when it is provided.

    Returns the graph storage instance, or None when nothing was extracted.
    """
    use_llm_func: callable = global_config["best_model_func"]
    entity_extract_max_gleaning = global_config["entity_extract_max_gleaning"]

    ordered_chunks = list(chunks.items())

    entity_extract_prompt = PROMPTS["entity_extraction"]
    context_base = dict(
        tuple_delimiter=PROMPTS["DEFAULT_TUPLE_DELIMITER"],
        record_delimiter=PROMPTS["DEFAULT_RECORD_DELIMITER"],
        completion_delimiter=PROMPTS["DEFAULT_COMPLETION_DELIMITER"],
        entity_types=",".join(PROMPTS["DEFAULT_ENTITY_TYPES"]),
    )
    continue_prompt = PROMPTS["entiti_continue_extraction"]
    if_loop_prompt = PROMPTS["entiti_if_loop_extraction"]

    # Progress counters shared across the concurrently-processed chunks.
    already_processed = 0
    already_entities = 0
    already_relations = 0

    async def _process_single_content(chunk_key_dp: tuple[str, TextChunkSchema]):
        # Extract (nodes, edges) from one chunk; returns two dicts of lists.
        nonlocal already_processed, already_entities, already_relations
        chunk_key = chunk_key_dp[0]
        chunk_dp = chunk_key_dp[1]
        content = chunk_dp["content"]
        hint_prompt = entity_extract_prompt.format(**context_base, input_text=content)
        final_result = await use_llm_func(hint_prompt)
        if isinstance(final_result, list):
            # Bedrock-style responses arrive as a list of content blocks.
            final_result = final_result[0]["text"]

        history = pack_user_ass_to_openai_messages(hint_prompt, final_result, using_amazon_bedrock)
        # "Gleaning": ask the model to continue extraction until it says stop
        # or the configured round limit is reached.
        for now_glean_index in range(entity_extract_max_gleaning):
            glean_result = await use_llm_func(continue_prompt, history_messages=history)

            history += pack_user_ass_to_openai_messages(continue_prompt, glean_result, using_amazon_bedrock)
            final_result += glean_result
            if now_glean_index == entity_extract_max_gleaning - 1:
                break

            if_loop_result: str = await use_llm_func(
                if_loop_prompt, history_messages=history
            )
            if_loop_result = if_loop_result.strip().strip('"').strip("'").lower()
            if if_loop_result != "yes":
                break

        records = split_string_by_multi_markers(
            final_result,
            [context_base["record_delimiter"], context_base["completion_delimiter"]],
        )

        maybe_nodes = defaultdict(list)
        maybe_edges = defaultdict(list)
        for record in records:
            # Records are parenthesized tuples; skip anything else.
            record = re.search(r"\((.*)\)", record)
            if record is None:
                continue
            record = record.group(1)
            record_attributes = split_string_by_multi_markers(
                record, [context_base["tuple_delimiter"]]
            )
            if_entities = await _handle_single_entity_extraction(
                record_attributes, chunk_key
            )
            if if_entities is not None:
                maybe_nodes[if_entities["entity_name"]].append(if_entities)
                continue

            if_relation = await _handle_single_relationship_extraction(
                record_attributes, chunk_key
            )
            if if_relation is not None:
                maybe_edges[(if_relation["src_id"], if_relation["tgt_id"])].append(
                    if_relation
                )
        already_processed += 1
        already_entities += len(maybe_nodes)
        already_relations += len(maybe_edges)
        now_ticks = PROMPTS["process_tickers"][
            already_processed % len(PROMPTS["process_tickers"])
        ]
        print(
            f"{now_ticks} Processed {already_processed}({already_processed*100//len(ordered_chunks)}%) chunks, {already_entities} entities(duplicated), {already_relations} relations(duplicated)\r",
            end="",
            flush=True,
        )
        return dict(maybe_nodes), dict(maybe_edges)

    # use_llm_func is wrapped in ascynio.Semaphore, limiting max_async callings
    results = await asyncio.gather(
        *[_process_single_content(c) for c in ordered_chunks]
    )
    print() # clear the progress bar
    # Aggregate per-chunk results by entity name and (sorted) edge pair.
    maybe_nodes = defaultdict(list)
    maybe_edges = defaultdict(list)
    for m_nodes, m_edges in results:
        for k, v in m_nodes.items():
            maybe_nodes[k].extend(v)
        for k, v in m_edges.items():
            # it's undirected graph
            maybe_edges[tuple(sorted(k))].extend(v)
    all_entities_data = await asyncio.gather(
        *[
            _merge_nodes_then_upsert(k, v, knwoledge_graph_inst, global_config, tokenizer_wrapper)
            for k, v in maybe_nodes.items()
        ]
    )
    await asyncio.gather(
        *[
            _merge_edges_then_upsert(k[0], k[1], v, knwoledge_graph_inst, global_config, tokenizer_wrapper)
            for k, v in maybe_edges.items()
        ]
    )
    if not len(all_entities_data):
        logger.warning("Didn't extract any entities, maybe your LLM is not working")
        return None
    # Index entities in the vector store keyed by an md5-based "ent-" id.
    if entity_vdb is not None:
        data_for_vdb = {
            compute_mdhash_id(dp["entity_name"], prefix="ent-"): {
                "content": dp["entity_name"] + dp["description"],
                "entity_name": dp["entity_name"],
            }
            for dp in all_entities_data
        }
        await entity_vdb.upsert(data_for_vdb)
    return knwoledge_graph_inst
+
+
def _pack_single_community_by_sub_communities(
    community: SingleCommunitySchema,
    max_token_size: int,
    already_reports: dict[str, CommunitySchema],
    tokenizer_wrapper: TokenizerWrapper,
) -> tuple[str, int, set, set]:
    """Describe a community using its sub-communities' already-generated reports.

    Returns a tuple of (csv_description, token_count_of_description,
    covered_node_names, covered_edge_tuples) so the caller can skip nodes and
    edges that the sub-community reports already represent.
    """
    # Only sub-communities whose reports have been generated can be used.
    all_sub_communities = [
        already_reports[k] for k in community["sub_communities"] if k in already_reports
    ]
    # Most frequently-occurring sub-communities first.
    all_sub_communities = sorted(
        all_sub_communities, key=lambda x: x["occurrence"], reverse=True
    )

    # Keep as many report strings as fit into the token budget.
    may_trun_all_sub_communities = truncate_list_by_token_size(
        all_sub_communities,
        key=lambda x: x["report_string"],
        max_token_size=max_token_size,
        tokenizer_wrapper=tokenizer_wrapper,
    )
    sub_fields = ["id", "report", "rating", "importance"]
    sub_communities_describe = list_of_list_to_csv(
        [sub_fields]
        + [
            [
                i,
                c["report_string"],
                c["report_json"].get("rating", -1),
                c["occurrence"],
            ]
            for i, c in enumerate(may_trun_all_sub_communities)
        ]
    )
    # Collect the nodes/edges these sub-community reports already cover.
    already_nodes = []
    already_edges = []
    for c in may_trun_all_sub_communities:
        already_nodes.extend(c["nodes"])
        already_edges.extend([tuple(e) for e in c["edges"]])


    return (
        sub_communities_describe,
        len(tokenizer_wrapper.encode(sub_communities_describe)),
        set(already_nodes),
        set(already_edges),
    )
+
+
async def _pack_single_community_describe(
    knwoledge_graph_inst: BaseGraphStorage,
    community: SingleCommunitySchema,
    tokenizer_wrapper: "TokenizerWrapper",
    max_token_size: int = 12000,
    already_reports: dict[str, CommunitySchema] = {},
    global_config: dict = {},
) -> str:
    """Assemble the LLM context for one community: sub-reports, entities, edges.

    Three CSV sections are fitted into `max_token_size` tokens. Large
    communities lean on their sub-communities' finished reports first; the
    remaining budget is split between node and edge tables proportionally to
    their row counts.
    """
    # 1. Prepare the raw data.
    nodes_in_order = sorted(community["nodes"])
    edges_in_order = sorted(community["edges"], key=lambda x: x[0] + x[1])

    nodes_data = await asyncio.gather(
        *[knwoledge_graph_inst.get_node(n) for n in nodes_in_order]
    )
    edges_data = await asyncio.gather(
        *[knwoledge_graph_inst.get_edge(src, tgt) for src, tgt in edges_in_order]
    )


    # 2. Define the output template and measure its fixed token overhead.
    final_template = """-----Reports-----
```csv
{reports}
```
-----Entities-----
```csv
{entities}
```
-----Relationships-----
```csv
{relationships}
```"""
    base_template_tokens = len(tokenizer_wrapper.encode(
        final_template.format(reports="", entities="", relationships="")
    ))
    remaining_budget = max_token_size - base_template_tokens

    # 3. Handle sub-community reports.
    report_describe = ""
    contain_nodes = set()
    contain_edges = set()

    # Heuristic: communities this large are likely to exceed the budget.
    truncated = len(nodes_in_order) > 100 or len(edges_in_order) > 100

    need_to_use_sub_communities = (
        truncated and
        community["sub_communities"] and
        already_reports
    )
    force_to_use_sub_communities = global_config["addon_params"].get(
        "force_to_use_sub_communities", False
    )

    if need_to_use_sub_communities or force_to_use_sub_communities:
        logger.debug(f"Community {community['title']} using sub-communities")
        # Fetch sub-community reports plus the nodes/edges they already cover.
        result = _pack_single_community_by_sub_communities(
            community, remaining_budget, already_reports, tokenizer_wrapper
        )
        report_describe, report_size, contain_nodes, contain_edges = result
        remaining_budget = max(0, remaining_budget - report_size)

    # 4. Prepare node/edge rows (filtering out those covered by sub-reports).
    def format_row(row: list) -> str:
        # CSV-style quoting used only for token-size estimation below.
        return ','.join('"{}"'.format(str(item).replace('"', '""')) for item in row)

    node_fields = ["id", "entity", "type", "description", "degree"]
    edge_fields = ["id", "source", "target", "description", "rank"]

    # Fetch degrees for ranking rows by importance.
    node_degrees = await knwoledge_graph_inst.node_degrees_batch(nodes_in_order)
    edge_degrees = await knwoledge_graph_inst.edge_degrees_batch(edges_in_order)

    # Drop rows already represented by a sub-community report.
    nodes_list_data = [
        [i, name, data.get("entity_type", "UNKNOWN"),
         data.get("description", "UNKNOWN"), node_degrees[i]]
        for i, (name, data) in enumerate(zip(nodes_in_order, nodes_data))
        if name not in contain_nodes  # key filter
    ]

    edges_list_data = [
        [i, edge[0], edge[1], data.get("description", "UNKNOWN"), edge_degrees[i]]
        for i, (edge, data) in enumerate(zip(edges_in_order, edges_data))
        if (edge[0], edge[1]) not in contain_edges  # key filter
    ]

    # Highest degree/rank first so truncation keeps the most important rows.
    nodes_list_data.sort(key=lambda x: x[-1], reverse=True)
    edges_list_data.sort(key=lambda x: x[-1], reverse=True)

    # 5. Allocate the remaining budget between the two tables.
    # Header overhead for both CSV tables.
    header_tokens = len(tokenizer_wrapper.encode(
        list_of_list_to_csv([node_fields]) + "\n" + list_of_list_to_csv([edge_fields])
    ))



    data_budget = max(0, remaining_budget - header_tokens)
    total_items = len(nodes_list_data) + len(edges_list_data)
    node_ratio = len(nodes_list_data) / max(1, total_items)
    edge_ratio = 1 - node_ratio




    # Truncate each table to its proportional share of the budget.
    nodes_final = truncate_list_by_token_size(
        nodes_list_data, key=format_row,
        max_token_size=int(data_budget * node_ratio),
        tokenizer_wrapper=tokenizer_wrapper
    )
    edges_final = truncate_list_by_token_size(
        edges_list_data, key=format_row,
        max_token_size= int(data_budget * edge_ratio),
        tokenizer_wrapper=tokenizer_wrapper
    )

    # 6. Assemble the final output.
    nodes_describe = list_of_list_to_csv([node_fields] + nodes_final)
    edges_describe = list_of_list_to_csv([edge_fields] + edges_final)



    final_output = final_template.format(
        reports=report_describe,
        entities=nodes_describe,
        relationships=edges_describe
    )

    return final_output
+
+
+def _community_report_json_to_str(parsed_output: dict) -> str:
+ """refer official graphrag: index/graph/extractors/community_reports"""
+ title = parsed_output.get("title", "Report")
+ summary = parsed_output.get("summary", "")
+ findings = parsed_output.get("findings", [])
+
+ def finding_summary(finding: dict):
+ if isinstance(finding, str):
+ return finding
+ return finding.get("summary")
+
+ def finding_explanation(finding: dict):
+ if isinstance(finding, str):
+ return ""
+ return finding.get("explanation")
+
+ report_sections = "\n\n".join(
+ f"## {finding_summary(f)}\n\n{finding_explanation(f)}" for f in findings
+ )
+ return f"# {title}\n\n{summary}\n\n{report_sections}"
+
+
async def generate_community_report(
    community_report_kv: BaseKVStorage[CommunitySchema],
    knwoledge_graph_inst: BaseGraphStorage,
    tokenizer_wrapper: TokenizerWrapper,
    global_config: dict,
):
    """Generate and persist an LLM report for every graph community.

    Communities are processed level by level, deepest first, so that a
    higher-level community can reuse its sub-communities' finished reports
    when packing its context. All reports are upserted into
    `community_report_kv` keyed by community id.
    """
    llm_extra_kwargs = global_config["special_community_report_llm_kwargs"]
    use_llm_func: callable = global_config["best_model_func"]
    use_string_json_convert_func: callable = global_config["convert_response_to_json_func"]

    communities_schema = await knwoledge_graph_inst.community_schema()
    community_keys, community_values = list(communities_schema.keys()), list(communities_schema.values())
    already_processed = 0

    prompt_template = PROMPTS["community_report"]
    # Tokens consumed by the prompt scaffolding itself (with empty input).
    prompt_overhead = len(tokenizer_wrapper.encode(prompt_template.format(input_text="")))

    async def _form_single_community_report(
        community: SingleCommunitySchema, already_reports: dict[str, CommunitySchema]
    ):
        # Pack the community context, query the LLM, and parse the JSON reply.
        nonlocal already_processed
        describe = await _pack_single_community_describe(
            knwoledge_graph_inst,
            community,
            tokenizer_wrapper=tokenizer_wrapper,
            max_token_size=global_config["best_model_max_token_size"] - prompt_overhead -200, # extra token for chat template and prompt template
            already_reports=already_reports,
            global_config=global_config,
        )
        prompt = prompt_template.format(input_text=describe)


        response = await use_llm_func(prompt, **llm_extra_kwargs)
        data = use_string_json_convert_func(response)
        already_processed += 1
        now_ticks = PROMPTS["process_tickers"][already_processed % len(PROMPTS["process_tickers"])]
        print(f"{now_ticks} Processed {already_processed} communities\r", end="", flush=True)
        return data

    # Deepest levels first so parent communities can consume child reports.
    levels = sorted(set([c["level"] for c in community_values]), reverse=True)
    logger.info(f"Generating by levels: {levels}")
    community_datas = {}
    for level in levels:
        this_level_community_keys, this_level_community_values = zip(
            *[
                (k, v)
                for k, v in zip(community_keys, community_values)
                if v["level"] == level
            ]
        )
        this_level_communities_reports = await asyncio.gather(
            *[
                _form_single_community_report(c, community_datas)
                for c in this_level_community_values
            ]
        )
        community_datas.update(
            {
                k: {
                    "report_string": _community_report_json_to_str(r),
                    "report_json": r,
                    **v,
                }
                for k, r, v in zip(
                    this_level_community_keys,
                    this_level_communities_reports,
                    this_level_community_values,
                )
            }
        )
    print() # clear the progress bar
    await community_report_kv.upsert(community_datas)
+
+
async def _find_most_related_community_from_entities(
    node_datas: list[dict],
    query_param: QueryParam,
    community_reports: BaseKVStorage[CommunitySchema],
    tokenizer_wrapper,
):
    """Collect the community reports most relevant to a set of entity nodes.

    Community memberships are gathered from each node's `clusters` field (down
    to `query_param.level`), ranked by (membership frequency, report rating),
    and truncated to the query's community-report token budget.

    Returns the selected report dicts, best first (at most one when
    `query_param.local_community_single_one` is set).
    """
    related_communities = []
    for node_d in node_datas:
        if "clusters" not in node_d:
            continue
        related_communities.extend(json.loads(node_d["clusters"]))
    related_community_dup_keys = [
        str(dp["cluster"])
        for dp in related_communities
        if dp["level"] <= query_param.level
    ]
    # Frequency of each community across all matched entities.
    related_community_keys_counts = dict(Counter(related_community_dup_keys))
    _related_community_datas = await asyncio.gather(
        *[community_reports.get_by_id(k) for k in related_community_keys_counts.keys()]
    )
    # Drop communities whose report is missing from storage.
    related_community_datas = {
        k: v
        for k, v in zip(related_community_keys_counts.keys(), _related_community_datas)
        if v is not None
    }
    # BUGFIX: rank only communities that still have report data. Sorting the
    # raw count keys raised KeyError in the key function whenever a report was
    # missing (it was filtered out of `related_community_datas` above).
    related_community_keys = sorted(
        related_community_datas.keys(),
        key=lambda k: (
            related_community_keys_counts[k],
            related_community_datas[k]["report_json"].get("rating", -1),
        ),
        reverse=True,
    )
    sorted_community_datas = [
        related_community_datas[k] for k in related_community_keys
    ]

    use_community_reports = truncate_list_by_token_size(
        sorted_community_datas,
        key=lambda x: x["report_string"],
        max_token_size=query_param.local_max_token_for_community_report,
        tokenizer_wrapper=tokenizer_wrapper,
    )
    if query_param.local_community_single_one:
        use_community_reports = use_community_reports[:1]
    return use_community_reports
+
+
async def _find_most_related_text_unit_from_entities(
    node_datas: list[dict],
    query_param: QueryParam,
    text_chunks_db: BaseKVStorage[TextChunkSchema],
    knowledge_graph_inst: BaseGraphStorage,
    tokenizer_wrapper,
):
    """Rank and truncate the source text chunks behind the given entities.

    Chunks keep the order of the entity they came from and, within that, are
    ranked by how many one-hop neighbour entities also cite the chunk.
    Returns the chunk payloads (``TextChunkSchema``) after token truncation.
    """
    text_units = [
        split_string_by_multi_markers(dp["source_id"], [GRAPH_FIELD_SEP])
        for dp in node_datas
    ]
    edges = await knowledge_graph_inst.get_nodes_edges_batch([dp["entity_name"] for dp in node_datas])
    all_one_hop_nodes = set()
    for this_edges in edges:
        if not this_edges:
            continue
        all_one_hop_nodes.update([e[1] for e in this_edges])
    all_one_hop_nodes = list(all_one_hop_nodes)
    all_one_hop_nodes_data = await knowledge_graph_inst.get_nodes_batch(all_one_hop_nodes)
    # chunk ids cited by each one-hop neighbour, for relation counting below
    all_one_hop_text_units_lookup = {
        k: set(split_string_by_multi_markers(v["source_id"], [GRAPH_FIELD_SEP]))
        for k, v in zip(all_one_hop_nodes, all_one_hop_nodes_data)
        if v is not None
    }
    all_text_units_lookup = {}
    for index, (this_text_units, this_edges) in enumerate(zip(text_units, edges)):
        for c_id in this_text_units:
            if c_id in all_text_units_lookup:
                continue
            relation_counts = 0
            for e in this_edges:
                if (
                    e[1] in all_one_hop_text_units_lookup
                    and c_id in all_one_hop_text_units_lookup[e[1]]
                ):
                    relation_counts += 1
            all_text_units_lookup[c_id] = {
                "data": await text_chunks_db.get_by_id(c_id),
                "order": index,
                "relation_counts": relation_counts,
            }
    # The lookup entries are always dicts; a damaged storage shows up as a
    # None "data" field, so detect (and filter) on that field instead of the
    # entry itself — otherwise a missing chunk crashes the truncation below.
    if any(v["data"] is None for v in all_text_units_lookup.values()):
        logger.warning("Text chunks are missing, maybe the storage is damaged")
    all_text_units = [
        {"id": k, **v}
        for k, v in all_text_units_lookup.items()
        if v["data"] is not None
    ]
    all_text_units = sorted(
        all_text_units, key=lambda x: (x["order"], -x["relation_counts"])
    )
    all_text_units = truncate_list_by_token_size(
        all_text_units,
        key=lambda x: x["data"]["content"],
        max_token_size=query_param.local_max_token_for_text_unit,
        tokenizer_wrapper=tokenizer_wrapper,  # pass the wrapper through
    )
    all_text_units: list[TextChunkSchema] = [t["data"] for t in all_text_units]
    return all_text_units
+
+
async def _find_most_related_edges_from_entities(
    node_datas: list[dict],
    query_param: QueryParam,
    knowledge_graph_inst: BaseGraphStorage,
    tokenizer_wrapper,
):
    """Gather the deduplicated one-hop edges of the given entities, ranked by
    edge degree then weight, and truncate them to the query token budget."""
    per_node_edges = await knowledge_graph_inst.get_nodes_edges_batch(
        [dp["entity_name"] for dp in node_datas]
    )

    # Canonicalise each edge as a sorted endpoint pair so (a, b) and (b, a)
    # collapse into one entry, preserving first-seen order.
    unique_edges = []
    visited = set()
    for edge_group in per_node_edges:
        for edge in edge_group:
            canonical = tuple(sorted(edge))
            if canonical in visited:
                continue
            visited.add(canonical)
            unique_edges.append(canonical)

    edge_payloads = await knowledge_graph_inst.get_edges_batch(unique_edges)
    edge_ranks = await knowledge_graph_inst.edge_degrees_batch(unique_edges)
    edge_records = [
        {"src_tgt": pair, "rank": rank, **payload}
        for pair, payload, rank in zip(unique_edges, edge_payloads, edge_ranks)
        if payload is not None
    ]
    edge_records.sort(key=lambda rec: (rec["rank"], rec["weight"]), reverse=True)
    return truncate_list_by_token_size(
        edge_records,
        key=lambda rec: rec["description"],
        max_token_size=query_param.local_max_token_for_local_context,
        tokenizer_wrapper=tokenizer_wrapper,
    )
+
+
async def _build_local_query_context(
    query,
    knowledge_graph_inst: BaseGraphStorage,
    entities_vdb: BaseVectorStorage,
    community_reports: BaseKVStorage[CommunitySchema],
    text_chunks_db: BaseKVStorage[TextChunkSchema],
    query_param: QueryParam,
    tokenizer_wrapper,
):
    """Assemble the CSV-formatted context for a local query.

    Retrieves top-k entities from the vector store, then expands them into
    related community reports, text chunks and edges. Returns a markdown
    string with four CSV sections, or None when no entity matches.
    """
    results = await entities_vdb.query(query, top_k=query_param.top_k)
    if not len(results):
        return None
    node_datas = await knowledge_graph_inst.get_nodes_batch([r["entity_name"] for r in results])
    if not all([n is not None for n in node_datas]):
        logger.warning("Some nodes are missing, maybe the storage is damaged")
    node_degrees = await knowledge_graph_inst.node_degrees_batch([r["entity_name"] for r in results])
    node_datas = [
        {**n, "entity_name": k["entity_name"], "rank": d}
        for k, n, d in zip(results, node_datas, node_degrees)
        if n is not None
    ]
    use_communities = await _find_most_related_community_from_entities(
        node_datas, query_param, community_reports, tokenizer_wrapper
    )
    use_text_units = await _find_most_related_text_unit_from_entities(
        node_datas, query_param, text_chunks_db, knowledge_graph_inst, tokenizer_wrapper
    )
    use_relations = await _find_most_related_edges_from_entities(
        node_datas, query_param, knowledge_graph_inst, tokenizer_wrapper
    )
    # log message typo fixed: "entites" -> "entities"
    logger.info(
        f"Using {len(node_datas)} entities, {len(use_communities)} communities, {len(use_relations)} relations, {len(use_text_units)} text units"
    )
    entities_section_list = [["id", "entity", "type", "description", "rank"]]
    for i, n in enumerate(node_datas):
        entities_section_list.append(
            [
                i,
                n["entity_name"],
                n.get("entity_type", "UNKNOWN"),
                n.get("description", "UNKNOWN"),
                n["rank"],
            ]
        )
    entities_context = list_of_list_to_csv(entities_section_list)

    relations_section_list = [
        ["id", "source", "target", "description", "weight", "rank"]
    ]
    for i, e in enumerate(use_relations):
        relations_section_list.append(
            [
                i,
                e["src_tgt"][0],
                e["src_tgt"][1],
                e["description"],
                e["weight"],
                e["rank"],
            ]
        )
    relations_context = list_of_list_to_csv(relations_section_list)

    communities_section_list = [["id", "content"]]
    for i, c in enumerate(use_communities):
        communities_section_list.append([i, c["report_string"]])
    communities_context = list_of_list_to_csv(communities_section_list)

    text_units_section_list = [["id", "content"]]
    for i, t in enumerate(use_text_units):
        text_units_section_list.append([i, t["content"]])
    text_units_context = list_of_list_to_csv(text_units_section_list)
    return f"""
-----Reports-----
```csv
{communities_context}
```
-----Entities-----
```csv
{entities_context}
```
-----Relationships-----
```csv
{relations_context}
```
-----Sources-----
```csv
{text_units_context}
```
"""
+
+
async def local_query(
    query,
    knowledge_graph_inst: BaseGraphStorage,
    entities_vdb: BaseVectorStorage,
    community_reports: BaseKVStorage[CommunitySchema],
    text_chunks_db: BaseKVStorage[TextChunkSchema],
    query_param: QueryParam,
    tokenizer_wrapper,
    global_config: dict,
) -> str:
    """Answer *query* with local (entity-centred) RAG.

    Builds the local context first; returns it directly when
    ``only_need_context`` is set, a canned failure message when no context
    could be built, and otherwise the LLM answer grounded on that context.
    """
    model_func = global_config["best_model_func"]
    context = await _build_local_query_context(
        query,
        knowledge_graph_inst,
        entities_vdb,
        community_reports,
        text_chunks_db,
        query_param,
        tokenizer_wrapper,
    )
    # NOTE: the context-only shortcut intentionally comes before the None
    # check, mirroring the established call contract.
    if query_param.only_need_context:
        return context
    if context is None:
        return PROMPTS["fail_response"]
    sys_prompt = PROMPTS["local_rag_response"].format(
        context_data=context, response_type=query_param.response_type
    )
    return await model_func(
        query,
        system_prompt=sys_prompt,
    )
+
+
async def _map_global_communities(
    query: str,
    communities_data: list[CommunitySchema],
    query_param: QueryParam,
    global_config: dict,
    tokenizer_wrapper,
):
    """Map step of global search: batch community reports into token-bounded
    groups and ask the LLM for answer points per group, concurrently.

    Returns one list of point dicts per group.
    """
    json_convert = global_config["convert_response_to_json_func"]
    model_func = global_config["best_model_func"]

    # Greedily slice the (pre-sorted) reports into groups that each fit the
    # per-call token budget.
    groups = []
    remaining = communities_data
    while len(remaining):
        batch = truncate_list_by_token_size(
            remaining,
            key=lambda x: x["report_string"],
            max_token_size=query_param.global_max_token_for_community_report,
            tokenizer_wrapper=tokenizer_wrapper,  # pass the wrapper through
        )
        groups.append(batch)
        remaining = remaining[len(batch):]

    async def _analyze_group(group: list[CommunitySchema]) -> dict:
        # Render the group as CSV and extract "points" from the LLM reply.
        rows = [["id", "content", "rating", "importance"]]
        for idx, community in enumerate(group):
            rows.append(
                [
                    idx,
                    community["report_string"],
                    community["report_json"].get("rating", 0),
                    community["occurrence"],
                ]
            )
        sys_prompt = PROMPTS["global_map_rag_points"].format(
            context_data=list_of_list_to_csv(rows)
        )
        response = await model_func(
            query,
            system_prompt=sys_prompt,
            **query_param.global_special_community_map_llm_kwargs,
        )
        return json_convert(response).get("points", [])

    logger.info(f"Grouping to {len(groups)} groups for global search")
    return await asyncio.gather(*[_analyze_group(g) for g in groups])
+
+
async def global_query(
    query,
    knowledge_graph_inst: BaseGraphStorage,
    entities_vdb: BaseVectorStorage,
    community_reports: BaseKVStorage[CommunitySchema],
    text_chunks_db: BaseKVStorage[TextChunkSchema],
    query_param: QueryParam,
    tokenizer_wrapper,
    global_config: dict,
) -> str:
    """Answer *query* with global (community-report) map-reduce RAG.

    Selects community reports by level, occurrence and rating, maps them to
    answer points via :func:`_map_global_communities`, then reduces the
    scored points into a final LLM answer.
    """
    community_schema = await knowledge_graph_inst.community_schema()
    community_schema = {
        k: v for k, v in community_schema.items() if v["level"] <= query_param.level
    }
    if not len(community_schema):
        return PROMPTS["fail_response"]
    use_model_func = global_config["best_model_func"]

    sorted_community_schemas = sorted(
        community_schema.items(),
        key=lambda x: x[1]["occurrence"],
        reverse=True,
    )
    sorted_community_schemas = sorted_community_schemas[
        : query_param.global_max_consider_community
    ]
    community_datas = await community_reports.get_by_ids(
        [k[0] for k in sorted_community_schemas]
    )
    community_datas = [c for c in community_datas if c is not None]
    community_datas = [
        c
        for c in community_datas
        if c["report_json"].get("rating", 0) >= query_param.global_min_community_rating
    ]
    community_datas = sorted(
        community_datas,
        key=lambda x: (x["occurrence"], x["report_json"].get("rating", 0)),
        reverse=True,
    )
    # log message typo fixed: "Revtrieved" -> "Retrieved"
    logger.info(f"Retrieved {len(community_datas)} communities")

    map_communities_points = await _map_global_communities(
        query, community_datas, query_param, global_config, tokenizer_wrapper
    )
    final_support_points = []
    for i, mc in enumerate(map_communities_points):
        for point in mc:
            # Points with no description carry no usable answer text.
            if "description" not in point:
                continue
            final_support_points.append(
                {
                    "analyst": i,
                    "answer": point["description"],
                    "score": point.get("score", 1),
                }
            )
    final_support_points = [p for p in final_support_points if p["score"] > 0]
    if not len(final_support_points):
        return PROMPTS["fail_response"]
    final_support_points = sorted(
        final_support_points, key=lambda x: x["score"], reverse=True
    )
    final_support_points = truncate_list_by_token_size(
        final_support_points,
        key=lambda x: x["answer"],
        max_token_size=query_param.global_max_token_for_community_report,
        tokenizer_wrapper=tokenizer_wrapper,  # pass the wrapper through
    )
    points_context = []
    for dp in final_support_points:
        points_context.append(
            f"""----Analyst {dp['analyst']}----
Importance Score: {dp['score']}
{dp['answer']}
"""
        )
    points_context = "\n".join(points_context)
    if query_param.only_need_context:
        return points_context
    sys_prompt_temp = PROMPTS["global_reduce_rag_response"]
    response = await use_model_func(
        query,
        sys_prompt_temp.format(
            report_data=points_context, response_type=query_param.response_type
        ),
    )
    return response
+
+
async def naive_query(
    query,
    chunks_vdb: BaseVectorStorage,
    text_chunks_db: BaseKVStorage[TextChunkSchema],
    query_param: QueryParam,
    tokenizer_wrapper,
    global_config: dict,
):
    """Plain vector-search RAG: retrieve top-k chunks, truncate to the token
    budget, and answer with the LLM (or return the raw context)."""
    use_model_func = global_config["best_model_func"]
    results = await chunks_vdb.query(query, top_k=query_param.top_k)
    if not len(results):
        return PROMPTS["fail_response"]
    chunks_ids = [r["id"] for r in results]
    chunks = await text_chunks_db.get_by_ids(chunks_ids)
    # Drop chunks the KV store no longer has: a None entry would crash the
    # token-based truncation below on x["content"].
    missing = sum(c is None for c in chunks)
    if missing:
        logger.warning(f"{missing} chunks are missing, maybe the storage is damaged")
        chunks = [c for c in chunks if c is not None]

    maybe_trun_chunks = truncate_list_by_token_size(
        chunks,
        key=lambda x: x["content"],
        max_token_size=query_param.naive_max_token_for_text_unit,
        tokenizer_wrapper=tokenizer_wrapper,  # pass the wrapper through
    )
    logger.info(f"Truncate {len(chunks)} to {len(maybe_trun_chunks)} chunks")
    section = "--New Chunk--\n".join([c["content"] for c in maybe_trun_chunks])
    if query_param.only_need_context:
        return section
    sys_prompt_temp = PROMPTS["naive_rag_response"]
    sys_prompt = sys_prompt_temp.format(
        content_data=section, response_type=query_param.response_type
    )
    response = await use_model_func(
        query,
        system_prompt=sys_prompt,
    )
    return response
diff --git a/rag-web-ui/backend/nano_graphrag/_splitter.py b/rag-web-ui/backend/nano_graphrag/_splitter.py
new file mode 100644
index 0000000..1054d17
--- /dev/null
+++ b/rag-web-ui/backend/nano_graphrag/_splitter.py
@@ -0,0 +1,94 @@
+from typing import List, Optional, Union, Literal
+
class SeparatorSplitter:
    """Split a token sequence on explicit separator token sequences, then
    merge the pieces into chunks bounded by ``chunk_size`` with an optional
    ``chunk_overlap`` between consecutive chunks.

    ``keep_separator`` controls where a matched separator ends up:
    ``True``/``"end"`` appends it to the preceding piece, ``"start"``
    prepends it to the following piece, ``False`` drops it.
    """

    def __init__(
        self,
        separators: Optional[List[List[int]]] = None,
        keep_separator: Union[bool, Literal["start", "end"]] = "end",
        chunk_size: int = 4000,
        chunk_overlap: int = 200,
        length_function: callable = len,
    ):
        self._separators = separators or []
        self._keep_separator = keep_separator
        self._chunk_size = chunk_size
        self._chunk_overlap = chunk_overlap
        self._length_function = length_function

    def split_tokens(self, tokens: List[int]) -> List[List[int]]:
        """Split *tokens* on the separators and merge into bounded chunks."""
        return self._merge_splits(self._split_tokens_with_separators(tokens))

    def _split_tokens_with_separators(self, tokens: List[int]) -> List[List[int]]:
        """Cut *tokens* at every separator occurrence (first match wins)."""
        pieces: List[List[int]] = []
        buffer: List[int] = []
        pos = 0
        total = len(tokens)
        while pos < total:
            matched = None
            for sep in self._separators:
                if tokens[pos:pos + len(sep)] == sep:
                    matched = sep
                    break
            if matched is None:
                buffer.append(tokens[pos])
                pos += 1
                continue
            if self._keep_separator in [True, "end"]:
                buffer.extend(matched)
            if buffer:
                pieces.append(buffer)
            buffer = []
            if self._keep_separator == "start":
                buffer.extend(matched)
            pos += len(matched)
        if buffer:
            pieces.append(buffer)
        return [p for p in pieces if p]

    def _merge_splits(self, splits: List[List[int]]) -> List[List[int]]:
        """Greedily pack consecutive pieces into chunks of at most
        ``chunk_size`` tokens, then apply oversize splitting or overlap."""
        if not splits:
            return []

        measure = self._length_function
        chunks: List[List[int]] = []
        pending: List[int] = []
        for piece in splits:
            if not pending:
                pending = piece
            elif measure(pending) + measure(piece) <= self._chunk_size:
                pending.extend(piece)
            else:
                chunks.append(pending)
                pending = piece
        if pending:
            chunks.append(pending)

        # A single oversized chunk is re-cut with a sliding window instead.
        if len(chunks) == 1 and measure(chunks[0]) > self._chunk_size:
            return self._split_chunk(chunks[0])

        if self._chunk_overlap > 0:
            return self._enforce_overlap(chunks)

        return chunks

    def _split_chunk(self, chunk: List[int]) -> List[List[int]]:
        """Slide a ``chunk_size`` window over *chunk* with overlap-sized step."""
        step = self._chunk_size - self._chunk_overlap
        windows = []
        for start in range(0, len(chunk), step):
            window = chunk[start:start + self._chunk_size]
            # Trailing windows no longer than the overlap would only repeat
            # already-emitted tokens, so they are dropped.
            if len(window) > self._chunk_overlap:
                windows.append(window)
        return windows

    def _enforce_overlap(self, chunks: List[List[int]]) -> List[List[int]]:
        """Prepend the tail of each original chunk to its successor, capping
        the padded chunk at ``chunk_size`` tokens."""
        padded = []
        previous = None
        for chunk in chunks:
            if previous is None:
                padded.append(chunk)
            else:
                candidate = previous[-self._chunk_overlap:] + chunk
                if self._length_function(candidate) > self._chunk_size:
                    candidate = candidate[:self._chunk_size]
                padded.append(candidate)
            previous = chunk
        return padded
+
diff --git a/rag-web-ui/backend/nano_graphrag/_storage/__init__.py b/rag-web-ui/backend/nano_graphrag/_storage/__init__.py
new file mode 100644
index 0000000..0be593b
--- /dev/null
+++ b/rag-web-ui/backend/nano_graphrag/_storage/__init__.py
@@ -0,0 +1,9 @@
+from .gdb_networkx import NetworkXStorage
+from .gdb_neo4j import Neo4jStorage
+from .vdb_nanovectordb import NanoVectorDBStorage
+from .kv_json import JsonKVStorage
+
+try:
+ from .vdb_hnswlib import HNSWVectorStorage
+except ImportError:
+ HNSWVectorStorage = None
diff --git a/rag-web-ui/backend/nano_graphrag/_storage/gdb_neo4j.py b/rag-web-ui/backend/nano_graphrag/_storage/gdb_neo4j.py
new file mode 100644
index 0000000..54739ae
--- /dev/null
+++ b/rag-web-ui/backend/nano_graphrag/_storage/gdb_neo4j.py
@@ -0,0 +1,529 @@
+import json
+import asyncio
+from collections import defaultdict
+from typing import List
+from neo4j import AsyncGraphDatabase
+from dataclasses import dataclass
+from typing import Union
+from ..base import BaseGraphStorage, SingleCommunitySchema
+from .._utils import logger
+from ..prompt import GRAPH_FIELD_SEP
+
+neo4j_lock = asyncio.Lock()
+
+
def make_path_idable(path):
    """Sanitize a filesystem path into a string usable as a Neo4j label
    fragment by rewriting separator/punctuation characters to underscores."""
    replacements = ((".", "_"), ("/", "__"), ("-", "_"), (":", "_"), ("\\", "__"))
    for old, new in replacements:
        path = path.replace(old, new)
    return path
+
+
@dataclass
class Neo4jStorage(BaseGraphStorage):
    """Graph storage backed by a Neo4j server.

    Every node of one logical graph carries a label built from the working
    directory plus the storage namespace, so multiple graphs can coexist in
    a single database. Connection settings are read from
    ``global_config["addon_params"]`` (``neo4j_url`` and ``neo4j_auth``).
    """

    def __post_init__(self):
        self.neo4j_url = self.global_config["addon_params"].get("neo4j_url", None)
        self.neo4j_auth = self.global_config["addon_params"].get("neo4j_auth", None)
        # Prefix the namespace with a sanitized working-dir path so distinct
        # working dirs map to distinct labels in the same database.
        self.namespace = (
            f"{make_path_idable(self.global_config['working_dir'])}__{self.namespace}"
        )
        logger.info(f"Using the label {self.namespace} for Neo4j as identifier")
        if self.neo4j_url is None or self.neo4j_auth is None:
            raise ValueError("Missing neo4j_url or neo4j_auth in addon_params")
        self.async_driver = AsyncGraphDatabase.driver(
            self.neo4j_url, auth=self.neo4j_auth, max_connection_pool_size=50,
        )

    # async def create_database(self):
    #     async with self.async_driver.session() as session:
    #         try:
    #             constraints = await session.run("SHOW CONSTRAINTS")
    #             # TODO I don't know why CREATE CONSTRAINT IF NOT EXISTS still trigger error
    #             # so have to check if the constrain exists
    #             constrain_exists = False

    #             async for record in constraints:
    #                 if (
    #                     self.namespace in record["labelsOrTypes"]
    #                     and "id" in record["properties"]
    #                     and record["type"] == "UNIQUENESS"
    #                 ):
    #                     constrain_exists = True
    #                     break
    #             if not constrain_exists:
    #                 await session.run(
    #                     f"CREATE CONSTRAINT FOR (n:{self.namespace}) REQUIRE n.id IS UNIQUE"
    #                 )
    #                 logger.info(f"Add constraint for namespace: {self.namespace}")

    #         except Exception as e:
    #             logger.error(f"Error accessing or setting up the database: {str(e)}")
    #             raise

    async def _init_workspace(self):
        """Verify credentials and connectivity before the first query."""
        await self.async_driver.verify_authentication()
        await self.async_driver.verify_connectivity()
        # TODOLater: create database if not exists always cause an error when async
        # await self.create_database()

    async def index_start_callback(self):
        """Verify the connection and create lookup indexes for this label."""
        logger.info("Init Neo4j workspace")
        await self._init_workspace()

        # create index for faster searching
        try:
            async with self.async_driver.session() as session:
                await session.run(
                    f"CREATE INDEX IF NOT EXISTS FOR (n:`{self.namespace}`) ON (n.id)"
                )

                await session.run(
                    f"CREATE INDEX IF NOT EXISTS FOR (n:`{self.namespace}`) ON (n.entity_type)"
                )

                await session.run(
                    f"CREATE INDEX IF NOT EXISTS FOR (n:`{self.namespace}`) ON (n.communityIds)"
                )

                await session.run(
                    f"CREATE INDEX IF NOT EXISTS FOR (n:`{self.namespace}`) ON (n.source_id)"
                )
            logger.info("Neo4j indexes created successfully")
        except Exception as e:
            logger.error(f"Failed to create indexes: {e}")
            raise e

    async def has_node(self, node_id: str) -> bool:
        """Return True when a node with this id exists under our label."""
        async with self.async_driver.session() as session:
            result = await session.run(
                f"MATCH (n:`{self.namespace}`) WHERE n.id = $node_id RETURN COUNT(n) > 0 AS exists",
                node_id=node_id,
            )
            record = await result.single()
            return record["exists"] if record else False

    async def has_edge(self, source_node_id: str, target_node_id: str) -> bool:
        """Return True when a directed edge source -> target exists."""
        async with self.async_driver.session() as session:
            result = await session.run(
                f"""
                MATCH (s:`{self.namespace}`)
                WHERE s.id = $source_id
                MATCH (t:`{self.namespace}`)
                WHERE t.id = $target_id
                RETURN EXISTS((s)-[]->(t)) AS exists
                """,
                source_id=source_node_id,
                target_id=target_node_id,
            )

            record = await result.single()
            return record["exists"] if record else False

    async def node_degree(self, node_id: str) -> int:
        """Degree of a single node (0 when missing)."""
        results = await self.node_degrees_batch([node_id])
        return results[0] if results else 0

    # NOTE: annotation fixed (was List[str]) — the query yields integer
    # degrees; the empty-input return fixed from {} to [] to match it.
    async def node_degrees_batch(self, node_ids: List[str]) -> List[int]:
        """Return the degree of each node id, preserving input order.

        Ids that do not match any node keep their pre-filled degree of 0.
        """
        if not node_ids:
            return []

        result_dict = {node_id: 0 for node_id in node_ids}
        async with self.async_driver.session() as session:
            result = await session.run(
                f"""
                UNWIND $node_ids AS node_id
                MATCH (n:`{self.namespace}`)
                WHERE n.id = node_id
                OPTIONAL MATCH (n)-[]-(m:`{self.namespace}`)
                RETURN node_id, COUNT(m) AS degree
                """,
                node_ids=node_ids
            )

            async for record in result:
                result_dict[record["node_id"]] = record["degree"]

        return [result_dict[node_id] for node_id in node_ids]

    async def edge_degree(self, src_id: str, tgt_id: str) -> int:
        """Sum of both endpoint degrees for a single edge (0 when missing)."""
        results = await self.edge_degrees_batch([(src_id, tgt_id)])
        return results[0] if results else 0

    async def edge_degrees_batch(self, edge_pairs: list[tuple[str, str]]) -> list[int]:
        """Return src_degree + tgt_degree per edge pair, preserving order.

        Falls back to all zeros if the batch query fails.
        """
        if not edge_pairs:
            return []

        result_dict = {tuple(edge_pair): 0 for edge_pair in edge_pairs}

        edges_params = [{"src_id": src, "tgt_id": tgt} for src, tgt in edge_pairs]

        try:
            async with self.async_driver.session() as session:
                result = await session.run(
                    f"""
                    UNWIND $edges AS edge

                    MATCH (s:`{self.namespace}`)
                    WHERE s.id = edge.src_id
                    WITH edge, s
                    OPTIONAL MATCH (s)-[]-(n1:`{self.namespace}`)
                    WITH edge, COUNT(n1) AS src_degree

                    MATCH (t:`{self.namespace}`)
                    WHERE t.id = edge.tgt_id
                    WITH edge, src_degree, t
                    OPTIONAL MATCH (t)-[]-(n2:`{self.namespace}`)
                    WITH edge.src_id AS src_id, edge.tgt_id AS tgt_id, src_degree, COUNT(n2) AS tgt_degree

                    RETURN src_id, tgt_id, src_degree + tgt_degree AS degree
                    """,
                    edges=edges_params
                )

                async for record in result:
                    src_id = record["src_id"]
                    tgt_id = record["tgt_id"]
                    degree = record["degree"]

                    # update the result dict with the computed degree
                    edge_pair = (src_id, tgt_id)
                    result_dict[edge_pair] = degree

                return [result_dict[tuple(edge_pair)] for edge_pair in edge_pairs]
        except Exception as e:
            logger.error(f"Error in batch edge degree calculation: {e}")
            return [0] * len(edge_pairs)

    async def get_node(self, node_id: str) -> Union[dict, None]:
        """Fetch a single node's properties, or None when missing."""
        result = await self.get_nodes_batch([node_id])
        return result[0] if result else None

    # NOTE: annotation fixed (was dict[str, Union[dict, None]]) — the method
    # returns a list aligned with node_ids; the empty-input return fixed
    # from {} to [] to match it.
    async def get_nodes_batch(self, node_ids: list[str]) -> list[Union[dict, None]]:
        """Fetch node properties for each id, preserving input order.

        Missing ids yield None. The stored ``communityIds`` list is also
        serialized into a JSON ``clusters`` field (one entry per level) to
        match the schema the query side expects.
        """
        if not node_ids:
            return []

        result_dict = {node_id: None for node_id in node_ids}

        try:
            async with self.async_driver.session() as session:
                result = await session.run(
                    f"""
                    UNWIND $node_ids AS node_id
                    MATCH (n:`{self.namespace}`)
                    WHERE n.id = node_id
                    RETURN node_id, properties(n) AS node_data
                    """,
                    node_ids=node_ids
                )

                async for record in result:
                    node_id = record["node_id"]
                    raw_node_data = record["node_data"]

                    if raw_node_data:
                        raw_node_data["clusters"] = json.dumps(
                            [
                                {
                                    "level": index,
                                    "cluster": cluster_id,
                                }
                                for index, cluster_id in enumerate(
                                    raw_node_data.get("communityIds", [])
                                )
                            ]
                        )
                        result_dict[node_id] = raw_node_data
                return [result_dict[node_id] for node_id in node_ids]
        except Exception as e:
            logger.error(f"Error in batch node retrieval: {e}")
            raise e

    async def get_edge(
        self, source_node_id: str, target_node_id: str
    ) -> Union[dict, None]:
        """Fetch a single directed edge's properties, or None when missing."""
        results = await self.get_edges_batch([(source_node_id, target_node_id)])
        return results[0] if results else None

    async def get_edges_batch(
        self, edge_pairs: list[tuple[str, str]]
    ) -> list[Union[dict, None]]:
        """Fetch edge properties per (source, target) pair, preserving order.

        Missing edges yield None; a failed batch query yields all Nones.
        """
        if not edge_pairs:
            return []

        result_dict = {tuple(edge_pair): None for edge_pair in edge_pairs}

        edges_params = [{"source_id": src, "target_id": tgt} for src, tgt in edge_pairs]

        try:
            async with self.async_driver.session() as session:
                result = await session.run(
                    f"""
                    UNWIND $edges AS edge
                    MATCH (s:`{self.namespace}`)-[r]->(t:`{self.namespace}`)
                    WHERE s.id = edge.source_id AND t.id = edge.target_id
                    RETURN edge.source_id AS source_id, edge.target_id AS target_id, properties(r) AS edge_data
                    """,
                    edges=edges_params
                )

                async for record in result:
                    source_id = record["source_id"]
                    target_id = record["target_id"]
                    edge_data = record["edge_data"]

                    edge_pair = (source_id, target_id)
                    result_dict[edge_pair] = edge_data

                return [result_dict[tuple(edge_pair)] for edge_pair in edge_pairs]
        except Exception as e:
            logger.error(f"Error in batch edge retrieval: {e}")
            return [None] * len(edge_pairs)

    async def get_node_edges(
        self, source_node_id: str
    ) -> list[tuple[str, str]]:
        """Outgoing (source, target) id pairs of a single node."""
        results = await self.get_nodes_edges_batch([source_node_id])
        return results[0] if results else []

    async def get_nodes_edges_batch(
        self, node_ids: list[str]
    ) -> list[list[tuple[str, str]]]:
        """Outgoing (source, target) id pairs per node, preserving order.

        Nodes without outgoing edges (or a failed query) yield empty lists.
        """
        if not node_ids:
            return []

        result_dict = {node_id: [] for node_id in node_ids}

        try:
            async with self.async_driver.session() as session:
                result = await session.run(
                    f"""
                    UNWIND $node_ids AS node_id
                    MATCH (s:`{self.namespace}`)-[r]->(t:`{self.namespace}`)
                    WHERE s.id = node_id
                    RETURN s.id AS source_id, t.id AS target_id
                    """,
                    node_ids=node_ids
                )

                async for record in result:
                    source_id = record["source_id"]
                    target_id = record["target_id"]

                    if source_id in result_dict:
                        result_dict[source_id].append((source_id, target_id))

                return [result_dict[node_id] for node_id in node_ids]
        except Exception as e:
            logger.error(f"Error in batch node edges retrieval: {e}")
            return [[] for _ in node_ids]

    async def upsert_node(self, node_id: str, node_data: dict[str, str]):
        """Insert or update a single node."""
        await self.upsert_nodes_batch([(node_id, node_data)])

    async def upsert_nodes_batch(self, nodes_data: list[tuple[str, dict[str, str]]]):
        """Insert or update nodes, one MERGE batch per entity type.

        The entity type (quotes stripped) becomes an extra node label.
        """
        if not nodes_data:
            return []

        # Group by entity type so each batch can carry its type label.
        nodes_by_type = {}
        for node_id, node_data in nodes_data:
            node_type = node_data.get("entity_type", "UNKNOWN").strip('"')
            if node_type not in nodes_by_type:
                nodes_by_type[node_type] = []
            nodes_by_type[node_type].append((node_id, node_data))

        async with self.async_driver.session() as session:
            for node_type, type_nodes in nodes_by_type.items():
                params = [{"id": node_id, "data": node_data} for node_id, node_data in type_nodes]

                await session.run(
                    f"""
                    UNWIND $nodes AS node
                    MERGE (n:`{self.namespace}`:`{node_type}` {{id: node.id}})
                    SET n += node.data
                    """,
                    nodes=params
                )

    async def upsert_edge(
        self, source_node_id: str, target_node_id: str, edge_data: dict[str, str]
    ):
        """Insert or update a single edge."""
        await self.upsert_edges_batch([(source_node_id, target_node_id, edge_data)])

    async def upsert_edges_batch(
        self, edges_data: list[tuple[str, str, dict[str, str]]]
    ):
        """Insert or update RELATED edges between existing nodes.

        Each edge gets a default ``weight`` of 0.0 when none is supplied;
        both endpoints must already exist for the MERGE to take effect.
        """
        if not edges_data:
            return

        edges_params = []
        for source_id, target_id, edge_data in edges_data:
            edge_data_copy = edge_data.copy()
            edge_data_copy.setdefault("weight", 0.0)

            edges_params.append({
                "source_id": source_id,
                "target_id": target_id,
                "edge_data": edge_data_copy
            })

        async with self.async_driver.session() as session:
            await session.run(
                f"""
                UNWIND $edges AS edge
                MATCH (s:`{self.namespace}`)
                WHERE s.id = edge.source_id
                WITH edge, s
                MATCH (t:`{self.namespace}`)
                WHERE t.id = edge.target_id
                MERGE (s)-[r:RELATED]->(t)
                SET r += edge.edge_data
                """,
                edges=edges_params
            )

    async def clustering(self, algorithm: str):
        """Run GDS Leiden clustering and write ``communityIds`` onto nodes.

        Projects an undirected weighted graph, runs Leiden with intermediate
        community levels, and always drops the projection afterwards.
        Raises ValueError for any algorithm other than "leiden".
        """
        if algorithm != "leiden":
            raise ValueError(
                f"Clustering algorithm {algorithm} not supported in Neo4j implementation"
            )

        random_seed = self.global_config["graph_cluster_seed"]
        max_level = self.global_config["max_graph_cluster_size"]
        async with self.async_driver.session() as session:
            try:
                # Project the graph with undirected relationships
                await session.run(
                    f"""
                    CALL gds.graph.project(
                        'graph_{self.namespace}',
                        ['{self.namespace}'],
                        {{
                            RELATED: {{
                                orientation: 'UNDIRECTED',
                                properties: ['weight']
                            }}
                        }}
                    )
                    """
                )

                # Run Leiden algorithm
                result = await session.run(
                    f"""
                    CALL gds.leiden.write(
                        'graph_{self.namespace}',
                        {{
                            writeProperty: 'communityIds',
                            includeIntermediateCommunities: True,
                            relationshipWeightProperty: "weight",
                            maxLevels: {max_level},
                            tolerance: 0.0001,
                            gamma: 1.0,
                            theta: 0.01,
                            randomSeed: {random_seed}
                        }}
                    )
                    YIELD communityCount, modularities;
                    """
                )
                result = await result.single()
                community_count: int = result["communityCount"]
                modularities = result["modularities"]
                logger.info(
                    f"Performed graph clustering with {community_count} communities and modularities {modularities}"
                )
            finally:
                # Drop the projected graph
                await session.run(f"CALL gds.graph.drop('graph_{self.namespace}')")

    async def community_schema(self) -> dict[str, SingleCommunitySchema]:
        """Rebuild the per-community schema from node ``communityIds``.

        For every (node, level) pair, accumulates the community's nodes,
        undirected edges, and chunk ids; ``occurrence`` is normalised by the
        largest chunk-id count seen across communities.
        """
        results = defaultdict(
            lambda: dict(
                level=None,
                title=None,
                edges=set(),
                nodes=set(),
                chunk_ids=set(),
                occurrence=0.0,
                sub_communities=[],
            )
        )

        async with self.async_driver.session() as session:
            # Fetch community data
            result = await session.run(
                f"""
                MATCH (n:`{self.namespace}`)
                WITH n, n.communityIds AS communityIds, [(n)-[]-(m:`{self.namespace}`) | m.id] AS connected_nodes
                RETURN n.id AS node_id, n.source_id AS source_id,
                       communityIds AS cluster_key,
                       connected_nodes
                """
            )

            # records = await result.fetch()

            max_num_ids = 0
            async for record in result:
                # communityIds holds one cluster id per Leiden level; the
                # list index is the level.
                for index, c_id in enumerate(record["cluster_key"]):
                    node_id = str(record["node_id"])
                    source_id = record["source_id"]
                    level = index
                    cluster_key = str(c_id)
                    connected_nodes = record["connected_nodes"]

                    results[cluster_key]["level"] = level
                    results[cluster_key]["title"] = f"Cluster {cluster_key}"
                    results[cluster_key]["nodes"].add(node_id)
                    results[cluster_key]["edges"].update(
                        [
                            tuple(sorted([node_id, str(connected)]))
                            for connected in connected_nodes
                            if connected != node_id
                        ]
                    )
                    chunk_ids = source_id.split(GRAPH_FIELD_SEP)
                    results[cluster_key]["chunk_ids"].update(chunk_ids)
                    max_num_ids = max(
                        max_num_ids, len(results[cluster_key]["chunk_ids"])
                    )

            # Process results: convert the working sets to JSON-friendly lists.
            for k, v in results.items():
                v["edges"] = [list(e) for e in v["edges"]]
                v["nodes"] = list(v["nodes"])
                v["chunk_ids"] = list(v["chunk_ids"])
                v["occurrence"] = len(v["chunk_ids"]) / max_num_ids

            # Compute sub-communities (this is a simplified approach)
            for cluster in results.values():
                cluster["sub_communities"] = [
                    sub_key
                    for sub_key, sub_cluster in results.items()
                    if sub_cluster["level"] > cluster["level"]
                    and set(sub_cluster["nodes"]).issubset(set(cluster["nodes"]))
                ]

        return dict(results)

    async def index_done_callback(self):
        """Close the driver once indexing is finished."""
        await self.async_driver.close()

    async def _debug_delete_all_node_edges(self):
        """Debug helper: wipe every node and relationship under our label."""
        async with self.async_driver.session() as session:
            try:
                # Delete all relationships in the namespace
                await session.run(f"MATCH (n:`{self.namespace}`)-[r]-() DELETE r")

                # Delete all nodes in the namespace
                await session.run(f"MATCH (n:`{self.namespace}`) DELETE n")

                logger.info(
                    f"All nodes and edges in namespace '{self.namespace}' have been deleted."
                )
            except Exception as e:
                logger.error(f"Error deleting nodes and edges: {str(e)}")
                raise
diff --git a/rag-web-ui/backend/nano_graphrag/_storage/gdb_networkx.py b/rag-web-ui/backend/nano_graphrag/_storage/gdb_networkx.py
new file mode 100644
index 0000000..8be8be8
--- /dev/null
+++ b/rag-web-ui/backend/nano_graphrag/_storage/gdb_networkx.py
@@ -0,0 +1,268 @@
+import html
+import json
+import os
+from collections import defaultdict
+from dataclasses import dataclass
+from typing import Any, Union, cast, List
+import networkx as nx
+import numpy as np
+import asyncio
+
+from .._utils import logger
+from ..base import (
+ BaseGraphStorage,
+ SingleCommunitySchema,
+)
+from ..prompt import GRAPH_FIELD_SEP
+
+
@dataclass
class NetworkXStorage(BaseGraphStorage):
    """Graph storage backed by an in-memory networkx graph, persisted as GraphML."""

    @staticmethod
    def load_nx_graph(file_name) -> Union[nx.Graph, None]:
        """Load a persisted GraphML graph, or return None when the file is absent."""
        if os.path.exists(file_name):
            return nx.read_graphml(file_name)
        return None

    @staticmethod
    def write_nx_graph(graph: nx.Graph, file_name):
        """Persist *graph* to *file_name* in GraphML format."""
        logger.info(
            f"Writing graph with {graph.number_of_nodes()} nodes, {graph.number_of_edges()} edges"
        )
        nx.write_graphml(graph, file_name)

    @staticmethod
    def stable_largest_connected_component(graph: nx.Graph) -> nx.Graph:
        """Refer to https://github.com/microsoft/graphrag/index/graph/utils/stable_lcc.py
        Return the largest connected component of the graph, with nodes and edges sorted in a stable way.
        """
        from graspologic.utils import largest_connected_component

        graph = graph.copy()
        graph = cast(nx.Graph, largest_connected_component(graph))
        # Normalize node labels: HTML-unescape, uppercase, strip surrounding whitespace.
        node_mapping = {node: html.unescape(node.upper().strip()) for node in graph.nodes()}  # type: ignore
        graph = nx.relabel_nodes(graph, node_mapping)
        return NetworkXStorage._stabilize_graph(graph)

    @staticmethod
    def _stabilize_graph(graph: nx.Graph) -> nx.Graph:
        """Refer to https://github.com/microsoft/graphrag/index/graph/utils/stable_lcc.py
        Ensure an undirected graph with the same relationships will always be read the same way.
        """
        fixed_graph = nx.DiGraph() if graph.is_directed() else nx.Graph()

        sorted_nodes = graph.nodes(data=True)
        sorted_nodes = sorted(sorted_nodes, key=lambda x: x[0])

        fixed_graph.add_nodes_from(sorted_nodes)
        edges = list(graph.edges(data=True))

        if not graph.is_directed():

            def _sort_source_target(edge):
                # Orient each undirected edge (smaller, larger) so the
                # serialized edge order is deterministic.
                source, target, edge_data = edge
                if source > target:
                    temp = source
                    source = target
                    target = temp
                return source, target, edge_data

            edges = [_sort_source_target(edge) for edge in edges]

        def _get_edge_key(source: Any, target: Any) -> str:
            return f"{source} -> {target}"

        edges = sorted(edges, key=lambda x: _get_edge_key(x[0], x[1]))

        fixed_graph.add_edges_from(edges)
        return fixed_graph

    def __post_init__(self):
        # GraphML file path derives from the working dir and the namespace.
        self._graphml_xml_file = os.path.join(
            self.global_config["working_dir"], f"graph_{self.namespace}.graphml"
        )
        preloaded_graph = NetworkXStorage.load_nx_graph(self._graphml_xml_file)
        if preloaded_graph is not None:
            logger.info(
                f"Loaded graph from {self._graphml_xml_file} with {preloaded_graph.number_of_nodes()} nodes, {preloaded_graph.number_of_edges()} edges"
            )
        self._graph = preloaded_graph or nx.Graph()
        # Dispatch tables for the supported clustering / embedding algorithms.
        self._clustering_algorithms = {
            "leiden": self._leiden_clustering,
        }
        self._node_embed_algorithms = {
            "node2vec": self._node2vec_embed,
        }

    async def index_done_callback(self):
        # Flush the in-memory graph to its GraphML file.
        NetworkXStorage.write_nx_graph(self._graph, self._graphml_xml_file)

    async def has_node(self, node_id: str) -> bool:
        return self._graph.has_node(node_id)

    async def has_edge(self, source_node_id: str, target_node_id: str) -> bool:
        return self._graph.has_edge(source_node_id, target_node_id)

    async def get_node(self, node_id: str) -> Union[dict, None]:
        """Return the node's attribute dict, or None when it does not exist."""
        return self._graph.nodes.get(node_id)

    async def get_nodes_batch(self, node_ids: list[str]) -> list[Union[dict, None]]:
        # Returns a list aligned with node_ids (asyncio.gather preserves order).
        return await asyncio.gather(*[self.get_node(node_id) for node_id in node_ids])

    async def node_degree(self, node_id: str) -> int:
        # [numberchiffre]: node_id not part of graph returns `DegreeView({})` instead of 0
        return self._graph.degree(node_id) if self._graph.has_node(node_id) else 0

    async def node_degrees_batch(self, node_ids: List[str]) -> List[int]:
        return await asyncio.gather(*[self.node_degree(node_id) for node_id in node_ids])

    async def edge_degree(self, src_id: str, tgt_id: str) -> int:
        """Sum of the two endpoint degrees (0 for endpoints not in the graph)."""
        return (self._graph.degree(src_id) if self._graph.has_node(src_id) else 0) + (
            self._graph.degree(tgt_id) if self._graph.has_node(tgt_id) else 0
        )

    async def edge_degrees_batch(self, edge_pairs: list[tuple[str, str]]) -> list[int]:
        return await asyncio.gather(*[self.edge_degree(src_id, tgt_id) for src_id, tgt_id in edge_pairs])

    async def get_edge(
        self, source_node_id: str, target_node_id: str
    ) -> Union[dict, None]:
        """Return the edge's attribute dict, or None when it does not exist."""
        return self._graph.edges.get((source_node_id, target_node_id))

    async def get_edges_batch(
        self, edge_pairs: list[tuple[str, str]]
    ) -> list[Union[dict, None]]:
        return await asyncio.gather(*[self.get_edge(source_node_id, target_node_id) for source_node_id, target_node_id in edge_pairs])

    async def get_node_edges(self, source_node_id: str):
        """Return the node's incident edges as (u, v) tuples, or None when absent."""
        if self._graph.has_node(source_node_id):
            return list(self._graph.edges(source_node_id))
        return None

    async def get_nodes_edges_batch(
        self, node_ids: list[str]
    ) -> list[list[tuple[str, str]]]:
        return await asyncio.gather(*[self.get_node_edges(node_id) for node_id
                                      in node_ids])

    async def upsert_node(self, node_id: str, node_data: dict[str, str]):
        self._graph.add_node(node_id, **node_data)

    async def upsert_nodes_batch(self, nodes_data: list[tuple[str, dict[str, str]]]):
        await asyncio.gather(*[self.upsert_node(node_id, node_data) for node_id, node_data in nodes_data])

    async def upsert_edge(
        self, source_node_id: str, target_node_id: str, edge_data: dict[str, str]
    ):
        self._graph.add_edge(source_node_id, target_node_id, **edge_data)

    async def upsert_edges_batch(
        self, edges_data: list[tuple[str, str, dict[str, str]]]
    ):
        await asyncio.gather(*[self.upsert_edge(source_node_id, target_node_id, edge_data)
                               for source_node_id, target_node_id, edge_data in edges_data])

    async def clustering(self, algorithm: str):
        """Run the named clustering algorithm (currently only "leiden")."""
        if algorithm not in self._clustering_algorithms:
            raise ValueError(f"Clustering algorithm {algorithm} not supported")
        await self._clustering_algorithms[algorithm]()

    async def community_schema(self) -> dict[str, SingleCommunitySchema]:
        """Aggregate per-node cluster assignments into community records.

        Reads the JSON "clusters" node attribute written by _leiden_clustering
        and builds, per cluster id: level, member nodes/edges, source chunk ids,
        an occurrence score (chunk count normalized by the largest community),
        and the sub-communities at the next level.
        """
        results = defaultdict(
            lambda: dict(
                level=None,
                title=None,
                edges=set(),
                nodes=set(),
                chunk_ids=set(),
                occurrence=0.0,
                sub_communities=[],
            )
        )
        max_num_ids = 0
        levels = defaultdict(set)
        for node_id, node_data in self._graph.nodes(data=True):
            if "clusters" not in node_data:
                continue
            clusters = json.loads(node_data["clusters"])
            this_node_edges = self._graph.edges(node_id)

            for cluster in clusters:
                level = cluster["level"]
                cluster_key = str(cluster["cluster"])
                levels[level].add(cluster_key)
                results[cluster_key]["level"] = level
                results[cluster_key]["title"] = f"Cluster {cluster_key}"
                results[cluster_key]["nodes"].add(node_id)
                results[cluster_key]["edges"].update(
                    # Endpoint-sorted so (a, b) and (b, a) deduplicate.
                    [tuple(sorted(e)) for e in this_node_edges]
                )
                results[cluster_key]["chunk_ids"].update(
                    node_data["source_id"].split(GRAPH_FIELD_SEP)
                )
                max_num_ids = max(max_num_ids, len(results[cluster_key]["chunk_ids"]))

        ordered_levels = sorted(levels.keys())
        for i, curr_level in enumerate(ordered_levels[:-1]):
            next_level = ordered_levels[i + 1]
            this_level_comms = levels[curr_level]
            next_level_comms = levels[next_level]
            # compute the sub-communities by nodes intersection
            for comm in this_level_comms:
                results[comm]["sub_communities"] = [
                    c
                    for c in next_level_comms
                    if results[c]["nodes"].issubset(results[comm]["nodes"])
                ]

        for k, v in results.items():
            # Convert sets to lists so the schema is JSON-serializable.
            v["edges"] = list(v["edges"])
            v["edges"] = [list(e) for e in v["edges"]]
            v["nodes"] = list(v["nodes"])
            v["chunk_ids"] = list(v["chunk_ids"])
            v["occurrence"] = len(v["chunk_ids"]) / max_num_ids
        return dict(results)

    def _cluster_data_to_subgraphs(self, cluster_data: dict[str, list[dict[str, str]]]):
        # Persist each node's cluster memberships as a JSON string attribute.
        for node_id, clusters in cluster_data.items():
            self._graph.nodes[node_id]["clusters"] = json.dumps(clusters)

    async def _leiden_clustering(self):
        """Run hierarchical Leiden on the stabilized largest connected component
        and record each node's (level, cluster) memberships on the graph."""
        from graspologic.partition import hierarchical_leiden

        graph = NetworkXStorage.stable_largest_connected_component(self._graph)
        community_mapping = hierarchical_leiden(
            graph,
            max_cluster_size=self.global_config["max_graph_cluster_size"],
            random_seed=self.global_config["graph_cluster_seed"],
        )

        node_communities: dict[str, list[dict[str, str]]] = defaultdict(list)
        __levels = defaultdict(set)
        for partition in community_mapping:
            level_key = partition.level
            cluster_id = partition.cluster
            node_communities[partition.node].append(
                {"level": level_key, "cluster": cluster_id}
            )
            __levels[level_key].add(cluster_id)
        node_communities = dict(node_communities)
        __levels = {k: len(v) for k, v in __levels.items()}
        logger.info(f"Each level has communities: {dict(__levels)}")
        self._cluster_data_to_subgraphs(node_communities)

    async def embed_nodes(self, algorithm: str) -> tuple[np.ndarray, list[str]]:
        """Embed all nodes with the named algorithm (currently only "node2vec")."""
        if algorithm not in self._node_embed_algorithms:
            raise ValueError(f"Node embedding algorithm {algorithm} not supported")
        return await self._node_embed_algorithms[algorithm]()

    async def _node2vec_embed(self):
        from graspologic import embed

        embeddings, nodes = embed.node2vec_embed(
            self._graph,
            **self.global_config["node2vec_params"],
        )

        # NOTE(review): assumes every node carries an "id" attribute — confirm
        # the upsert path always sets it, otherwise this raises KeyError.
        nodes_ids = [self._graph.nodes[node_id]["id"] for node_id in nodes]
        return embeddings, nodes_ids
diff --git a/rag-web-ui/backend/nano_graphrag/_storage/kv_json.py b/rag-web-ui/backend/nano_graphrag/_storage/kv_json.py
new file mode 100644
index 0000000..b802f26
--- /dev/null
+++ b/rag-web-ui/backend/nano_graphrag/_storage/kv_json.py
@@ -0,0 +1,46 @@
+import os
+from dataclasses import dataclass
+
+from .._utils import load_json, logger, write_json
+from ..base import (
+ BaseKVStorage,
+)
+
+
@dataclass
class JsonKVStorage(BaseKVStorage):
    """Key-value storage persisted as one JSON file per namespace.

    All data is held in memory in `self._data` and flushed to
    `<working_dir>/kv_store_<namespace>.json` by index_done_callback.
    """

    def __post_init__(self):
        working_dir = self.global_config["working_dir"]
        self._file_name = os.path.join(working_dir, f"kv_store_{self.namespace}.json")
        self._data = load_json(self._file_name) or {}
        logger.info(f"Load KV {self.namespace} with {len(self._data)} data")

    async def all_keys(self) -> list[str]:
        """Return every key currently stored."""
        return list(self._data.keys())

    async def index_done_callback(self):
        # Flush the in-memory dict back to its JSON file.
        write_json(self._data, self._file_name)

    async def get_by_id(self, id):
        """Return the value for *id*, or None when missing."""
        return self._data.get(id, None)

    async def get_by_ids(self, ids, fields=None):
        """Batch lookup; missing ids yield None.

        When *fields* is given, each hit is projected down to those keys.
        Presence is checked with `in` (not truthiness) so a stored falsy value
        such as an empty dict is still returned rather than reported missing.
        """
        if fields is None:
            return [self._data.get(id, None) for id in ids]
        return [
            (
                {k: v for k, v in self._data[id].items() if k in fields}
                if id in self._data
                else None
            )
            for id in ids
        ]

    async def filter_keys(self, data: list[str]) -> set[str]:
        """Return the subset of *data* that is NOT yet stored."""
        return {s for s in data if s not in self._data}

    async def upsert(self, data: dict[str, dict]):
        """Insert or overwrite the given id -> value entries."""
        self._data.update(data)

    async def drop(self):
        """Discard all in-memory data for this namespace."""
        self._data = {}
diff --git a/rag-web-ui/backend/nano_graphrag/_storage/vdb_hnswlib.py b/rag-web-ui/backend/nano_graphrag/_storage/vdb_hnswlib.py
new file mode 100644
index 0000000..3e98c95
--- /dev/null
+++ b/rag-web-ui/backend/nano_graphrag/_storage/vdb_hnswlib.py
@@ -0,0 +1,141 @@
+import asyncio
+import os
+from dataclasses import dataclass, field
+from typing import Any
+import pickle
+import hnswlib
+import numpy as np
+import xxhash
+
+from .._utils import logger
+from ..base import BaseVectorStorage
+
+
@dataclass
class HNSWVectorStorage(BaseVectorStorage):
    """Vector storage backed by an hnswlib cosine index, persisted to disk.

    The index is saved to `<working_dir>/<namespace>_hnsw.index`; the
    integer-id -> metadata mapping plus the element count are pickled next
    to it by index_done_callback.
    """

    # HNSW build/search parameters; each may be overridden through
    # global_config["vector_db_storage_cls_kwargs"] in __post_init__.
    ef_construction: int = 100
    M: int = 16
    max_elements: int = 1000000
    ef_search: int = 50
    num_threads: int = -1
    _index: Any = field(init=False)
    # Keys are xxhash32 digests of the string ids (see upsert).
    _metadata: dict[int, dict] = field(default_factory=dict)
    _current_elements: int = 0

    def __post_init__(self):
        self._index_file_name = os.path.join(
            self.global_config["working_dir"], f"{self.namespace}_hnsw.index"
        )
        self._metadata_file_name = os.path.join(
            self.global_config["working_dir"], f"{self.namespace}_hnsw_metadata.pkl"
        )
        self._embedding_batch_num = self.global_config.get("embedding_batch_num", 100)

        hnsw_params = self.global_config.get("vector_db_storage_cls_kwargs", {})
        self.ef_construction = hnsw_params.get("ef_construction", self.ef_construction)
        self.M = hnsw_params.get("M", self.M)
        self.max_elements = hnsw_params.get("max_elements", self.max_elements)
        self.ef_search = hnsw_params.get("ef_search", self.ef_search)
        self.num_threads = hnsw_params.get("num_threads", self.num_threads)
        self._index = hnswlib.Index(
            space="cosine", dim=self.embedding_func.embedding_dim
        )

        # Reload a previously persisted index + metadata pair when both files
        # exist; otherwise initialize a fresh, empty index.
        if os.path.exists(self._index_file_name) and os.path.exists(
            self._metadata_file_name
        ):
            self._index.load_index(
                self._index_file_name, max_elements=self.max_elements
            )
            with open(self._metadata_file_name, "rb") as f:
                self._metadata, self._current_elements = pickle.load(f)
            logger.info(
                f"Loaded existing index for {self.namespace} with {self._current_elements} elements"
            )
        else:
            self._index.init_index(
                max_elements=self.max_elements,
                ef_construction=self.ef_construction,
                M=self.M,
            )
            self._index.set_ef(self.ef_search)
            self._metadata = {}
            self._current_elements = 0
            logger.info(f"Created new index for {self.namespace}")

    async def upsert(self, data: dict[str, dict]) -> np.ndarray:
        """Embed each value's 'content' and add it to the index.

        Returns the np.uint32 ids of the inserted items (note: an empty *list*
        when *data* is empty). Raises ValueError when the insert would exceed
        max_elements.
        """
        logger.info(f"Inserting {len(data)} vectors to {self.namespace}")
        if not data:
            logger.warning("You insert an empty data to vector DB")
            return []

        if self._current_elements + len(data) > self.max_elements:
            raise ValueError(
                f"Cannot insert {len(data)} elements. Current: {self._current_elements}, Max: {self.max_elements}"
            )

        # Keep only the declared meta_fields alongside the original string id.
        list_data = [
            {
                "id": k,
                **{k1: v1 for k1, v1 in v.items() if k1 in self.meta_fields},
            }
            for k, v in data.items()
        ]
        contents = [v["content"] for v in data.values()]
        batch_size = min(self._embedding_batch_num, len(contents))
        # Embed in concurrent batches, then stitch the chunks back together.
        embeddings = np.concatenate(
            await asyncio.gather(
                *[
                    self.embedding_func(contents[i : i + batch_size])
                    for i in range(0, len(contents), batch_size)
                ]
            )
        )

        # hnswlib needs integer labels: hash the string ids down to uint32.
        ids = np.fromiter(
            (xxhash.xxh32_intdigest(d["id"].encode()) for d in list_data),
            dtype=np.uint32,
            count=len(list_data),
        )
        self._metadata.update(
            {
                id_int: {
                    k: v for k, v in d.items() if k in self.meta_fields or k == "id"
                }
                for id_int, d in zip(ids, list_data)
            }
        )
        self._index.add_items(data=embeddings, ids=ids, num_threads=self.num_threads)
        self._current_elements = self._index.get_current_count()
        return ids

    async def query(self, query: str, top_k: int = 5) -> list[dict]:
        """Return up to top_k nearest stored entries with metadata and similarity."""
        if self._current_elements == 0:
            return []

        # Cannot request more neighbours than there are stored elements.
        top_k = min(top_k, self._current_elements)

        if top_k > self.ef_search:
            # hnswlib requires ef >= k; raise ef so the query can return k hits.
            logger.warning(
                f"Setting ef_search to {top_k} because top_k is larger than ef_search"
            )
            self._index.set_ef(top_k)

        embedding = await self.embedding_func([query])
        labels, distances = self._index.knn_query(
            data=embedding[0], k=top_k, num_threads=self.num_threads
        )

        return [
            {
                **self._metadata.get(label, {}),
                "distance": distance,
                # cosine distance -> similarity
                "similarity": 1 - distance,
            }
            for label, distance in zip(labels[0], distances[0])
        ]

    async def index_done_callback(self):
        # Persist both the index and the (metadata, count) snapshot.
        self._index.save_index(self._index_file_name)
        with open(self._metadata_file_name, "wb") as f:
            pickle.dump((self._metadata, self._current_elements), f)
diff --git a/rag-web-ui/backend/nano_graphrag/_storage/vdb_nanovectordb.py b/rag-web-ui/backend/nano_graphrag/_storage/vdb_nanovectordb.py
new file mode 100644
index 0000000..f73ab06
--- /dev/null
+++ b/rag-web-ui/backend/nano_graphrag/_storage/vdb_nanovectordb.py
@@ -0,0 +1,68 @@
+import asyncio
+import os
+from dataclasses import dataclass
+import numpy as np
+from nano_vectordb import NanoVectorDB
+
+from .._utils import logger
+from ..base import BaseVectorStorage
+
+
@dataclass
class NanoVectorDBStorage(BaseVectorStorage):
    """Vector storage backed by NanoVectorDB, persisted as a JSON file."""

    cosine_better_than_threshold: float = 0.2

    def __post_init__(self):

        storage_path = os.path.join(
            self.global_config["working_dir"], f"vdb_{self.namespace}.json"
        )
        self._client_file_name = storage_path
        self._max_batch_size = self.global_config["embedding_batch_num"]
        self._client = NanoVectorDB(
            self.embedding_func.embedding_dim, storage_file=self._client_file_name
        )
        # The global config may override the default similarity cut-off.
        self.cosine_better_than_threshold = self.global_config.get(
            "query_better_than_threshold", self.cosine_better_than_threshold
        )

    async def upsert(self, data: dict[str, dict]):
        """Embed each value's 'content' and upsert it into the vector DB."""
        logger.info(f"Inserting {len(data)} vectors to {self.namespace}")
        if not len(data):
            logger.warning("You insert an empty data to vector DB")
            return []
        records = []
        for key, value in data.items():
            record = {"__id__": key}
            # Carry over only the declared metadata fields.
            for meta_key, meta_value in value.items():
                if meta_key in self.meta_fields:
                    record[meta_key] = meta_value
            records.append(record)
        texts = [value["content"] for value in data.values()]
        step = self._max_batch_size
        # Embed concurrently in fixed-size batches.
        embedding_chunks = await asyncio.gather(
            *(
                self.embedding_func(texts[start : start + step])
                for start in range(0, len(texts), step)
            )
        )
        all_embeddings = np.concatenate(embedding_chunks)
        for position, record in enumerate(records):
            record["__vector__"] = all_embeddings[position]
        return self._client.upsert(datas=records)

    async def query(self, query: str, top_k=5):
        """Return the top_k nearest stored entries above the similarity threshold."""
        query_vector = (await self.embedding_func([query]))[0]
        raw_hits = self._client.query(
            query=query_vector,
            top_k=top_k,
            better_than_threshold=self.cosine_better_than_threshold,
        )
        return [
            {**hit, "id": hit["__id__"], "distance": hit["__metrics__"]}
            for hit in raw_hits
        ]

    async def index_done_callback(self):
        # Flush the vector index to its JSON storage file.
        self._client.save()
diff --git a/rag-web-ui/backend/nano_graphrag/_utils.py b/rag-web-ui/backend/nano_graphrag/_utils.py
new file mode 100644
index 0000000..03e3118
--- /dev/null
+++ b/rag-web-ui/backend/nano_graphrag/_utils.py
@@ -0,0 +1,307 @@
+import asyncio
+import html
+import json
+import logging
+import os
+import re
+import numbers
+from dataclasses import dataclass
+from functools import wraps
+from hashlib import md5
+from typing import Any, Union, Literal
+
+import numpy as np
+import tiktoken
+
+try:
+ from transformers import AutoTokenizer
+except ImportError:
+ AutoTokenizer = None
+
# Package-wide logger; the neo4j driver's own logger is noisy, so cap it at ERROR.
logger = logging.getLogger("nano-graphrag")
logging.getLogger("neo4j").setLevel(logging.ERROR)
+
def always_get_an_event_loop() -> asyncio.AbstractEventLoop:
    """Return a usable event loop, creating one for the current thread if needed.

    NOTE(review): asyncio.get_event_loop() is deprecated for implicit loop
    access outside a running loop on newer Python versions and may emit a
    DeprecationWarning — confirm the supported Python range.
    """
    try:
        # If there is already an event loop, use it.
        loop = asyncio.get_event_loop()
    except RuntimeError:
        # If in a sub-thread, create a new event loop.
        logger.info("Creating a new event loop in a sub-thread.")
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
    return loop
+
+
def extract_first_complete_json(s: str):
    """Locate and parse the first brace-balanced JSON object inside *s*.

    Scans left to right tracking brace depth; the first time the depth
    returns to zero a single parse attempt is made. Returns the parsed
    object, or None when parsing fails or no complete object exists.
    """
    depth = 0
    start = None

    for pos, ch in enumerate(s):
        if ch == "{":
            if start is None:
                start = pos
            depth += 1
        elif ch == "}" and depth > 0:
            depth -= 1
            if depth == 0:
                candidate = s[start : pos + 1]
                try:
                    # Newlines are stripped to tolerate sloppy LLM output.
                    return json.loads(candidate.replace("\n", ""))
                except json.JSONDecodeError as e:
                    logger.error(f"JSON decoding failed: {e}. Attempted string: {candidate[:50]}...")
                    return None
    logger.warning("No complete JSON object found in the input string.")
    return None
+
def parse_value(value: str):
    """Coerce a bare string token into None/bool/int/float, else keep it a string.

    Acts as a narrow, safe substitute for eval(): recognizes the JSON literals
    null/true/false, then numbers, and finally falls back to stripping any
    surrounding double quotes.
    """
    value = value.strip()

    literals = {"null": None, "true": True, "false": False}
    if value in literals:
        return literals[value]

    try:
        # A dot signals a float; otherwise only an integer parse is attempted,
        # so tokens like "1e5" deliberately remain strings.
        return float(value) if "." in value else int(value)
    except ValueError:
        # Not numeric: treat as a string, removing surrounding quotes if present.
        return value.strip('"')
+
def extract_values_from_json(json_string, keys=["reasoning", "answer", "data"], allow_no_quotes=False):
    """Extract key/value pairs from a non-standard or malformed JSON string.

    Handles quoted and unquoted scalar values and one level of nesting per
    recursion step. `keys` and `allow_no_quotes` are kept for interface
    compatibility (currently unused by the matching logic).

    Returns:
        dict mapping each matched key to its parsed value (possibly empty).
    """
    extracted_values = {}

    # Named groups restored: the pattern had lost the `<key>`/`<value>` group
    # names (`(?P"?...` ), which made re.finditer raise re.error on every call
    # while match.group('key')/('value') below expect those names.
    regex_pattern = r'(?P<key>"?\w+"?)\s*:\s*(?P<value>{[^}]*}|".*?"|[^,}]+)'

    for match in re.finditer(regex_pattern, json_string, re.DOTALL):
        key = match.group('key').strip('"')  # Strip quotes from key
        value = match.group('value').strip()

        # If the value is another nested JSON (starts with '{' and ends with '}'), recursively parse it
        if value.startswith('{') and value.endswith('}'):
            extracted_values[key] = extract_values_from_json(value)
        else:
            # Parse the value into the appropriate type (int, float, bool, etc.)
            extracted_values[key] = parse_value(value)

    if not extracted_values:
        logger.warning("No values could be extracted from the string.")

    return extracted_values
+
+
def convert_response_to_json(response: str) -> dict:
    """Parse an LLM response into a dict, falling back to lenient extraction.

    First tries a strict parse of the first complete JSON object; when that
    fails, falls back to regex-based value extraction for malformed JSON.
    """
    result = extract_first_complete_json(response)

    if result is None:
        logger.info("Attempting to extract values from a non-standard JSON string...")
        result = extract_values_from_json(response, allow_no_quotes=True)

    if result:
        logger.info("JSON data successfully extracted.")
    else:
        logger.error("Unable to extract meaningful data from the response.")

    return result
+
+
+
+
class TokenizerWrapper:
    """Uniform facade over tiktoken and HuggingFace tokenizers.

    Exposes encode/decode/decode_batch regardless of the backend so callers
    never need to know which tokenizer library is in use.
    """

    def __init__(self, tokenizer_type: Literal["tiktoken", "huggingface"] = "tiktoken", model_name: str = "gpt-4o"):
        self.tokenizer_type = tokenizer_type
        self.model_name = model_name
        self._tokenizer = None
        # Loaded eagerly here; _lazy_load_tokenizer stays safe to call again.
        self._lazy_load_tokenizer()

    def _lazy_load_tokenizer(self):
        """Instantiate the backend tokenizer once; subsequent calls are no-ops."""
        if self._tokenizer is not None:
            return
        logger.info(f"Loading tokenizer: type='{self.tokenizer_type}', name='{self.model_name}'")
        if self.tokenizer_type == "tiktoken":
            self._tokenizer = tiktoken.encoding_for_model(self.model_name)
        elif self.tokenizer_type == "huggingface":
            # transformers is an optional dependency (see the guarded import).
            if AutoTokenizer is None:
                raise ImportError("`transformers` is not installed. Please install it via `pip install transformers` to use HuggingFace tokenizers.")
            self._tokenizer = AutoTokenizer.from_pretrained(self.model_name, use_fast=True)
        else:
            raise ValueError(f"Unknown tokenizer_type: {self.tokenizer_type}")

    def get_tokenizer(self):
        """Expose the underlying tokenizer object for special cases (e.g. decode_batch)."""
        self._lazy_load_tokenizer()
        return self._tokenizer

    def encode(self, text: str) -> list[int]:
        """Encode *text* into a list of token ids."""
        self._lazy_load_tokenizer()
        return self._tokenizer.encode(text)

    def decode(self, tokens: list[int]) -> str:
        """Decode a list of token ids back into text."""
        self._lazy_load_tokenizer()
        return self._tokenizer.decode(tokens)

    # Batch decoding helper, added for efficiency while keeping the interface consistent.
    def decode_batch(self, tokens_list: list[list[int]]) -> list[str]:
        self._lazy_load_tokenizer()
        # HuggingFace tokenizers provide batch_decode; tiktoken does not, so the
        # tiktoken path emulates it with a list comprehension.
        if self.tokenizer_type == "tiktoken":
            return [self._tokenizer.decode(tokens) for tokens in tokens_list]
        elif self.tokenizer_type == "huggingface":
            return self._tokenizer.batch_decode(tokens_list, skip_special_tokens=True)
        else:
            raise ValueError(f"Unknown tokenizer_type: {self.tokenizer_type}")
+
+
+
def truncate_list_by_token_size(
    list_data: list,
    key: callable,
    max_token_size: int,
    tokenizer_wrapper: TokenizerWrapper
):
    """Return the longest prefix of *list_data* whose total token count fits the budget.

    *key* extracts the text of each item; the item that pushes the running
    total past *max_token_size* is excluded. A non-positive budget yields [].
    """
    if max_token_size <= 0:
        return []
    running_total = 0
    for idx, item in enumerate(list_data):
        # The +1 is defensive: it models joining the entries with "\n".
        running_total += len(tokenizer_wrapper.encode(key(item))) + 1
        if running_total > max_token_size:
            return list_data[:idx]
    return list_data
+
+
def compute_mdhash_id(content, prefix: str = ""):
    """Return *prefix* followed by the hex MD5 of *content* (a stable content id)."""
    digest = md5(content.encode()).hexdigest()
    return f"{prefix}{digest}"
+
+
def write_json(json_obj, file_name):
    """Serialize *json_obj* to *file_name* as pretty-printed UTF-8 JSON."""
    with open(file_name, "w", encoding="utf-8") as fp:
        # indent=2 for readability; ensure_ascii=False keeps non-ASCII text intact.
        json.dump(json_obj, fp, indent=2, ensure_ascii=False)
+
+
def load_json(file_name):
    """Read JSON from *file_name*; return None when the file does not exist."""
    if not os.path.exists(file_name):
        return None
    with open(file_name, encoding="utf-8") as fp:
        return json.load(fp)
+
+
+# it's dirty to type, so it's a good way to have fun
def pack_user_ass_to_openai_messages(prompt: str, generated_content: str, using_amazon_bedrock: bool):
    """Wrap a prompt/completion pair as chat messages, Bedrock- or OpenAI-shaped."""
    if using_amazon_bedrock:
        # Bedrock expects message content as a list of text parts.
        user_content = [{"text": prompt}]
        assistant_content = [{"text": generated_content}]
    else:
        user_content = prompt
        assistant_content = generated_content
    return [
        {"role": "user", "content": user_content},
        {"role": "assistant", "content": assistant_content},
    ]
+
+
def is_float_regex(value):
    """Return True when *value* matches a signed decimal literal like "-1", "+2.5" or ".7"."""
    pattern = r"^[-+]?[0-9]*\.?[0-9]+$"
    return re.match(pattern, value) is not None
+
+
def compute_args_hash(*args):
    """Deterministic MD5 hex digest of the stringified argument tuple (cache key)."""
    serialized = str(args)
    return md5(serialized.encode()).hexdigest()
+
+
def split_string_by_multi_markers(content: str, markers: list[str]) -> list[str]:
    """Split *content* on every marker, dropping empty and whitespace-only pieces."""
    if not markers:
        return [content]
    # Escape markers so regex metacharacters split literally.
    pattern = "|".join(re.escape(marker) for marker in markers)
    pieces = re.split(pattern, content)
    return [piece.strip() for piece in pieces if piece.strip()]
+
+
def enclose_string_with_quotes(content: Any) -> str:
    """Render *content* for CSV output: numbers stay bare, everything else is double-quoted."""
    if isinstance(content, numbers.Number):
        return str(content)
    # Normalize: drop surrounding whitespace and any pre-existing quotes.
    text = str(content).strip().strip("'").strip('"')
    return f'"{text}"'
+
+
def list_of_list_to_csv(data: list[list]):
    """Render rows as a ",\t"-separated pseudo-CSV string, one row per line."""
    rendered_rows = []
    for row in data:
        cells = [enclose_string_with_quotes(cell) for cell in row]
        rendered_rows.append(",\t".join(cells))
    return "\n".join(rendered_rows)
+
+
+# -----------------------------------------------------------------------------------
+# Refer the utils functions of the official GraphRAG implementation:
+# https://github.com/microsoft/graphrag
def clean_str(input: Any) -> str:
    """Strip HTML escapes and control characters from a string (after GraphRAG's utils)."""
    # Non-string input passes through unchanged.
    if not isinstance(input, str):
        return input

    unescaped = html.unescape(input.strip())
    # Remove ASCII control characters plus the C1 control block.
    # https://stackoverflow.com/questions/4324790/removing-control-characters-from-a-string-in-python
    return re.sub(r"[\x00-\x1f\x7f-\x9f]", "", unescaped)
+
+
+# Utils types -----------------------------------------------------------------------
@dataclass
class EmbeddingFunc:
    """Awaitable wrapper bundling an async embedding function with its limits."""

    embedding_dim: int   # dimensionality of the vectors the function returns
    max_token_size: int  # maximum tokens the function accepts per call
    func: callable       # the wrapped async embedding callable

    async def __call__(self, *args, **kwargs) -> np.ndarray:
        # Delegate directly to the wrapped coroutine function.
        return await self.func(*args, **kwargs)
+
+
+# Decorators ------------------------------------------------------------------------
def limit_async_func_call(max_size: int, waitting_time: float = 0.0001):
    """Decorator factory capping how many calls of an async func run concurrently.

    Spin-waits (polling every *waitting_time* seconds) on a closure counter
    instead of using asyncio.Semaphore, so nest-asyncio is not required.

    Args:
        max_size: maximum number of in-flight calls allowed at once.
        waitting_time: polling interval, in seconds, while at capacity.

    Returns:
        A decorator that wraps an async function with the concurrency cap.
    """

    def final_decro(func):
        """Hand-rolled counter instead of asyncio.Semaphore to avoid nest-asyncio."""
        __current_size = 0

        @wraps(func)
        async def wait_func(*args, **kwargs):
            nonlocal __current_size
            while __current_size >= max_size:
                await asyncio.sleep(waitting_time)
            __current_size += 1
            try:
                return await func(*args, **kwargs)
            finally:
                # Always release the slot, even when the wrapped call raises;
                # otherwise each exception would permanently shrink capacity
                # and eventually deadlock every subsequent caller.
                __current_size -= 1

        return wait_func

    return final_decro
+
+
def wrap_embedding_func_with_attrs(**kwargs):
    """Decorator factory turning a plain async function into an EmbeddingFunc.

    The keyword arguments (e.g. embedding_dim, max_token_size) become the
    attributes of the resulting wrapper.
    """

    def final_decro(func) -> EmbeddingFunc:
        return EmbeddingFunc(**kwargs, func=func)

    return final_decro
diff --git a/rag-web-ui/backend/nano_graphrag/base.py b/rag-web-ui/backend/nano_graphrag/base.py
new file mode 100644
index 0000000..e340941
--- /dev/null
+++ b/rag-web-ui/backend/nano_graphrag/base.py
@@ -0,0 +1,186 @@
+from dataclasses import dataclass, field
+from typing import TypedDict, Union, Literal, Generic, TypeVar, List
+
+import numpy as np
+
+from ._utils import EmbeddingFunc
+
+
@dataclass
class QueryParam:
    """Tunable parameters for a single GraphRAG query.

    The per-mode token budgets are heuristic splits of a ~12k-token context
    window (see the inline ratios). `mode` selects the query pipeline.
    """

    mode: Literal["local", "global", "naive"] = "global"
    only_need_context: bool = False
    response_type: str = "Multiple Paragraphs"
    level: int = 2
    top_k: int = 20
    # naive search
    # Annotated so this is a real dataclass field: without the annotation it
    # was a bare class attribute, silently excluded from __init__/fields()
    # and inconsistent with every other knob here.
    naive_max_token_for_text_unit: int = 12000
    # local search
    local_max_token_for_text_unit: int = 4000  # 12000 * 0.33
    local_max_token_for_local_context: int = 4800  # 12000 * 0.4
    local_max_token_for_community_report: int = 3200  # 12000 * 0.27
    local_community_single_one: bool = False
    # global search
    global_min_community_rating: float = 0
    global_max_consider_community: float = 512
    global_max_token_for_community_report: int = 16384
    global_special_community_map_llm_kwargs: dict = field(
        default_factory=lambda: {"response_format": {"type": "json_object"}}
    )
+
+
# Schema of a single text chunk produced by the chunking stage.
TextChunkSchema = TypedDict(
    "TextChunkSchema",
    {"tokens": int, "content": str, "full_doc_id": str, "chunk_order_index": int},
)

# Schema of one graph community before report generation.
# `edges` was annotated `list[list[str, str]]`, which is not a valid generic
# form; each edge is a 2-item [source, target] list, i.e. list[list[str]].
SingleCommunitySchema = TypedDict(
    "SingleCommunitySchema",
    {
        "level": int,
        "title": str,
        "edges": list[list[str]],
        "nodes": list[str],
        "chunk_ids": list[str],
        "occurrence": float,
        "sub_communities": list[str],
    },
)
+
+
class CommunitySchema(SingleCommunitySchema):
    """Community plus its generated report, both as raw text and parsed JSON."""

    report_string: str
    report_json: dict


# Generic value type parameter (used by BaseKVStorage below).
T = TypeVar("T")
+
+
@dataclass
class StorageNameSpace:
    """Base for every storage backend: a namespace plus the shared global config."""

    namespace: str       # logical partition this storage instance serves
    global_config: dict  # configuration dict shared across all storages

    async def index_start_callback(self):
        """Hook invoked before indexing begins."""
        pass

    async def index_done_callback(self):
        """commit the storage operations after indexing"""
        pass

    async def query_done_callback(self):
        """commit the storage operations after querying"""
        pass
+
+
@dataclass
class BaseVectorStorage(StorageNameSpace):
    """Interface for vector stores: embed on upsert, similarity-search on query."""

    embedding_func: EmbeddingFunc  # async callable producing the embeddings
    # Value keys that implementations keep alongside each stored vector.
    meta_fields: set = field(default_factory=set)

    async def query(self, query: str, top_k: int) -> list[dict]:
        """Return the top_k most similar stored entries for *query*."""
        raise NotImplementedError

    async def upsert(self, data: dict[str, dict]):
        """Use 'content' field from value for embedding, use key as id.
        If embedding_func is None, use 'embedding' field from value
        """
        raise NotImplementedError
+
+
@dataclass
class BaseKVStorage(Generic[T], StorageNameSpace):
    """Interface for key-value stores holding values of type T."""

    async def all_keys(self) -> list[str]:
        """Return every stored key."""
        raise NotImplementedError

    async def get_by_id(self, id: str) -> Union[T, None]:
        """Return the value for *id*, or None when missing."""
        raise NotImplementedError

    async def get_by_ids(
        self, ids: list[str], fields: Union[set[str], None] = None
    ) -> list[Union[T, None]]:
        """Batch lookup; optionally restrict each returned value to *fields*."""
        raise NotImplementedError

    async def filter_keys(self, data: list[str]) -> set[str]:
        """Return the subset of *data* that is not yet stored ("un-exist keys")."""
        raise NotImplementedError

    async def upsert(self, data: dict[str, T]):
        """Insert or overwrite the given id -> value entries."""
        raise NotImplementedError

    async def drop(self):
        """Clear this namespace's storage."""
        raise NotImplementedError
+
+
@dataclass
class BaseGraphStorage(StorageNameSpace):
    """Interface for graph stores: nodes, edges, clustering, and community views.

    The *_batch methods take parallel inputs and return results in the same
    order as their inputs.
    """

    async def has_node(self, node_id: str) -> bool:
        raise NotImplementedError

    async def has_edge(self, source_node_id: str, target_node_id: str) -> bool:
        raise NotImplementedError

    async def node_degree(self, node_id: str) -> int:
        raise NotImplementedError

    # Annotation fixed: a batch of degrees is a list of ints, not strs.
    async def node_degrees_batch(self, node_ids: List[str]) -> List[int]:
        raise NotImplementedError

    async def edge_degree(self, src_id: str, tgt_id: str) -> int:
        raise NotImplementedError

    async def edge_degrees_batch(self, edge_pairs: list[tuple[str, str]]) -> list[int]:
        raise NotImplementedError

    async def get_node(self, node_id: str) -> Union[dict, None]:
        """Return the node's attribute dict, or None when absent."""
        raise NotImplementedError

    # Annotation fixed: implementations return a list aligned with node_ids.
    async def get_nodes_batch(self, node_ids: list[str]) -> list[Union[dict, None]]:
        raise NotImplementedError

    async def get_edge(
        self, source_node_id: str, target_node_id: str
    ) -> Union[dict, None]:
        """Return the edge's attribute dict, or None when absent."""
        raise NotImplementedError

    async def get_edges_batch(
        self, edge_pairs: list[tuple[str, str]]
    ) -> list[Union[dict, None]]:
        raise NotImplementedError

    async def get_node_edges(
        self, source_node_id: str
    ) -> Union[list[tuple[str, str]], None]:
        """Return the node's incident edges, or None when the node is absent."""
        raise NotImplementedError

    async def get_nodes_edges_batch(
        self, node_ids: list[str]
    ) -> list[list[tuple[str, str]]]:
        raise NotImplementedError

    async def upsert_node(self, node_id: str, node_data: dict[str, str]):
        raise NotImplementedError

    async def upsert_nodes_batch(self, nodes_data: list[tuple[str, dict[str, str]]]):
        raise NotImplementedError

    async def upsert_edge(
        self, source_node_id: str, target_node_id: str, edge_data: dict[str, str]
    ):
        raise NotImplementedError

    async def upsert_edges_batch(
        self, edges_data: list[tuple[str, str, dict[str, str]]]
    ):
        raise NotImplementedError

    async def clustering(self, algorithm: str):
        """Partition the graph with the named clustering algorithm."""
        raise NotImplementedError

    async def community_schema(self) -> dict[str, SingleCommunitySchema]:
        """Return the community representation with report and nodes"""
        raise NotImplementedError

    async def embed_nodes(self, algorithm: str) -> tuple[np.ndarray, list[str]]:
        raise NotImplementedError("Node embedding is not used in nano-graphrag.")
diff --git a/rag-web-ui/backend/nano_graphrag/entity_extraction/__init__.py b/rag-web-ui/backend/nano_graphrag/entity_extraction/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/rag-web-ui/backend/nano_graphrag/entity_extraction/extract.py b/rag-web-ui/backend/nano_graphrag/entity_extraction/extract.py
new file mode 100644
index 0000000..45f1607
--- /dev/null
+++ b/rag-web-ui/backend/nano_graphrag/entity_extraction/extract.py
@@ -0,0 +1,171 @@
+from typing import Union
+import pickle
+import asyncio
+from openai import BadRequestError
+from collections import defaultdict
+import dspy
+from nano_graphrag.base import (
+ BaseGraphStorage,
+ BaseVectorStorage,
+ TextChunkSchema,
+)
+from nano_graphrag.prompt import PROMPTS
+from nano_graphrag._utils import logger, compute_mdhash_id
+from nano_graphrag.entity_extraction.module import TypedEntityRelationshipExtractor
+from nano_graphrag._op import _merge_edges_then_upsert, _merge_nodes_then_upsert
+
+
async def generate_dataset(
    chunks: dict[str, TextChunkSchema],
    filepath: str,
    save_dataset: bool = True,
    global_config: dict = None,
) -> list[dspy.Example]:
    """Run the DSPy entity/relationship extractor over every chunk and build
    a list of ``dspy.Example`` records usable for compiling or evaluating
    the extractor.

    Args:
        chunks: mapping of chunk key -> chunk payload; only ``content`` is read.
        filepath: destination file for the pickled dataset when ``save_dataset``.
        save_dataset: when True, pickle the filtered examples to ``filepath``.
        global_config: optional settings; honors
            ``use_compiled_dspy_entity_relationship`` and
            ``entity_relationship_module_path``.

    Returns:
        Only the examples that produced at least one entity AND one
        relationship (empty extractions carry no supervision signal).
    """
    # None-sentinel instead of a shared mutable `{}` default.
    global_config = global_config if global_config is not None else {}

    entity_extractor = TypedEntityRelationshipExtractor(
        num_refine_turns=1, self_refine=True
    )

    if global_config.get("use_compiled_dspy_entity_relationship", False):
        entity_extractor.load(global_config["entity_relationship_module_path"])

    ordered_chunks = list(chunks.items())
    already_processed = 0
    already_entities = 0
    already_relations = 0

    async def _process_single_content(
        chunk_key_dp: tuple[str, TextChunkSchema]
    ) -> dspy.Example:
        # Runs the synchronous extractor in a worker thread so chunks can be
        # processed concurrently under asyncio.gather.
        nonlocal already_processed, already_entities, already_relations
        chunk_dp = chunk_key_dp[1]
        content = chunk_dp["content"]
        try:
            prediction = await asyncio.to_thread(entity_extractor, input_text=content)
            entities, relationships = prediction.entities, prediction.relationships
        except BadRequestError as e:
            # Best-effort: a rejected request yields an empty example rather
            # than aborting the whole dataset build.
            logger.error(f"Error in TypedEntityRelationshipExtractor: {e}")
            entities, relationships = [], []
        example = dspy.Example(
            input_text=content, entities=entities, relationships=relationships
        ).with_inputs("input_text")
        already_entities += len(entities)
        already_relations += len(relationships)
        already_processed += 1
        now_ticks = PROMPTS["process_tickers"][
            already_processed % len(PROMPTS["process_tickers"])
        ]
        print(
            f"{now_ticks} Processed {already_processed} chunks, {already_entities} entities(duplicated), {already_relations} relations(duplicated)\r",
            end="",
            flush=True,
        )
        return example

    examples = await asyncio.gather(
        *[_process_single_content(c) for c in ordered_chunks]
    )
    # Keep only examples that actually supervise both output fields.
    filtered_examples = [
        example
        for example in examples
        if len(example.entities) > 0 and len(example.relationships) > 0
    ]
    num_filtered_examples = len(examples) - len(filtered_examples)
    if save_dataset:
        with open(filepath, "wb") as f:
            pickle.dump(filtered_examples, f)
        if filtered_examples:
            logger.info(
                f"Saved {len(filtered_examples)} examples with keys: {filtered_examples[0].keys()}, filtered {num_filtered_examples} examples"
            )
        else:
            # Previously this path crashed with IndexError on
            # filtered_examples[0] when every example was filtered out.
            logger.warning(
                f"Saved an empty dataset to {filepath}; all {num_filtered_examples} examples were filtered out"
            )

    return filtered_examples
+
+
async def extract_entities_dspy(
    chunks: dict[str, TextChunkSchema],
    knwoledge_graph_inst: BaseGraphStorage,
    entity_vdb: BaseVectorStorage,
    global_config: dict,
    **_extra_kwargs,
) -> Union[BaseGraphStorage, None]:
    """Extract entities/relationships from chunks via DSPy and merge them
    into the knowledge graph (and entity vector store, if provided).

    Note: the ``knwoledge_graph_inst`` misspelling is load-bearing — callers
    (e.g. GraphRAG.ainsert) pass it by keyword, so it cannot be renamed.
    ``**_extra_kwargs`` absorbs the extra keywords GraphRAG passes to its
    ``entity_extraction_func`` hook (``tokenizer_wrapper``,
    ``using_amazon_bedrock``) which this implementation does not need;
    previously those caused a TypeError.

    Returns:
        The (mutated) graph storage, or None when nothing was extracted.
    """
    entity_extractor = TypedEntityRelationshipExtractor(
        num_refine_turns=1, self_refine=True
    )

    if global_config.get("use_compiled_dspy_entity_relationship", False):
        entity_extractor.load(global_config["entity_relationship_module_path"])

    ordered_chunks = list(chunks.items())
    already_processed = 0
    already_entities = 0
    already_relations = 0

    async def _process_single_content(chunk_key_dp: tuple[str, TextChunkSchema]):
        # Extract one chunk (in a worker thread) and bucket results by
        # entity name / (src, tgt) pair for later merging.
        nonlocal already_processed, already_entities, already_relations
        chunk_key = chunk_key_dp[0]
        chunk_dp = chunk_key_dp[1]
        content = chunk_dp["content"]
        try:
            prediction = await asyncio.to_thread(entity_extractor, input_text=content)
            entities, relationships = prediction.entities, prediction.relationships
        except BadRequestError as e:
            # Best-effort: skip chunks the LLM endpoint rejects.
            logger.error(f"Error in TypedEntityRelationshipExtractor: {e}")
            entities, relationships = [], []

        maybe_nodes = defaultdict(list)
        maybe_edges = defaultdict(list)

        for entity in entities:
            # Tag provenance so merged nodes can be traced back to chunks.
            entity["source_id"] = chunk_key
            maybe_nodes[entity["entity_name"]].append(entity)
            already_entities += 1

        for relationship in relationships:
            relationship["source_id"] = chunk_key
            maybe_edges[(relationship["src_id"], relationship["tgt_id"])].append(
                relationship
            )
            already_relations += 1

        already_processed += 1
        now_ticks = PROMPTS["process_tickers"][
            already_processed % len(PROMPTS["process_tickers"])
        ]
        print(
            f"{now_ticks} Processed {already_processed} chunks, {already_entities} entities(duplicated), {already_relations} relations(duplicated)\r",
            end="",
            flush=True,
        )
        return dict(maybe_nodes), dict(maybe_edges)

    results = await asyncio.gather(
        *[_process_single_content(c) for c in ordered_chunks]
    )
    print()
    # Combine the per-chunk buckets before merging into the graph.
    maybe_nodes = defaultdict(list)
    maybe_edges = defaultdict(list)
    for m_nodes, m_edges in results:
        for k, v in m_nodes.items():
            maybe_nodes[k].extend(v)
        for k, v in m_edges.items():
            maybe_edges[k].extend(v)
    all_entities_data = await asyncio.gather(
        *[
            _merge_nodes_then_upsert(k, v, knwoledge_graph_inst, global_config)
            for k, v in maybe_nodes.items()
        ]
    )
    await asyncio.gather(
        *[
            _merge_edges_then_upsert(k[0], k[1], v, knwoledge_graph_inst, global_config)
            for k, v in maybe_edges.items()
        ]
    )
    if not len(all_entities_data):
        logger.warning("Didn't extract any entities, maybe your LLM is not working")
        return None
    if entity_vdb is not None:
        # Index entities for local-mode retrieval: name + description text.
        data_for_vdb = {
            compute_mdhash_id(dp["entity_name"], prefix="ent-"): {
                "content": dp["entity_name"] + dp["description"],
                "entity_name": dp["entity_name"],
            }
            for dp in all_entities_data
        }
        await entity_vdb.upsert(data_for_vdb)

    return knwoledge_graph_inst
diff --git a/rag-web-ui/backend/nano_graphrag/entity_extraction/metric.py b/rag-web-ui/backend/nano_graphrag/entity_extraction/metric.py
new file mode 100644
index 0000000..cfe4b75
--- /dev/null
+++ b/rag-web-ui/backend/nano_graphrag/entity_extraction/metric.py
@@ -0,0 +1,62 @@
+import dspy
+from nano_graphrag.entity_extraction.module import Relationship
+
+
class AssessRelationships(dspy.Signature):
    """
    Assess the similarity between gold and predicted relationships:
    1. Match relationships based on src_id and tgt_id pairs, allowing for slight variations in entity names.
    2. For matched pairs, compare:
       a) Description similarity (semantic meaning)
       b) Weight similarity
       c) Order similarity
    3. Consider unmatched relationships as penalties.
    4. Aggregate scores, accounting for precision and recall.
    5. Return a final similarity score between 0 (no similarity) and 1 (perfect match).

    Key considerations:
    - Prioritize matching based on entity pairs over exact string matches.
    - Use semantic similarity for descriptions rather than exact matches.
    - Weight the importance of different aspects (e.g., entity matching, description, weight, order).
    - Balance the impact of matched and unmatched relationships in the final score.
    """

    # NOTE: in dspy, a Signature's docstring and the `desc` strings below are
    # prompt text sent to the judge model — edit them as prompt, not as docs.
    gold_relationships: list[Relationship] = dspy.InputField(
        desc="The gold-standard relationships to compare against."
    )
    predicted_relationships: list[Relationship] = dspy.InputField(
        desc="The predicted relationships to compare against the gold-standard relationships."
    )
    similarity_score: float = dspy.OutputField(
        desc="Similarity score between 0 and 1, with 1 being the highest similarity."
    )
+
+
def relationships_similarity_metric(
    gold: dspy.Example, pred: dspy.Prediction, trace=None
) -> float:
    """LLM-judged similarity (0..1) between gold and predicted relationships.

    Both sides are re-validated through the Relationship model before being
    handed to the AssessRelationships judge.
    """
    judge = dspy.ChainOfThought(AssessRelationships)
    gold_rels = [Relationship(**record) for record in gold["relationships"]]
    pred_rels = [Relationship(**record) for record in pred["relationships"]]
    assessment = judge(
        gold_relationships=gold_rels,
        predicted_relationships=pred_rels,
    )
    return float(assessment.similarity_score)
+
+
def entity_recall_metric(
    gold: dspy.Example, pred: dspy.Prediction, trace=None
) -> float:
    """Recall of gold entity names recovered by the prediction.

    recall = |gold ∩ pred| / |gold|; returns 0 when there are no gold
    entities (avoids division by zero).
    """
    gold_names = {item["entity_name"] for item in gold["entities"]}
    pred_names = {item["entity_name"] for item in pred["entities"]}
    if not gold_names:
        return 0
    return len(gold_names & pred_names) / len(gold_names)
diff --git a/rag-web-ui/backend/nano_graphrag/entity_extraction/module.py b/rag-web-ui/backend/nano_graphrag/entity_extraction/module.py
new file mode 100644
index 0000000..b04272c
--- /dev/null
+++ b/rag-web-ui/backend/nano_graphrag/entity_extraction/module.py
@@ -0,0 +1,330 @@
+import dspy
+from pydantic import BaseModel, Field
+from nano_graphrag._utils import clean_str
+from nano_graphrag._utils import logger
+
+
+"""
+Obtained from:
+https://github.com/SciPhi-AI/R2R/blob/6e958d1e451c1cb10b6fc868572659785d1091cb/r2r/providers/prompts/defaults.jsonl
+"""
+ENTITY_TYPES = [
+ "PERSON",
+ "ORGANIZATION",
+ "LOCATION",
+ "DATE",
+ "TIME",
+ "MONEY",
+ "PERCENTAGE",
+ "PRODUCT",
+ "EVENT",
+ "LANGUAGE",
+ "NATIONALITY",
+ "RELIGION",
+ "TITLE",
+ "PROFESSION",
+ "ANIMAL",
+ "PLANT",
+ "DISEASE",
+ "MEDICATION",
+ "CHEMICAL",
+ "MATERIAL",
+ "COLOR",
+ "SHAPE",
+ "MEASUREMENT",
+ "WEATHER",
+ "NATURAL_DISASTER",
+ "AWARD",
+ "LAW",
+ "CRIME",
+ "TECHNOLOGY",
+ "SOFTWARE",
+ "HARDWARE",
+ "VEHICLE",
+ "FOOD",
+ "DRINK",
+ "SPORT",
+ "MUSIC_GENRE",
+ "INSTRUMENT",
+ "ARTWORK",
+ "BOOK",
+ "MOVIE",
+ "TV_SHOW",
+ "ACADEMIC_SUBJECT",
+ "SCIENTIFIC_THEORY",
+ "POLITICAL_PARTY",
+ "CURRENCY",
+ "STOCK_SYMBOL",
+ "FILE_TYPE",
+ "PROGRAMMING_LANGUAGE",
+ "MEDICAL_PROCEDURE",
+ "CELESTIAL_BODY",
+]
+
+
class Entity(BaseModel):
    """A single extracted entity, pydantic-validated before being normalized
    into the plain-dict form the graph layer consumes."""

    entity_name: str = Field(..., description="The name of the entity.")
    entity_type: str = Field(..., description="The type of the entity.")
    description: str = Field(
        ..., description="The description of the entity, in details and comprehensive."
    )
    importance_score: float = Field(
        ...,
        ge=0,
        le=1,
        description="Importance score of the entity. Should be between 0 and 1 with 1 being the most important.",
    )

    def to_dict(self):
        """Normalize for graph storage: name/type upper-cased and cleaned,
        score coerced to float."""
        return {
            "entity_name": clean_str(self.entity_name.upper()),
            "entity_type": clean_str(self.entity_type.upper()),
            "description": clean_str(self.description),
            "importance_score": float(self.importance_score),
        }
+
+
class Relationship(BaseModel):
    """A directed relationship between two extracted entities; src/tgt ids
    must match entity names produced alongside it."""

    src_id: str = Field(..., description="The name of the source entity.")
    tgt_id: str = Field(..., description="The name of the target entity.")
    description: str = Field(
        ...,
        description="The description of the relationship between the source and target entity, in details and comprehensive.",
    )
    weight: float = Field(
        ...,
        ge=0,
        le=1,
        description="The weight of the relationship. Should be between 0 and 1 with 1 being the strongest relationship.",
    )
    order: int = Field(
        ...,
        ge=1,
        le=3,
        description="The order of the relationship. 1 for direct relationships, 2 for second-order, 3 for third-order.",
    )

    def to_dict(self):
        """Normalize for graph storage: endpoint ids upper-cased and cleaned
        (to match Entity.to_dict's naming), weight/order coerced."""
        return {
            "src_id": clean_str(self.src_id.upper()),
            "tgt_id": clean_str(self.tgt_id.upper()),
            "description": clean_str(self.description),
            "weight": float(self.weight),
            "order": int(self.order),
        }
+
+
class CombinedExtraction(dspy.Signature):
    """
    Given a text document that is potentially relevant to this activity and a list of entity types,
    identify all entities of those types from the text and all relationships among the identified entities.

    Entity Guidelines:
    1. Each entity name should be an actual atomic word from the input text.
    2. Avoid duplicates and generic terms.
    3. Make sure descriptions are detailed and comprehensive. Use multiple complete sentences for each point below:
        a). The entity's role or significance in the context
        b). Key attributes or characteristics
        c). Relationships to other entities (if applicable)
        d). Historical or cultural relevance (if applicable)
        e). Any notable actions or events associated with the entity
    4. All entity types from the text must be included.
    5. IMPORTANT: Only use entity types from the provided 'entity_types' list. Do not introduce new entity types.

    Relationship Guidelines:
    1. Make sure relationship descriptions are detailed and comprehensive. Use multiple complete sentences for each point below:
        a). The nature of the relationship (e.g., familial, professional, causal)
        b). The impact or significance of the relationship on both entities
        c). Any historical or contextual information relevant to the relationship
        d). How the relationship evolved over time (if applicable)
        e). Any notable events or actions that resulted from this relationship
    2. Include direct relationships (order 1) as well as higher-order relationships (order 2 and 3):
        a). Direct relationships: Immediate connections between entities.
        b). Second-order relationships: Indirect effects or connections that result from direct relationships.
        c). Third-order relationships: Further indirect effects that result from second-order relationships.
    3. The "src_id" and "tgt_id" fields must exactly match entity names from the extracted entities list.
    """

    # NOTE: the docstring above is the extraction prompt dspy sends to the
    # LLM; treat edits to it as prompt-engineering changes, not doc changes.
    input_text: str = dspy.InputField(
        desc="The text to extract entities and relationships from."
    )
    entity_types: list[str] = dspy.InputField(
        desc="List of entity types used for extraction."
    )
    entities: list[Entity] = dspy.OutputField(
        desc="List of entities extracted from the text and the entity types."
    )
    relationships: list[Relationship] = dspy.OutputField(
        desc="List of relationships extracted from the text and the entity types."
    )
+
+
class CritiqueCombinedExtraction(dspy.Signature):
    """
    Critique the current extraction of entities and relationships from a given text.
    Focus on completeness, accuracy, and adherence to the provided entity types and extraction guidelines.

    Critique Guidelines:
    1. Evaluate if all relevant entities from the input text are captured and correctly typed.
    2. Check if entity descriptions are comprehensive and follow the provided guidelines.
    3. Assess the completeness of relationship extractions, including higher-order relationships.
    4. Verify that relationship descriptions are detailed and follow the provided guidelines.
    5. Identify any inconsistencies, errors, or missed opportunities in the current extraction.
    6. Suggest specific improvements or additions to enhance the quality of the extraction.
    """

    # Critique half of the self-refine loop run by
    # TypedEntityRelationshipExtractor when self_refine=True.
    input_text: str = dspy.InputField(
        desc="The original text from which entities and relationships were extracted."
    )
    entity_types: list[str] = dspy.InputField(
        desc="List of valid entity types for this extraction task."
    )
    current_entities: list[Entity] = dspy.InputField(
        desc="List of currently extracted entities to be critiqued."
    )
    current_relationships: list[Relationship] = dspy.InputField(
        desc="List of currently extracted relationships to be critiqued."
    )
    # NOTE(review): the two desc strings below end with ".." — likely a typo,
    # but left untouched because desc text is part of the runtime prompt.
    entity_critique: str = dspy.OutputField(
        desc="Detailed critique of the current entities, highlighting areas for improvement for completeness and accuracy.."
    )
    relationship_critique: str = dspy.OutputField(
        desc="Detailed critique of the current relationships, highlighting areas for improvement for completeness and accuracy.."
    )
+
+
class RefineCombinedExtraction(dspy.Signature):
    """
    Refine the current extraction of entities and relationships based on the provided critique.
    Improve completeness, accuracy, and adherence to the extraction guidelines.

    Refinement Guidelines:
    1. Address all points raised in the entity and relationship critiques.
    2. Add missing entities and relationships identified in the critique.
    3. Improve entity and relationship descriptions as suggested.
    4. Ensure all refinements still adhere to the original extraction guidelines.
    5. Maintain consistency between entities and relationships during refinement.
    6. Focus on enhancing the overall quality and comprehensiveness of the extraction.
    """

    # Refine half of the self-refine loop: consumes the critique produced by
    # CritiqueCombinedExtraction and emits improved entity/relationship lists.
    input_text: str = dspy.InputField(
        desc="The original text from which entities and relationships were extracted."
    )
    entity_types: list[str] = dspy.InputField(
        desc="List of valid entity types for this extraction task."
    )
    current_entities: list[Entity] = dspy.InputField(
        desc="List of currently extracted entities to be refined."
    )
    current_relationships: list[Relationship] = dspy.InputField(
        desc="List of currently extracted relationships to be refined."
    )
    entity_critique: str = dspy.InputField(
        desc="Detailed critique of the current entities to guide refinement."
    )
    relationship_critique: str = dspy.InputField(
        desc="Detailed critique of the current relationships to guide refinement."
    )
    refined_entities: list[Entity] = dspy.OutputField(
        desc="List of refined entities, addressing the entity critique and improving upon the current entities."
    )
    refined_relationships: list[Relationship] = dspy.OutputField(
        desc="List of refined relationships, addressing the relationship critique and improving upon the current relationships."
    )
+
+
class TypedEntityRelationshipExtractorException(dspy.Module):
    """Wraps a predictor module and converts the configured exception types
    into an empty extraction result instead of propagating them."""

    def __init__(
        self,
        predictor: dspy.Module,
        exception_types: tuple[type[Exception], ...] = (Exception,),
    ):
        super().__init__()
        self.predictor = predictor
        self.exception_types = exception_types

    def copy(self):
        """Return a copy wrapping the same predictor.

        Fix: previously the configured exception_types were dropped here,
        silently resetting copies to the (Exception,) default.
        """
        return TypedEntityRelationshipExtractorException(
            self.predictor, exception_types=self.exception_types
        )

    def forward(self, **kwargs):
        """Delegate to the predictor; swallow only the configured exception
        types (returning an empty Prediction), let anything else propagate."""
        try:
            return self.predictor(**kwargs)
        except self.exception_types:
            return dspy.Prediction(entities=[], relationships=[])
+
+
class TypedEntityRelationshipExtractor(dspy.Module):
    """Extracts typed entities and relationships from text via the
    CombinedExtraction signature, optionally running critique-and-refine
    rounds over the initial extraction."""

    def __init__(
        self,
        lm: dspy.LM = None,
        max_retries: int = 3,
        entity_types: list[str] = ENTITY_TYPES,
        self_refine: bool = False,
        num_refine_turns: int = 1,
    ):
        # NOTE: entity_types defaults to the shared module-level list;
        # callers must not mutate it in place.
        super().__init__()
        self.lm = lm
        self.entity_types = entity_types
        self.self_refine = self_refine
        self.num_refine_turns = num_refine_turns

        self.extractor = dspy.ChainOfThought(
            signature=CombinedExtraction, max_retries=max_retries
        )
        # Wrap the predictor so ValueError (e.g. output validation/parsing
        # failures) yields an empty prediction instead of aborting the run.
        self.extractor = TypedEntityRelationshipExtractorException(
            self.extractor, exception_types=(ValueError,)
        )

        if self.self_refine:
            self.critique = dspy.ChainOfThought(
                signature=CritiqueCombinedExtraction, max_retries=max_retries
            )
            self.refine = dspy.ChainOfThought(
                signature=RefineCombinedExtraction, max_retries=max_retries
            )

    def forward(self, input_text: str) -> dspy.Prediction:
        """Run extraction (plus optional refine turns) under this module's
        LM (falling back to dspy.settings.lm) and return a Prediction with
        plain-dict ``entities`` and ``relationships``."""
        with dspy.context(lm=self.lm if self.lm is not None else dspy.settings.lm):
            extraction_result = self.extractor(
                input_text=input_text, entity_types=self.entity_types
            )

            current_entities: list[Entity] = extraction_result.entities
            current_relationships: list[Relationship] = extraction_result.relationships

            if self.self_refine:
                # Each turn critiques the current extraction, then replaces it
                # with the refined lists.
                for _ in range(self.num_refine_turns):
                    critique_result = self.critique(
                        input_text=input_text,
                        entity_types=self.entity_types,
                        current_entities=current_entities,
                        current_relationships=current_relationships,
                    )
                    refined_result = self.refine(
                        input_text=input_text,
                        entity_types=self.entity_types,
                        current_entities=current_entities,
                        current_relationships=current_relationships,
                        entity_critique=critique_result.entity_critique,
                        relationship_critique=critique_result.relationship_critique,
                    )
                    logger.debug(
                        f"entities: {len(current_entities)} | refined_entities: {len(refined_result.refined_entities)}"
                    )
                    logger.debug(
                        f"relationships: {len(current_relationships)} | refined_relationships: {len(refined_result.refined_relationships)}"
                    )
                    current_entities = refined_result.refined_entities
                    current_relationships = refined_result.refined_relationships

            # Convert pydantic models to the plain dicts downstream expects.
            entities = [entity.to_dict() for entity in current_entities]
            relationships = [
                relationship.to_dict() for relationship in current_relationships
            ]

            return dspy.Prediction(entities=entities, relationships=relationships)
diff --git a/rag-web-ui/backend/nano_graphrag/graphrag.py b/rag-web-ui/backend/nano_graphrag/graphrag.py
new file mode 100644
index 0000000..b1d887a
--- /dev/null
+++ b/rag-web-ui/backend/nano_graphrag/graphrag.py
@@ -0,0 +1,382 @@
+import asyncio
+import os
+from dataclasses import asdict, dataclass, field
+from datetime import datetime
+from functools import partial
+from typing import Callable, Dict, List, Optional, Type, Union, cast
+
+
+
+from ._llm import (
+ amazon_bedrock_embedding,
+ create_amazon_bedrock_complete_function,
+ gpt_4o_complete,
+ gpt_4o_mini_complete,
+ openai_embedding,
+ azure_gpt_4o_complete,
+ azure_openai_embedding,
+ azure_gpt_4o_mini_complete,
+)
+from ._op import (
+ chunking_by_token_size,
+ extract_entities,
+ generate_community_report,
+ get_chunks,
+ local_query,
+ global_query,
+ naive_query,
+)
+from ._storage import (
+ JsonKVStorage,
+ NanoVectorDBStorage,
+ NetworkXStorage,
+)
+from ._utils import (
+ EmbeddingFunc,
+ compute_mdhash_id,
+ limit_async_func_call,
+ convert_response_to_json,
+ always_get_an_event_loop,
+ logger,
+ TokenizerWrapper,
+)
+from .base import (
+ BaseGraphStorage,
+ BaseKVStorage,
+ BaseVectorStorage,
+ StorageNameSpace,
+ QueryParam,
+)
+
+
@dataclass
class GraphRAG:
    """End-to-end GraphRAG pipeline over pluggable storage backends.

    Covers ingestion (chunking, entity extraction, graph clustering,
    community reports) and querying (local / global / naive modes).
    Every knob is a dataclass field so ``asdict(self)`` can serve as the
    ``global_config`` dict handed to storages and operators.
    """

    working_dir: str = field(
        default_factory=lambda: f"./nano_graphrag_cache_{datetime.now().strftime('%Y-%m-%d-%H:%M:%S')}"
    )
    # graph mode
    enable_local: bool = True
    enable_naive_rag: bool = False

    # text chunking
    tokenizer_type: str = "tiktoken"  # or 'huggingface'
    tiktoken_model_name: str = "gpt-4o"
    huggingface_model_name: str = "bert-base-uncased"  # default HF model
    chunk_func: Callable[
        [
            list[list[int]],
            List[str],
            TokenizerWrapper,
            Optional[int],
            Optional[int],
        ],
        List[Dict[str, Union[str, int]]],
    ] = chunking_by_token_size
    chunk_token_size: int = 1200
    chunk_overlap_token_size: int = 100

    # entity extraction
    entity_extract_max_gleaning: int = 1
    entity_summary_to_max_tokens: int = 500

    # graph clustering
    graph_cluster_algorithm: str = "leiden"
    max_graph_cluster_size: int = 10
    graph_cluster_seed: int = 0xDEADBEEF

    # node embedding
    node_embedding_algorithm: str = "node2vec"
    node2vec_params: dict = field(
        default_factory=lambda: {
            # fix: "num_walks" previously appeared twice in this literal
            # (same value); the duplicate key has been removed.
            "dimensions": 1536,
            "num_walks": 10,
            "walk_length": 40,
            "window_size": 2,
            "iterations": 3,
            "random_seed": 3,
        }
    )

    # community reports
    special_community_report_llm_kwargs: dict = field(
        default_factory=lambda: {"response_format": {"type": "json_object"}}
    )

    # text embedding
    embedding_func: EmbeddingFunc = field(default_factory=lambda: openai_embedding)
    embedding_batch_num: int = 32
    embedding_func_max_async: int = 16
    query_better_than_threshold: float = 0.2

    # LLM
    using_azure_openai: bool = False
    using_amazon_bedrock: bool = False
    best_model_id: str = "us.anthropic.claude-3-sonnet-20240229-v1:0"
    cheap_model_id: str = "us.anthropic.claude-3-haiku-20240307-v1:0"
    best_model_func: callable = gpt_4o_complete
    best_model_max_token_size: int = 32768
    best_model_max_async: int = 16
    cheap_model_func: callable = gpt_4o_mini_complete
    cheap_model_max_token_size: int = 32768
    cheap_model_max_async: int = 16

    # entity extraction
    entity_extraction_func: callable = extract_entities

    # storage
    key_string_value_json_storage_cls: Type[BaseKVStorage] = JsonKVStorage
    vector_db_storage_cls: Type[BaseVectorStorage] = NanoVectorDBStorage
    vector_db_storage_cls_kwargs: dict = field(default_factory=dict)
    graph_storage_cls: Type[BaseGraphStorage] = NetworkXStorage
    enable_llm_cache: bool = True

    # extension
    always_create_working_dir: bool = True
    addon_params: dict = field(default_factory=dict)
    convert_response_to_json_func: callable = convert_response_to_json

    def __post_init__(self):
        """Wire up tokenizer, provider-specific LLM/embedding functions,
        the working directory, storage instances, and async rate limits."""
        _print_config = ",\n  ".join([f"{k} = {v}" for k, v in asdict(self).items()])
        logger.debug(f"GraphRAG init with param:\n\n  {_print_config}\n")

        self.tokenizer_wrapper = TokenizerWrapper(
            tokenizer_type=self.tokenizer_type,
            model_name=self.tiktoken_model_name if self.tokenizer_type == "tiktoken" else self.huggingface_model_name
        )

        if self.using_azure_openai:
            # Swap any still-default OpenAI callables for their Azure
            # equivalents (identity checks: only replace untouched defaults).
            if self.best_model_func is gpt_4o_complete:
                self.best_model_func = azure_gpt_4o_complete
            if self.cheap_model_func is gpt_4o_mini_complete:
                self.cheap_model_func = azure_gpt_4o_mini_complete
            if self.embedding_func is openai_embedding:
                self.embedding_func = azure_openai_embedding
            logger.info(
                "Switched the default openai funcs to Azure OpenAI if you didn't set any of it"
            )

        if self.using_amazon_bedrock:
            # Bedrock overrides unconditionally, using the configured model ids.
            self.best_model_func = create_amazon_bedrock_complete_function(self.best_model_id)
            self.cheap_model_func = create_amazon_bedrock_complete_function(self.cheap_model_id)
            self.embedding_func = amazon_bedrock_embedding
            logger.info(
                "Switched the default openai funcs to Amazon Bedrock"
            )

        if not os.path.exists(self.working_dir) and self.always_create_working_dir:
            logger.info(f"Creating working directory {self.working_dir}")
            os.makedirs(self.working_dir)

        # KV storages: raw docs, chunk texts, optional LLM response cache,
        # and community reports.
        self.full_docs = self.key_string_value_json_storage_cls(
            namespace="full_docs", global_config=asdict(self)
        )

        self.text_chunks = self.key_string_value_json_storage_cls(
            namespace="text_chunks", global_config=asdict(self)
        )

        self.llm_response_cache = (
            self.key_string_value_json_storage_cls(
                namespace="llm_response_cache", global_config=asdict(self)
            )
            if self.enable_llm_cache
            else None
        )

        self.community_reports = self.key_string_value_json_storage_cls(
            namespace="community_reports", global_config=asdict(self)
        )
        self.chunk_entity_relation_graph = self.graph_storage_cls(
            namespace="chunk_entity_relation", global_config=asdict(self)
        )

        # Throttle concurrent embedding / LLM calls.
        self.embedding_func = limit_async_func_call(self.embedding_func_max_async)(
            self.embedding_func
        )
        self.entities_vdb = (
            self.vector_db_storage_cls(
                namespace="entities",
                global_config=asdict(self),
                embedding_func=self.embedding_func,
                meta_fields={"entity_name"},
            )
            if self.enable_local
            else None
        )
        self.chunks_vdb = (
            self.vector_db_storage_cls(
                namespace="chunks",
                global_config=asdict(self),
                embedding_func=self.embedding_func,
            )
            if self.enable_naive_rag
            else None
        )

        self.best_model_func = limit_async_func_call(self.best_model_max_async)(
            partial(self.best_model_func, hashing_kv=self.llm_response_cache)
        )
        self.cheap_model_func = limit_async_func_call(self.cheap_model_max_async)(
            partial(self.cheap_model_func, hashing_kv=self.llm_response_cache)
        )

    def insert(self, string_or_strings):
        """Synchronous wrapper around :meth:`ainsert`."""
        loop = always_get_an_event_loop()
        return loop.run_until_complete(self.ainsert(string_or_strings))

    def query(self, query: str, param: QueryParam = QueryParam()):
        """Synchronous wrapper around :meth:`aquery`."""
        loop = always_get_an_event_loop()
        return loop.run_until_complete(self.aquery(query, param))

    async def aquery(self, query: str, param: QueryParam = QueryParam()):
        """Answer `query` in the mode selected by ``param.mode``
        (local / global / naive); raises ValueError for disabled or
        unknown modes."""
        if param.mode == "local" and not self.enable_local:
            raise ValueError("enable_local is False, cannot query in local mode")
        if param.mode == "naive" and not self.enable_naive_rag:
            raise ValueError("enable_naive_rag is False, cannot query in naive mode")
        if param.mode == "local":
            response = await local_query(
                query,
                self.chunk_entity_relation_graph,
                self.entities_vdb,
                self.community_reports,
                self.text_chunks,
                param,
                self.tokenizer_wrapper,
                asdict(self),
            )
        elif param.mode == "global":
            response = await global_query(
                query,
                self.chunk_entity_relation_graph,
                self.entities_vdb,
                self.community_reports,
                self.text_chunks,
                param,
                self.tokenizer_wrapper,
                asdict(self),
            )
        elif param.mode == "naive":
            response = await naive_query(
                query,
                self.chunks_vdb,
                self.text_chunks,
                param,
                self.tokenizer_wrapper,
                asdict(self),
            )
        else:
            raise ValueError(f"Unknown mode {param.mode}")
        await self._query_done()
        return response

    async def ainsert(self, string_or_strings):
        """Ingest one document or a list of documents: dedupe, chunk,
        extract entities, re-cluster the graph, and regenerate community
        reports. Already-known docs/chunks are skipped."""
        await self._insert_start()
        try:
            if isinstance(string_or_strings, str):
                string_or_strings = [string_or_strings]
            # ---------- new docs (content-hash keyed for dedup)
            new_docs = {
                compute_mdhash_id(c.strip(), prefix="doc-"): {"content": c.strip()}
                for c in string_or_strings
            }
            _add_doc_keys = await self.full_docs.filter_keys(list(new_docs.keys()))
            new_docs = {k: v for k, v in new_docs.items() if k in _add_doc_keys}
            if not len(new_docs):
                logger.warning("All docs are already in the storage")
                return
            logger.info(f"[New Docs] inserting {len(new_docs)} docs")

            # ---------- chunking

            inserting_chunks = get_chunks(
                new_docs=new_docs,
                chunk_func=self.chunk_func,
                overlap_token_size=self.chunk_overlap_token_size,
                max_token_size=self.chunk_token_size,
                tokenizer_wrapper=self.tokenizer_wrapper,
            )

            _add_chunk_keys = await self.text_chunks.filter_keys(
                list(inserting_chunks.keys())
            )
            inserting_chunks = {
                k: v for k, v in inserting_chunks.items() if k in _add_chunk_keys
            }
            if not len(inserting_chunks):
                logger.warning("All chunks are already in the storage")
                return
            logger.info(f"[New Chunks] inserting {len(inserting_chunks)} chunks")
            if self.enable_naive_rag:
                logger.info("Insert chunks for naive RAG")
                await self.chunks_vdb.upsert(inserting_chunks)

            # TODO: don't support incremental update for communities now, so we have to drop all
            await self.community_reports.drop()

            # ---------- extract/summary entity and upsert to graph
            logger.info("[Entity Extraction]...")
            maybe_new_kg = await self.entity_extraction_func(
                inserting_chunks,
                knwoledge_graph_inst=self.chunk_entity_relation_graph,
                entity_vdb=self.entities_vdb,
                tokenizer_wrapper=self.tokenizer_wrapper,
                global_config=asdict(self),
                using_amazon_bedrock=self.using_amazon_bedrock,
            )
            if maybe_new_kg is None:
                logger.warning("No new entities found")
                return
            self.chunk_entity_relation_graph = maybe_new_kg
            # ---------- update clusterings of graph
            logger.info("[Community Report]...")
            await self.chunk_entity_relation_graph.clustering(
                self.graph_cluster_algorithm
            )
            await generate_community_report(
                self.community_reports, self.chunk_entity_relation_graph, self.tokenizer_wrapper, asdict(self)
            )

            # ---------- commit upsertings and indexing
            await self.full_docs.upsert(new_docs)
            await self.text_chunks.upsert(inserting_chunks)
        finally:
            # Always flush storages, even when ingestion bails out early.
            await self._insert_done()

    async def _insert_start(self):
        """Fire index_start_callback on storages that need pre-insert setup."""
        tasks = []
        for storage_inst in [
            self.chunk_entity_relation_graph,
        ]:
            if storage_inst is None:
                continue
            tasks.append(cast(StorageNameSpace, storage_inst).index_start_callback())
        await asyncio.gather(*tasks)

    async def _insert_done(self):
        """Fire index_done_callback (commit/flush) on all active storages."""
        tasks = []
        for storage_inst in [
            self.full_docs,
            self.text_chunks,
            self.llm_response_cache,
            self.community_reports,
            self.entities_vdb,
            self.chunks_vdb,
            self.chunk_entity_relation_graph,
        ]:
            if storage_inst is None:
                continue
            tasks.append(cast(StorageNameSpace, storage_inst).index_done_callback())
        await asyncio.gather(*tasks)

    async def _query_done(self):
        """Flush the LLM response cache after a query (if caching is on)."""
        tasks = []
        for storage_inst in [self.llm_response_cache]:
            if storage_inst is None:
                continue
            tasks.append(cast(StorageNameSpace, storage_inst).index_done_callback())
        await asyncio.gather(*tasks)
diff --git a/rag-web-ui/backend/nano_graphrag/prompt.py b/rag-web-ui/backend/nano_graphrag/prompt.py
new file mode 100644
index 0000000..a6432db
--- /dev/null
+++ b/rag-web-ui/backend/nano_graphrag/prompt.py
@@ -0,0 +1,305 @@
"""
GraphRAG core prompts (Chinese, aerospace-oriented).
"""

# Separator used when concatenating multiple graph field values (e.g. merged
# entity/relation descriptions).
# NOTE(review): upstream nano-graphrag uses "<SEP>" here; an empty separator
# makes concatenated descriptions impossible to split apart again — confirm
# the empty string is intentional.
GRAPH_FIELD_SEP = ""
# Registry of all prompt templates and prompt-related constants, keyed by
# pipeline step name.
PROMPTS = {}
+
# Claim/assertion extraction prompt.
# Fix: the output-format line had lost its named slots (only bare
# {tuple_delimiter} tokens remained), leaving the model with no field order to
# follow; restore one named slot per field enumerated in step 2.
PROMPTS[
    "claim_extraction"
] = """-任务定位-
你是航天知识情报分析助手,负责从文本中抽取实体相关的主张/断言(claim)。

-目标-
给定输入文本、实体约束和主张说明,抽取满足条件的实体及其对应主张,结果必须可溯源。

-执行步骤-
1. 先识别满足实体约束的命名实体。实体约束可能是实体名称列表,也可能是实体类型列表。
2. 对步骤1中的每个实体,抽取其作为主语的主张。每条主张需输出:
- Subject: 主张主体实体名(大写;必须来自步骤1)
- Object: 客体实体名(大写;未知时使用 **NONE**)
- Claim Type: 主张类型(大写;应可复用)
- Claim Status: **TRUE**、**FALSE** 或 **SUSPECTED**
- Claim Description: 说明主张的依据、逻辑和关键证据
- Claim Date: 起止时间(ISO-8601);未知时为 **NONE**
- Claim Source Text: 与主张直接相关的原文引文(尽量完整)

格式要求:
(<subject_entity>{tuple_delimiter}<object_entity>{tuple_delimiter}<claim_type>{tuple_delimiter}<claim_status>{tuple_delimiter}<claim_description>{tuple_delimiter}<claim_date>{tuple_delimiter}<claim_source_text>)

3. 使用 **{record_delimiter}** 连接所有记录。
4. 结束时输出 {completion_delimiter}。

-约束-
- 仅基于输入文本,不得编造。
- 航天语境优先(如航天器、推进系统、姿态控制、任务阶段、地面系统、试验事件、故障模式等)。

-输入-
Entity specification: {entity_specs}
Claim description: {claim_description}
Text: {input_text}
Output: """
+
# Community report prompt: turns one community's entities/relations/claims
# into a structured JSON review report.
# NOTE(review): this template mixes literal JSON braces with the {input_text}
# placeholder — confirm the formatting code escapes the literal braces before
# applying str.format-style substitution to this string.
PROMPTS[
    "community_report"
] = """你是航天领域知识图谱分析助手,负责为一个社区(community)生成结构化研判报告。

# 目标
根据给定的实体、关系和可选主张,输出可用于技术评审与任务决策的社区报告。

# 报告结构
必须返回 JSON 字符串,结构如下:
{
    "title": <标题>,
    "summary": <执行摘要>,
    "rating": <0-10 浮点评分>,
    "rating_explanation": <评分说明>,
    "findings": [
        {
            "summary": <要点小结>,
            "explanation": <详细说明>
        }
    ]
}

# 字段要求
- title: 简洁且具体,尽量包含代表性实体。
- summary: 说明社区整体结构、核心实体、关键关系与主要风险/价值。
- rating: 社区影响度/风险度评分(0-10)。
- rating_explanation: 单句说明评分依据。
- findings: 5-10 条关键发现,覆盖技术链路、任务影响、可靠性风险、协同关系等。

# 领域要求(航天)
优先关注以下维度:
- 任务阶段:论证、研制、总装、测试、发射、入轨、在轨运行、回收/退役
- 系统层级:航天器、有效载荷、推进系统、姿态控制、测控通信、地面系统
- 关键指标:推力、比冲、功率、带宽、精度、寿命、可靠性、故障率
- 风险与依赖:单点故障、接口依赖、时序耦合、供应链与试验验证缺口

# 证据约束
- 仅使用输入中可证据化信息。
- 无证据内容不得写入。
- 若信息不足,应明确指出不确定性与缺失点。

# 输入
Text:
```
{input_text}
```

Output:
"""
+
# Entity/relationship extraction prompt.
# Fix: the entity and relationship format lines had lost their named slots
# (only bare {tuple_delimiter} tokens remained), so the model had no slot
# order to follow; restore <entity_name>/<entity_type>/... placeholders.
PROMPTS[
    "entity_extraction"
] = """-任务目标-
给定文本与实体类型列表,识别所有相关实体,并抽取实体间“明确存在”的关系。

-实体类型约束-
- entity_type 必须来自给定集合:[{entity_types}]
- 若无法确定,使用最接近类型,不可臆造新类型

-关系类型约束-
关系描述必须以“关系类型=<类型>;依据=<说明>”开头。
关系类型从以下集合中选择:
[组成, 隶属, 控制, 被控制, 供能, 支撑, 测量, 感知, 执行, 通信, 影响, 制约, 因果, 时序前后, 协同, 风险关联, 其他]

-执行步骤-
1. 抽取实体。每个实体输出:
- entity_name: 实体名(保留原文专有名词;必要时标准化)
- entity_type: 实体类型(必须在给定集合中)
- entity_description: 面向航天任务语境的实体描述(属性、职责、行为)

实体格式:
("entity"{tuple_delimiter}<entity_name>{tuple_delimiter}<entity_type>{tuple_delimiter}<entity_description>)

2. 在步骤1实体中,抽取“证据充分且语义明确”的关系对(source_entity, target_entity)。
每条关系输出:
- source_entity
- target_entity
- relationship_description: 必须以“关系类型=<类型>;依据=<说明>”开头
- relationship_strength: 1-10 数值,表示关系强度

关系格式:
("relationship"{tuple_delimiter}<source_entity>{tuple_delimiter}<target_entity>{tuple_delimiter}<relationship_description>{tuple_delimiter}<relationship_strength>)

3. 结果使用 **{record_delimiter}** 拼接。
4. 结束时输出 {completion_delimiter}。

-质量规则-
- 只抽取文本中可直接支持的实体和关系。
- 不输出模糊、猜测或无依据关系。
- 对航天语义优先:航天器、分系统、任务阶段、参数指标、故障模式、试验事件。

-输入-
Entity_types: {entity_types}
Text: {input_text}
Output:
"""
+
# Description-merging prompt: collapses several accumulated description
# fragments for one (or a pair of) entities into a single consistent summary.
PROMPTS[
    "summarize_entity_descriptions"
] = """你是航天知识库整理助手。
给定一个或两个实体名称,以及若干描述片段,请合并为一段一致、完整、可复用的摘要。

要求:
- 覆盖所有有效信息
- 若描述冲突,给出最一致、最保守的综合结论
- 使用第三人称
- 保留实体名,避免指代不清
- 优先保留任务阶段、技术指标、系统依赖和风险信息

#######
-输入数据-
Entities: {entity_name}
Description List: {description_list}
#######
Output:
"""
+
# Gleaning prompts used in the multi-pass extraction loop: the first asks the
# model to emit only the records it missed, the second asks whether another
# pass is needed (YES/NO).
# NOTE: the "entiti_" key spelling matches the upstream nano-graphrag key
# names — do not "fix" it, lookups elsewhere depend on it.
PROMPTS[
    "entiti_continue_extraction"
] = """上一轮可能遗漏了实体或关系。请仅补充遗漏项,严格沿用既定输出格式,不要重复已输出记录:"""

PROMPTS[
    "entiti_if_loop_extraction"
] = """请判断是否仍有遗漏实体或关系。仅回答 YES 或 NO。"""
+
# Default closed set of entity types for extraction (aerospace taxonomy:
# spacecraft, mission phases, subsystems, test events, failure modes, etc.).
PROMPTS["DEFAULT_ENTITY_TYPES"] = [
    "航天器",
    "任务",
    "任务阶段",
    "有效载荷",
    "推进系统",
    "姿态控制系统",
    "测控通信系统",
    "电源系统",
    "热控系统",
    "结构机构",
    "传感器",
    "执行机构",
    "地面系统",
    "组织机构",
    "试验事件",
    "故障模式",
    "参数指标",
    "轨道",
    "地点",
    "时间",
]
# Delimiters substituted into the extraction prompts above: field separator
# within a record, separator between records, and end-of-output sentinel.
PROMPTS["DEFAULT_TUPLE_DELIMITER"] = "<|>"
PROMPTS["DEFAULT_RECORD_DELIMITER"] = "##"
PROMPTS["DEFAULT_COMPLETION_DELIMITER"] = "<|COMPLETE|>"
+
# Local-mode query prompt: answers a question from a local subgraph
# (entities, relations, evidence snippets) with an explicit reasoning chain.
PROMPTS[
    "local_rag_response"
] = """---角色---
你是航天领域图谱问答助手,擅长基于局部子图进行多跳推理。

---任务---
根据输入的数据表(实体、关系、证据片段)回答用户问题。

---回答要求---
1. 先给结论,再给推理链路。
2. 推理链路至少包含:
- 关键实体
- 关键关系
- 中间推断
- 最终结论
3. 若证据不足,明确说明“证据不足以得出结论”。
4. 严禁编造。
5. 使用中文、专业且简洁。

---目标长度与格式---
{response_type}

---输入数据表---
{context_data}

---输出格式建议---
- 结论
- 推理链路
- 证据与不确定性
"""

# Global-mode map step: distills community-level data into scored key points
# (JSON) that the reduce step aggregates.
# NOTE(review): literal JSON braces coexist with the {context_data}
# placeholder — confirm the formatter escapes the literal braces.
PROMPTS[
    "global_map_rag_points"
] = """---角色---
你是航天知识全局研判助手,负责从社区级信息中提炼关键观点。

---任务---
根据输入数据表,输出一组可用于后续全局汇总的关键点。

---输出要求---
- 必须输出 JSON:
{
    "points": [
        {"description": "观点描述", "score": 0-100整数}
    ]
}
- description: 观点需可证据化,优先覆盖跨系统影响、任务阶段耦合、技术风险和性能趋势。
- score: 对回答用户问题的重要性分值。
- 若无法回答,输出 1 条 score=0 且明确说明信息不足。
- 仅使用输入证据,不得编造。

---输入数据表---
{context_data}
"""
+
# Global-mode reduce step: fuses the ranked analyst reports from the map step
# into one decision-oriented answer.
PROMPTS[
    "global_reduce_rag_response"
] = """---角色---
你是航天领域总师级分析助手,负责融合多位分析员报告并给出全局结论。

---任务---
根据按重要性降序排列的分析员报告,输出面向决策的综合回答。

---要求---
1. 先给总体结论,再给分项分析(趋势、共性风险、关键差异、建议动作)。
2. 报告融合时去重、去噪,保留高证据密度信息。
3. 必须指出不确定性与信息缺口。
4. 仅基于输入报告,不得编造。
5. 使用中文,风格专业、严谨。

---目标长度与格式---
{response_type}

---分析员报告---
{report_data}
"""

# Naive-RAG prompt: plain retrieved-chunks question answering, no graph.
PROMPTS[
    "naive_rag_response"
] = """你是航天知识问答助手。
下面是可用知识:
{content_data}
---
请基于上述知识回答用户问题,要求:
- 回答准确、简洁、专业
- 若信息不足,明确说明缺失点
- 不得编造
---目标长度与格式---
{response_type}
"""

# Canned answer returned when the pipeline cannot produce a grounded response.
PROMPTS["fail_response"] = "抱歉,当前无法基于现有信息回答该问题。"

# Spinner frames for console progress display.
PROMPTS["process_tickers"] = ["-", "\\", "|", "/"]

# Separator priority list for text chunking: paragraph breaks first, then
# sentence punctuation (fullwidth and ASCII variants), then whitespace.
# NOTE(review): two visually identical "." entries appear below — confirm one
# is the fullwidth U+FF0E; otherwise it is a duplicate to remove.
PROMPTS["default_text_separator"] = [
    "\n\n",
    "\r\n\r\n",
    "\n",
    "\r\n",
    "。",
    ".",
    ".",
    "!",
    "!",
    "?",
    "?",
    " ",
    "\t",
    "\u3000",
    "\u200b",
]
diff --git a/rag-web-ui/backend/requirements.txt b/rag-web-ui/backend/requirements.txt
new file mode 100644
index 0000000..e1ef6b7
--- /dev/null
+++ b/rag-web-ui/backend/requirements.txt
@@ -0,0 +1,33 @@
+fastapi>=0.104.1
+uvicorn>=0.24.0
+python-jose>=3.3.0
+python-multipart>=0.0.6
+langchain>=0.3.0
+langchain-community>=0.0.10
+langchain-openai>=0.3.3
+langchain-chroma>=0.0.5
+chromadb>=0.6.3
+langchain-qdrant>=0.2.0
+chroma-hnswlib>=0.7.3
+BCrypt==4.0.1
+SQLAlchemy>=2.0.23
+alembic>=1.12.1
+mysql-connector-python>=8.0.33
+minio>=7.2.0
+python-docx>=0.8.11
+pypdf>=3.0.0
unstructured[md]>=0.10.30
markdown>=3.0.0
+openai>=1.30.0
+email-validator
+dashscope>=1.13.6
+langchain-deepseek==0.1.1
+langchain-ollama==0.2.3
+docx2txt==0.8
+neo4j>=5.22.0
+tiktoken>=0.7.0
+networkx>=3.2
+graspologic>=3.4.1
+nano-vectordb>=0.0.4
+tenacity
diff --git a/rag-web-ui/backend/tests/__init__.py b/rag-web-ui/backend/tests/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/rag-web-ui/backend/tests/test_testing_pipeline.py b/rag-web-ui/backend/tests/test_testing_pipeline.py
new file mode 100644
index 0000000..0858c0d
--- /dev/null
+++ b/rag-web-ui/backend/tests/test_testing_pipeline.py
@@ -0,0 +1,127 @@
+from app.services.testing_pipeline.pipeline import run_testing_pipeline
+from app.services.testing_pipeline.rules import REQUIREMENT_TYPES
+
+
def test_requirement_types_cover_all_docx_types() -> None:
    """REQUIREMENT_TYPES must match exactly the 14 types defined in the spec docx."""
    expected_types = {
        "功能测试",
        "性能测试",
        "外部接口测试",
        "人机交互界面测试",
        "强度测试",
        "余量测试",
        "可靠性测试",
        "安全性测试",
        "恢复性测试",
        "边界测试",
        "安装性测试",
        "互操作性测试",
        "敏感性测试",
        "测试充分性要求",
    }
    actual_types = set(REQUIREMENT_TYPES)
    assert actual_types == expected_types
+
+
def test_identify_requirement_type_for_interface_requirement() -> None:
    """An interface-focused requirement must be classified as 外部接口测试."""
    requirement = "请针对外部接口的入参格式、出参内容和异常返回进行测试分解"
    result = run_testing_pipeline(
        user_requirement_text=requirement,
        debug=True,
        use_model_generation=False,
    )

    items = result["test_items"]
    assert result["requirement_type"] == "外部接口测试"
    assert len(items["normal"]) > 0
    assert len(items["abnormal"]) > 0
+
+
def test_unknown_type_fallback_still_generates_content() -> None:
    """Even an unclassifiable requirement must still yield items and matching cases."""
    result = run_testing_pipeline(
        user_requirement_text="请给我一份跨域的综合验证策略,重点关注体系化和可追溯性。",
        debug=True,
        use_model_generation=False,
    )

    items = result["test_items"]
    cases = result["test_cases"]
    normal_item_ids = {item["id"] for item in items["normal"]}
    abnormal_item_ids = {item["id"] for item in items["abnormal"]}
    normal_case_item_ids = {case["item_id"] for case in cases["normal"]}
    abnormal_case_item_ids = {case["item_id"] for case in cases["abnormal"]}

    assert result["requirement_type"] in {"未知类型", "功能测试", "边界测试", "性能测试"}
    assert len(items["normal"]) > 0
    assert len(items["abnormal"]) > 0
    # Every decomposed item must be covered by at least one generated case.
    assert normal_item_ids.issubset(normal_case_item_ids)
    assert abnormal_item_ids.issubset(abnormal_case_item_ids)
+
+
def test_output_mapping_and_formatting() -> None:
    """End-to-end mapping invariants: item→case→expected-result linkage and output layout."""
    result = run_testing_pipeline(
        user_requirement_text="系统需要验证边界值、非法输入和状态转换端点处理能力",
        debug=True,
        use_model_generation=False,
    )

    # Collect the id linkage between test items, test cases and expected results.
    normal_item_ids = {item["id"] for item in result["test_items"]["normal"]}
    normal_case_item_ids = [case["item_id"] for case in result["test_cases"]["normal"]]
    normal_case_ids = [case["id"] for case in result["test_cases"]["normal"]]
    normal_expected_ids = [expected["id"] for expected in result["expected_results"]["normal"]]
    placeholders = {case["expected_result_placeholder"] for case in result["test_cases"]["normal"]}
    step_lengths = [len(case["operation_steps"]) for case in result["test_cases"]["normal"]]
    # Count how many cases each item produced (ignoring cases for unknown items).
    item_case_counts = {item_id: 0 for item_id in normal_item_ids}
    for item_id in normal_case_item_ids:
        if item_id in item_case_counts:
            item_case_counts[item_id] += 1
    step_names = [log["step_name"] for log in result["step_logs"][:3]]

    # Every case maps to a known item and every item has at least one case.
    assert set(normal_case_item_ids).issubset(normal_item_ids)
    assert normal_item_ids.issubset(set(normal_case_item_ids))
    # Expected results are emitted 1:1, in order, with the cases; case ids
    # are derived from their parent item id ("<item>-C<n>").
    assert normal_expected_ids == normal_case_ids
    assert all(case_id.startswith(f"{item_id}-C") for case_id, item_id in zip(normal_case_ids, normal_case_item_ids))
    # Each case has at least five operation steps, and some item has >= 2 cases.
    assert all(length >= 5 for length in step_lengths)
    assert any(count >= 2 for count in item_case_counts.values())
    # Placeholders are restricted to the known template variables.
    assert placeholders.issubset(
        {
            "{{return_value}}",
            "{{state_change}}",
            "{{error_message}}",
            "{{data_persistence}}",
            "{{ui_display}}",
        }
    )
    # The first three pipeline steps run in a fixed order.
    assert step_names == [
        "identify-requirement-type",
        "decompose-test-items",
        "generate-test-cases",
    ]
    # The formatted output contains the expected section headers.
    assert "**测试项**" in result["formatted_output"]
    assert "\n\n**正常测试**:" in result["formatted_output"]
    assert "\n\n**异常测试**:" in result["formatted_output"]
    assert "**测试用例**" in result["formatted_output"]
    assert "**预期成果**" in result["formatted_output"]
+
+
def test_identify_adequacy_requirement_type() -> None:
    """Coverage-rate wording must be classified as 测试充分性要求."""
    requirement = "请根据测试充分性要求检查需求覆盖率、语句覆盖率和分支覆盖率是否达到100%"
    result = run_testing_pipeline(
        user_requirement_text=requirement,
        debug=True,
        use_model_generation=False,
    )

    items = result["test_items"]
    assert result["requirement_type"] == "测试充分性要求"
    assert len(items["normal"]) > 0
    assert len(items["abnormal"]) > 0
+
+
def test_output_should_not_repeat_raw_requirement_context() -> None:
    """The formatted output keeps requirement facts but drops the instruction wrapper."""
    raw_text = (
        "为以下需求生成测试用例:根据作战需求,对电场综合防护设备的静电场防护进行启停8控制,"
        "轴频电场防护出航即开启,同时对外加电流阴极保护设备进行远程控制"
    )
    result = run_testing_pipeline(
        user_requirement_text=raw_text,
        debug=True,
        use_model_generation=False,
    )

    formatted = result["formatted_output"]
    assert "需求上下文" not in formatted
    assert "为以下需求生成测试用例" not in formatted
    assert "启停8控制" in formatted
diff --git a/rag-web-ui/backend/uploads/README.md b/rag-web-ui/backend/uploads/README.md
new file mode 100644
index 0000000..15aa7ec
--- /dev/null
+++ b/rag-web-ui/backend/uploads/README.md
@@ -0,0 +1,97 @@
+
+
+
+
+
# Chroma

Chroma - the open-source embedding database.
The fastest way to build Python or JavaScript LLM apps with memory!

[Docs](https://docs.trychroma.com/) | [Homepage](https://www.trychroma.com/)
+
+
+
+
+```bash
+pip install chromadb # python client
+# for javascript, npm install chromadb!
+# for client-server mode, chroma run --path /chroma_db_path
+```
+
+The core API is only 4 functions (run our [💡 Google Colab](https://colab.research.google.com/drive/1QEzFyqnoFxq7LUGyP1vzR4iLt9PpCDXv?usp=sharing) or [Replit template](https://replit.com/@swyx/BasicChromaStarter?v=1)):
+
+```python
+import chromadb
+# setup Chroma in-memory, for easy prototyping. Can add persistence easily!
+client = chromadb.Client()
+
+# Create collection. get_collection, get_or_create_collection, delete_collection also available!
+collection = client.create_collection("all-my-documents")
+
+# Add docs to the collection. Can also update and delete. Row-based API coming soon!
+collection.add(
+ documents=["This is document1", "This is document2"], # we handle tokenization, embedding, and indexing automatically. You can skip that and add your own embeddings as well
+ metadatas=[{"source": "notion"}, {"source": "google-docs"}], # filter on these!
+ ids=["doc1", "doc2"], # unique for each doc
+)
+
+# Query/search 2 most similar results. You can also .get by id
+results = collection.query(
+ query_texts=["This is a query document"],
+ n_results=2,
+ # where={"metadata_field": "is_equal_to_this"}, # optional filter
+ # where_document={"$contains":"search_string"} # optional filter
+)
+```
+
+## Features
+- __Simple__: Fully-typed, fully-tested, fully-documented == happiness
+- __Integrations__: [`🦜️🔗 LangChain`](https://blog.langchain.dev/langchain-chroma/) (python and js), [`🦙 LlamaIndex`](https://twitter.com/atroyn/status/1628557389762007040) and more soon
+- __Dev, Test, Prod__: the same API that runs in your python notebook, scales to your cluster
+- __Feature-rich__: Queries, filtering, density estimation and more
+- __Free & Open Source__: Apache 2.0 Licensed
+
+## Use case: ChatGPT for ______
+
+For example, the `"Chat your data"` use case:
+1. Add documents to your database. You can pass in your own embeddings, embedding function, or let Chroma embed them for you.
+2. Query relevant documents with natural language.
+3. Compose documents into the context window of an LLM like `GPT3` for additional summarization or analysis.
+
+## Embeddings?
+
+What are embeddings?
+
+- [Read the guide from OpenAI](https://platform.openai.com/docs/guides/embeddings/what-are-embeddings)
+- __Literal__: Embedding something turns it from image/text/audio into a list of numbers. 🖼️ or 📄 => `[1.2, 2.1, ....]`. This process makes documents "understandable" to a machine learning model.
+- __By analogy__: An embedding represents the essence of a document. This enables documents and queries with the same essence to be "near" each other and therefore easy to find.
+- __Technical__: An embedding is the latent-space position of a document at a layer of a deep neural network. For models trained specifically to embed data, this is the last layer.
+- __A small example__: If you search your photos for "famous bridge in San Francisco". By embedding this query and comparing it to the embeddings of your photos and their metadata - it should return photos of the Golden Gate Bridge.
+
+Embeddings databases (also known as **vector databases**) store embeddings and allow you to search by nearest neighbors rather than by substrings like a traditional database. By default, Chroma uses [Sentence Transformers](https://docs.trychroma.com/guides/embeddings#default:-all-minilm-l6-v2) to embed for you but you can also use OpenAI embeddings, Cohere (multilingual) embeddings, or your own.
+
+## Get involved
+
+Chroma is a rapidly developing project. We welcome PR contributors and ideas for how to improve the project.
+- [Join the conversation on Discord](https://discord.gg/MMeYNTmh3x) - `#contributing` channel
+- [Review the 🛣️ Roadmap and contribute your ideas](https://docs.trychroma.com/roadmap)
+- [Grab an issue and open a PR](https://github.com/chroma-core/chroma/issues) - [`Good first issue tag`](https://github.com/chroma-core/chroma/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22)
+- [Read our contributing guide](https://docs.trychroma.com/contributing)
+
+**Release Cadence**
+We currently release new tagged versions of the `pypi` and `npm` packages on Mondays. Hotfixes go out at any time during the week.
+
+## License
+
+[Apache 2.0](./LICENSE)
diff --git a/rag-web-ui/docker-compose.dev.yml b/rag-web-ui/docker-compose.dev.yml
new file mode 100644
index 0000000..a55ce0c
--- /dev/null
+++ b/rag-web-ui/docker-compose.dev.yml
@@ -0,0 +1,135 @@
+services:
+ nginx-dev:
+ image: nginx:alpine
+ ports:
+ - "80:80"
+ volumes:
+ - ./nginx.dev.conf:/etc/nginx/nginx.conf:ro
+ depends_on:
+ - frontend
+ - backend
+ - minio
+ networks:
+ - app_network
+ healthcheck:
+ test: ["CMD", "nginx", "-t"]
+ interval: 30s
+ timeout: 10s
+ retries: 3
+ restart: unless-stopped
+ logging:
+ driver: "json-file"
+ options:
+ max-size: "10m"
+ max-file: "3"
+
+ backend:
+ build:
+ context: ./backend
+ dockerfile: Dockerfile.dev
+ ports:
+ - "8000:8000"
+ env_file:
+ - .env
+ extra_hosts:
+ - "host.docker.internal:host-gateway"
+ environment:
+ - DEBUG=1
+ - PYTHONUNBUFFERED=1
+ - MYSQL_SERVER=db
+ - CHROMA_DB_HOST=chromadb
+ - CHROMA_DB_PORT=8000
+ - MINIO_ENDPOINT=minio:9000
+ - NEO4J_URL=bolt://host.docker.internal:7687
+ volumes:
+ - ./backend:/app
+ - ./uploads:/app/uploads
+ networks:
+ - app_network
+ depends_on:
+ - db
+ - chromadb
+ - minio
+ restart: on-failure
+ deploy:
+ restart_policy:
+ condition: on-failure
+ delay: 5s
+ max_attempts: 3
+ logging:
+ driver: "json-file"
+ options:
+ max-size: "10m"
+ max-file: "3"
+
+ frontend:
+ build:
+ context: ./frontend
+ dockerfile: Dockerfile.dev
+ environment:
+ - WATCHPACK_POLLING=true
+ - CHOKIDAR_USEPOLLING=true
+ - NODE_ENV=development
+ - NEXT_PUBLIC_API_URL=http://localhost/api
+ ports:
+ - "3000:3000"
+ volumes:
+ - ./frontend:/app
+ - /app/node_modules
+ env_file:
+ - .env
+ networks:
+ - app_network
+ logging:
+ driver: "json-file"
+ options:
+ max-size: "10m"
+ max-file: "3"
+
+ db:
+ image: mysql:8.0
+ command: --default-authentication-plugin=mysql_native_password
+ environment:
+ - MYSQL_ROOT_PASSWORD=root
+ - MYSQL_DATABASE=ragagent
+ - MYSQL_USER=ragagent
+ - MYSQL_PASSWORD=ragagent
+ - TZ=Asia/Shanghai
+ ports:
+ - "3306:3306"
+ volumes:
+ - mysql_data:/var/lib/mysql
+ networks:
+ - app_network
+
+ chromadb:
+ image: chromadb/chroma:latest
+ ports:
+ - "8001:8000"
+ volumes:
+ - chroma_data:/chroma/chroma
+ networks:
+ - app_network
+
+ minio:
+ image: minio/minio:latest
+ ports:
+ - "9000:9000"
+ - "9001:9001"
+ environment:
+ - MINIO_ROOT_USER=minioadmin
+ - MINIO_ROOT_PASSWORD=minioadmin
+ volumes:
+ - minio_data:/data
+ command: server --console-address ":9001" /data
+ networks:
+ - app_network
+
+volumes:
+ mysql_data:
+ chroma_data:
+ minio_data:
+
+networks:
+ app_network:
+ driver: bridge
diff --git a/rag-web-ui/docker-compose.yml b/rag-web-ui/docker-compose.yml
new file mode 100644
index 0000000..78bcf38
--- /dev/null
+++ b/rag-web-ui/docker-compose.yml
@@ -0,0 +1,130 @@
+services:
+ backend:
+ build: ./backend
+ env_file:
+ - .env
+ extra_hosts:
+ - "host.docker.internal:host-gateway"
+ environment:
+ - MYSQL_SERVER=db
+ - CHROMA_DB_HOST=chromadb
+ - CHROMA_DB_PORT=8000
+ - MINIO_ENDPOINT=minio:9000
+ - NEO4J_URL=bolt://host.docker.internal:7687
+ volumes:
+ - ./backend:/app
+ - ./uploads:/app/uploads
+ networks:
+ - app_network
+ depends_on:
+ db:
+ condition: service_healthy
+ chromadb:
+ condition: service_started
+ minio:
+ condition: service_started
+ restart: on-failure
+ deploy:
+ restart_policy:
+ condition: on-failure
+ delay: 5s
+ max_attempts: 3
+
+ frontend:
+ build: ./frontend
+ volumes:
+ - ./frontend:/app
+ - /app/node_modules
+ networks:
+ - app_network
+
+ db:
+ image: mysql:8.0
+ command: --default-authentication-plugin=mysql_native_password
+ environment:
+ - MYSQL_ROOT_PASSWORD=root
+ - MYSQL_DATABASE=ragagent
+ - MYSQL_USER=ragagent
+ - MYSQL_PASSWORD=ragagent
+ - TZ=Asia/Shanghai
+ ports:
+ - "3306:3306"
+ volumes:
+ - mysql_data:/var/lib/mysql
+ networks:
+ - app_network
+ healthcheck:
+ test:
+ [
+ "CMD",
+ "mysqladmin",
+ "ping",
+ "-h",
+ "localhost",
+ "-u",
+ "$$MYSQL_USER",
+ "--password=$$MYSQL_PASSWORD",
+ ]
+ interval: 5s
+ timeout: 5s
+ retries: 5
+ start_period: 10s
+
+ chromadb:
+ image: chromadb/chroma:latest
+ ports:
+ - "8001:8000"
+ volumes:
+ - chroma_data:/chroma/chroma
+ networks:
+ - app_network
+
  # To use Qdrant instead, uncomment the following lines and restart the stack
+ # qdrant:
+ # image: qdrant/qdrant:latest
+ # ports:
+ # - "6333:6333" # REST API
+ # - "6334:6334" # GRPC
+ # volumes:
+ # - qdrant_data:/qdrant/storage
+ # environment:
+ # - QDRANT_ALLOW_RECOVERY_MODE=true
+ # networks:
+ # - app_network
+
+ minio:
+ image: minio/minio:latest
+ ports:
+ - "9000:9000" # API port
+ - "9001:9001" # Console port
+ environment:
+ - MINIO_ROOT_USER=minioadmin
+ - MINIO_ROOT_PASSWORD=minioadmin
+ volumes:
+ - minio_data:/data
+ command: server --console-address ":9001" /data
+ networks:
+ - app_network
+
+ nginx:
+ image: nginx:alpine
+ ports:
+ - "80:80"
+ volumes:
+ - ./nginx.conf:/etc/nginx/nginx.conf:ro
+ depends_on:
+ - frontend
+ - backend
+ - minio
+ networks:
+ - app_network
+
+volumes:
+ mysql_data:
+ chroma_data:
+ minio_data:
+ # qdrant_data:
+
+networks:
+ app_network:
+ driver: bridge
diff --git a/rag-web-ui/frontend/.dockerignore b/rag-web-ui/frontend/.dockerignore
new file mode 100644
index 0000000..883f4d4
--- /dev/null
+++ b/rag-web-ui/frontend/.dockerignore
@@ -0,0 +1,9 @@
+node_modules
+.next
+.git
+.gitignore
+Dockerfile.dev
+npm-debug.log*
+yarn-debug.log*
+yarn-error.log*
+pnpm-debug.log*
diff --git a/rag-web-ui/frontend/.gitignore b/rag-web-ui/frontend/.gitignore
new file mode 100644
index 0000000..5ef6a52
--- /dev/null
+++ b/rag-web-ui/frontend/.gitignore
@@ -0,0 +1,41 @@
+# See https://help.github.com/articles/ignoring-files/ for more about ignoring files.
+
+# dependencies
+/node_modules
+/.pnp
+.pnp.*
+.yarn/*
+!.yarn/patches
+!.yarn/plugins
+!.yarn/releases
+!.yarn/versions
+
+# testing
+/coverage
+
+# next.js
+/.next/
+/out/
+
+# production
+/build
+
+# misc
+.DS_Store
+*.pem
+
+# debug
+npm-debug.log*
+yarn-debug.log*
+yarn-error.log*
+.pnpm-debug.log*
+
+# env files (can opt-in for committing if needed)
+.env*
+
+# vercel
+.vercel
+
+# typescript
+*.tsbuildinfo
+next-env.d.ts
diff --git a/rag-web-ui/frontend/Dockerfile b/rag-web-ui/frontend/Dockerfile
new file mode 100644
index 0000000..b2a50ce
--- /dev/null
+++ b/rag-web-ui/frontend/Dockerfile
@@ -0,0 +1,60 @@
FROM node:20-alpine AS base

### Dependencies ###
FROM base AS deps
RUN apk add --no-cache libc6-compat git

# Setup pnpm environment
ENV PNPM_HOME="/pnpm"
ENV PATH="$PNPM_HOME:$PATH"
RUN npm install -g pnpm@10.28.2

WORKDIR /app

COPY package.json pnpm-lock.yaml ./
RUN pnpm install --frozen-lockfile --prefer-frozen-lockfile

# Builder
FROM base AS builder

RUN npm install -g pnpm@10.28.2

WORKDIR /app

COPY --from=deps /app/node_modules ./node_modules
COPY . .
RUN pnpm build


### Production image runner ###
FROM base AS runner

# Keep runtime files under /app instead of the container root
# (the original runner stage had no WORKDIR, so .next/ landed in "/").
WORKDIR /app

# Set NODE_ENV to production (key=value form; the legacy space-separated form
# is deprecated and triggers a build warning on current Docker).
ENV NODE_ENV=production

# Disable Next.js telemetry
# Learn more here: https://nextjs.org/telemetry
ENV NEXT_TELEMETRY_DISABLED=1

# Run as a dedicated non-root user. Add nextjs to the nodejs group so the
# chown below gives the user group-ownership of its own files (the original
# created the group but never put the user in it).
RUN addgroup -S nodejs \
    && adduser -S -D -H -G nodejs nextjs \
    && mkdir .next \
    && chown nextjs:nodejs .next

# Automatically leverage output traces to reduce image size
# https://nextjs.org/docs/advanced-features/output-file-tracing
COPY --from=builder --chown=nextjs:nodejs /app/.next/standalone ./
COPY --from=builder --chown=nextjs:nodejs /app/.next/static ./.next/static
COPY --from=builder --chown=nextjs:nodejs /app/public ./public

USER nextjs

# Exposed port (for orchestrators and dynamic reverse proxies)
EXPOSE 3000
ENV PORT=3000
ENV HOSTNAME="0.0.0.0"
HEALTHCHECK --interval=30s --timeout=30s --start-period=5s --retries=3 CMD [ "wget", "-q", "-O", "/dev/null", "http://127.0.0.1:3000" ]

# Run the nextjs app
CMD ["node", "server.js"]
\ No newline at end of file
diff --git a/rag-web-ui/frontend/Dockerfile.dev b/rag-web-ui/frontend/Dockerfile.dev
new file mode 100644
index 0000000..288a1fa
--- /dev/null
+++ b/rag-web-ui/frontend/Dockerfile.dev
@@ -0,0 +1,21 @@
+FROM node:20-alpine
+
+WORKDIR /app
+
+# Install pnpm
+RUN npm install -g pnpm@10.28.2
+
+# Copy package files
+COPY package.json pnpm-lock.yaml ./
+
+# Install dependencies
+RUN pnpm install --frozen-lockfile
+
+# Copy the rest of the application
+COPY . .
+
+# Expose the port
+EXPOSE 3000
+
+# Start the development server, attention: this is for development, not for production
+CMD ["pnpm", "dev"]
diff --git a/rag-web-ui/frontend/README.md b/rag-web-ui/frontend/README.md
new file mode 100644
index 0000000..e215bc4
--- /dev/null
+++ b/rag-web-ui/frontend/README.md
@@ -0,0 +1,36 @@
+This is a [Next.js](https://nextjs.org) project bootstrapped with [`create-next-app`](https://nextjs.org/docs/app/api-reference/cli/create-next-app).
+
+## Getting Started
+
+First, run the development server:
+
+```bash
+npm run dev
+# or
+yarn dev
+# or
+pnpm dev
+# or
+bun dev
+```
+
+Open [http://localhost:3000](http://localhost:3000) with your browser to see the result.
+
+You can start editing the page by modifying `app/page.tsx`. The page auto-updates as you edit the file.
+
+This project uses [`next/font`](https://nextjs.org/docs/app/building-your-application/optimizing/fonts) to automatically optimize and load [Geist](https://vercel.com/font), a new font family for Vercel.
+
+## Learn More
+
+To learn more about Next.js, take a look at the following resources:
+
+- [Next.js Documentation](https://nextjs.org/docs) - learn about Next.js features and API.
+- [Learn Next.js](https://nextjs.org/learn) - an interactive Next.js tutorial.
+
+You can check out [the Next.js GitHub repository](https://github.com/vercel/next.js) - your feedback and contributions are welcome!
+
+## Deploy on Vercel
+
+The easiest way to deploy your Next.js app is to use the [Vercel Platform](https://vercel.com/new?utm_medium=default-template&filter=next.js&utm_source=create-next-app&utm_campaign=create-next-app-readme) from the creators of Next.js.
+
+Check out our [Next.js deployment documentation](https://nextjs.org/docs/app/building-your-application/deploying) for more details.
diff --git a/rag-web-ui/frontend/components.json b/rag-web-ui/frontend/components.json
new file mode 100644
index 0000000..8c574b7
--- /dev/null
+++ b/rag-web-ui/frontend/components.json
@@ -0,0 +1,17 @@
+{
+ "$schema": "https://ui.shadcn.com/schema.json",
+ "style": "default",
+ "rsc": true,
+ "tsx": true,
+ "tailwind": {
+ "config": "tailwind.config.ts",
+ "css": "src/app/globals.css",
+ "baseColor": "slate",
+ "cssVariables": true,
+ "prefix": ""
+ },
+ "aliases": {
+ "components": "@/components",
+ "utils": "@/lib/utils"
+ }
+}
\ No newline at end of file
diff --git a/rag-web-ui/frontend/eslint.config.mjs b/rag-web-ui/frontend/eslint.config.mjs
new file mode 100644
index 0000000..c85fb67
--- /dev/null
+++ b/rag-web-ui/frontend/eslint.config.mjs
@@ -0,0 +1,16 @@
// ESLint flat-config entry point.
// Uses FlatCompat to consume Next.js' legacy (eslintrc-style) shareable
// configs from the new flat-config format.
import { dirname } from "path";
import { fileURLToPath } from "url";
import { FlatCompat } from "@eslint/eslintrc";

// Recreate __dirname, which is not defined in ES modules.
const __filename = fileURLToPath(import.meta.url);
const __dirname = dirname(__filename);

const compat = new FlatCompat({
  baseDirectory: __dirname,
});

// Extend Next.js' recommended Core Web Vitals and TypeScript rule sets.
const eslintConfig = [
  ...compat.extends("next/core-web-vitals", "next/typescript"),
];

export default eslintConfig;
diff --git a/rag-web-ui/frontend/next.config.js b/rag-web-ui/frontend/next.config.js
new file mode 100644
index 0000000..ca1096f
--- /dev/null
+++ b/rag-web-ui/frontend/next.config.js
@@ -0,0 +1,11 @@
+/** @type {import('next').NextConfig} */
+module.exports = {
+ output: "standalone",
+ experimental: {
+ // This is needed for standalone output to work correctly
+ outputFileTracingRoot: undefined,
+ outputStandalone: true,
+ skipMiddlewareUrlNormalize: true,
+ skipTrailingSlashRedirect: true,
+ },
+};
diff --git a/rag-web-ui/frontend/package.json b/rag-web-ui/frontend/package.json
new file mode 100644
index 0000000..2265b9c
--- /dev/null
+++ b/rag-web-ui/frontend/package.json
@@ -0,0 +1,61 @@
+{
+ "name": "rag-web-ui-frontend",
+ "version": "0.1.0",
+ "private": true,
+ "scripts": {
+ "dev": "next dev",
+ "build": "next build",
+ "start": "node server.js",
+ "lint": "next lint"
+ },
+ "dependencies": {
+ "@radix-ui/react-accordion": "^1.2.2",
+ "@radix-ui/react-alert-dialog": "^1.0.5",
+ "@radix-ui/react-dialog": "^1.1.4",
+ "@radix-ui/react-dropdown-menu": "^2.0.6",
+ "@radix-ui/react-label": "^2.1.1",
+ "@radix-ui/react-popover": "^1.1.4",
+ "@radix-ui/react-progress": "^1.1.1",
+ "@radix-ui/react-slot": "^1.1.1",
+ "@radix-ui/react-switch": "^1.1.2",
+ "@radix-ui/react-tabs": "^1.1.2",
+ "@radix-ui/react-toast": "^1.2.4",
+ "ai": "^4.0.1",
+ "class-variance-authority": "^0.7.1",
+ "clsx": "^2.1.1",
+ "date-fns": "^4.1.0",
+ "highlight.js": "^11.11.1",
+ "lucide-react": "^0.323.0",
+ "mdast-util-from-markdown": "^2.0.2",
+ "next": "14.2.32",
+ "next-themes": "^0.2.1",
+ "react": "^18",
+ "react-dom": "^18",
+ "react-dropzone": "^14.2.3",
+ "react-file-icon": "^1.5.0",
+ "react-markdown": "^9.0.3",
+ "react-syntax-highlighter": "^15.6.1",
+ "rehype-highlight": "^7.0.2",
+ "rehype-raw": "^7.0.0",
+ "remark-gfm": "^4.0.0",
+ "shadcn-ui": "^0.8.0",
+ "tailwind-merge": "^2.6.0",
+ "tailwindcss-animate": "^1.0.7",
+ "unified": "^11.0.5",
+ "unist-util-visit": "^5.0.0"
+ },
+ "devDependencies": {
+ "@radix-ui/react-select": "^2.1.4",
+ "@tailwindcss/line-clamp": "^0.4.4",
+ "@types/node": "^20",
+ "@types/react": "^18",
+ "@types/react-dom": "^18",
+ "@types/react-file-icon": "^1.0.4",
+ "autoprefixer": "^10.0.1",
+ "eslint": "^8",
+ "eslint-config-next": "14.1.0",
+ "postcss": "^8",
+ "tailwindcss": "^3.3.0",
+ "typescript": "^5"
+ }
+}
\ No newline at end of file
diff --git a/rag-web-ui/frontend/pnpm-lock.yaml b/rag-web-ui/frontend/pnpm-lock.yaml
new file mode 100644
index 0000000..b256994
--- /dev/null
+++ b/rag-web-ui/frontend/pnpm-lock.yaml
@@ -0,0 +1,7226 @@
+lockfileVersion: '9.0'
+
+settings:
+ autoInstallPeers: true
+ excludeLinksFromLockfile: false
+
+importers:
+
+ .:
+ dependencies:
+ '@radix-ui/react-accordion':
+ specifier: ^1.2.2
+ version: 1.2.12(@types/react-dom@18.3.7(@types/react@18.3.28))(@types/react@18.3.28)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
+ '@radix-ui/react-alert-dialog':
+ specifier: ^1.0.5
+ version: 1.1.15(@types/react-dom@18.3.7(@types/react@18.3.28))(@types/react@18.3.28)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
+ '@radix-ui/react-dialog':
+ specifier: ^1.1.4
+ version: 1.1.15(@types/react-dom@18.3.7(@types/react@18.3.28))(@types/react@18.3.28)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
+ '@radix-ui/react-dropdown-menu':
+ specifier: ^2.0.6
+ version: 2.1.16(@types/react-dom@18.3.7(@types/react@18.3.28))(@types/react@18.3.28)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
+ '@radix-ui/react-label':
+ specifier: ^2.1.1
+ version: 2.1.8(@types/react-dom@18.3.7(@types/react@18.3.28))(@types/react@18.3.28)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
+ '@radix-ui/react-popover':
+ specifier: ^1.1.4
+ version: 1.1.15(@types/react-dom@18.3.7(@types/react@18.3.28))(@types/react@18.3.28)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
+ '@radix-ui/react-progress':
+ specifier: ^1.1.1
+ version: 1.1.8(@types/react-dom@18.3.7(@types/react@18.3.28))(@types/react@18.3.28)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
+ '@radix-ui/react-slot':
+ specifier: ^1.1.1
+ version: 1.2.4(@types/react@18.3.28)(react@18.3.1)
+ '@radix-ui/react-switch':
+ specifier: ^1.1.2
+ version: 1.2.6(@types/react-dom@18.3.7(@types/react@18.3.28))(@types/react@18.3.28)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
+ '@radix-ui/react-tabs':
+ specifier: ^1.1.2
+ version: 1.1.13(@types/react-dom@18.3.7(@types/react@18.3.28))(@types/react@18.3.28)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
+ '@radix-ui/react-toast':
+ specifier: ^1.2.4
+ version: 1.2.15(@types/react-dom@18.3.7(@types/react@18.3.28))(@types/react@18.3.28)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
+ ai:
+ specifier: ^4.0.1
+ version: 4.3.19(react@18.3.1)(zod@3.25.76)
+ class-variance-authority:
+ specifier: ^0.7.1
+ version: 0.7.1
+ clsx:
+ specifier: ^2.1.1
+ version: 2.1.1
+ date-fns:
+ specifier: ^4.1.0
+ version: 4.1.0
+ highlight.js:
+ specifier: ^11.11.1
+ version: 11.11.1
+ lucide-react:
+ specifier: ^0.323.0
+ version: 0.323.0(react@18.3.1)
+ mdast-util-from-markdown:
+ specifier: ^2.0.2
+ version: 2.0.3
+ next:
+ specifier: 14.2.32
+ version: 14.2.32(@babel/core@7.29.0)(@opentelemetry/api@1.9.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
+ next-themes:
+ specifier: ^0.2.1
+ version: 0.2.1(next@14.2.32(@babel/core@7.29.0)(@opentelemetry/api@1.9.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
+ react:
+ specifier: ^18
+ version: 18.3.1
+ react-dom:
+ specifier: ^18
+ version: 18.3.1(react@18.3.1)
+ react-dropzone:
+ specifier: ^14.2.3
+ version: 14.4.1(react@18.3.1)
+ react-file-icon:
+ specifier: ^1.5.0
+ version: 1.6.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
+ react-markdown:
+ specifier: ^9.0.3
+ version: 9.1.0(@types/react@18.3.28)(react@18.3.1)
+ react-syntax-highlighter:
+ specifier: ^15.6.1
+ version: 15.6.6(react@18.3.1)
+ rehype-highlight:
+ specifier: ^7.0.2
+ version: 7.0.2
+ rehype-raw:
+ specifier: ^7.0.0
+ version: 7.0.0
+ remark-gfm:
+ specifier: ^4.0.0
+ version: 4.0.1
+ shadcn-ui:
+ specifier: ^0.8.0
+ version: 0.8.0(typescript@5.9.3)
+ tailwind-merge:
+ specifier: ^2.6.0
+ version: 2.6.1
+ tailwindcss-animate:
+ specifier: ^1.0.7
+ version: 1.0.7(tailwindcss@3.4.19)
+ unified:
+ specifier: ^11.0.5
+ version: 11.0.5
+ unist-util-visit:
+ specifier: ^5.0.0
+ version: 5.1.0
+ devDependencies:
+ '@radix-ui/react-select':
+ specifier: ^2.1.4
+ version: 2.2.6(@types/react-dom@18.3.7(@types/react@18.3.28))(@types/react@18.3.28)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
+ '@tailwindcss/line-clamp':
+ specifier: ^0.4.4
+ version: 0.4.4(tailwindcss@3.4.19)
+ '@types/node':
+ specifier: ^20
+ version: 20.19.39
+ '@types/react':
+ specifier: ^18
+ version: 18.3.28
+ '@types/react-dom':
+ specifier: ^18
+ version: 18.3.7(@types/react@18.3.28)
+ '@types/react-file-icon':
+ specifier: ^1.0.4
+ version: 1.0.5
+ autoprefixer:
+ specifier: ^10.0.1
+ version: 10.4.27(postcss@8.5.8)
+ eslint:
+ specifier: ^8
+ version: 8.57.1
+ eslint-config-next:
+ specifier: 14.1.0
+ version: 14.1.0(eslint@8.57.1)(typescript@5.9.3)
+ postcss:
+ specifier: ^8
+ version: 8.5.8
+ tailwindcss:
+ specifier: ^3.3.0
+ version: 3.4.19
+ typescript:
+ specifier: ^5
+ version: 5.9.3
+
+packages:
+
+ '@ai-sdk/provider-utils@2.2.8':
+ resolution: {integrity: sha512-fqhG+4sCVv8x7nFzYnFo19ryhAa3w096Kmc3hWxMQfW/TubPOmt3A6tYZhl4mUfQWWQMsuSkLrtjlWuXBVSGQA==}
+ engines: {node: '>=18'}
+ peerDependencies:
+ zod: ^3.23.8
+
+ '@ai-sdk/provider@1.1.3':
+ resolution: {integrity: sha512-qZMxYJ0qqX/RfnuIaab+zp8UAeJn/ygXXAffR5I4N0n1IrvA6qBsjc8hXLmBiMV2zoXlifkacF7sEFnYnjBcqg==}
+ engines: {node: '>=18'}
+
+ '@ai-sdk/react@1.2.12':
+ resolution: {integrity: sha512-jK1IZZ22evPZoQW3vlkZ7wvjYGYF+tRBKXtrcolduIkQ/m/sOAVcVeVDUDvh1T91xCnWCdUGCPZg2avZ90mv3g==}
+ engines: {node: '>=18'}
+ peerDependencies:
+ react: ^18 || ^19 || ^19.0.0-rc
+ zod: ^3.23.8
+ peerDependenciesMeta:
+ zod:
+ optional: true
+
+ '@ai-sdk/ui-utils@1.2.11':
+ resolution: {integrity: sha512-3zcwCc8ezzFlwp3ZD15wAPjf2Au4s3vAbKsXQVyhxODHcmu0iyPO2Eua6D/vicq/AUm/BAo60r97O6HU+EI0+w==}
+ engines: {node: '>=18'}
+ peerDependencies:
+ zod: ^3.23.8
+
+ '@alloc/quick-lru@5.2.0':
+ resolution: {integrity: sha512-UrcABB+4bUrFABwbluTIBErXwvbsU/V7TZWfmbgJfbkwiBuziS9gxdODUyuiecfdGQ85jglMW6juS3+z5TsKLw==}
+ engines: {node: '>=10'}
+
+ '@antfu/ni@0.21.12':
+ resolution: {integrity: sha512-2aDL3WUv8hMJb2L3r/PIQWsTLyq7RQr3v9xD16fiz6O8ys1xEyLhhTOv8gxtZvJiTzjTF5pHoArvRdesGL1DMQ==}
+ hasBin: true
+
+ '@babel/code-frame@7.29.0':
+ resolution: {integrity: sha512-9NhCeYjq9+3uxgdtp20LSiJXJvN0FeCtNGpJxuMFZ1Kv3cWUNb6DOhJwUvcVCzKGR66cw4njwM6hrJLqgOwbcw==}
+ engines: {node: '>=6.9.0'}
+
+ '@babel/compat-data@7.29.0':
+ resolution: {integrity: sha512-T1NCJqT/j9+cn8fvkt7jtwbLBfLC/1y1c7NtCeXFRgzGTsafi68MRv8yzkYSapBnFA6L3U2VSc02ciDzoAJhJg==}
+ engines: {node: '>=6.9.0'}
+
+ '@babel/core@7.29.0':
+ resolution: {integrity: sha512-CGOfOJqWjg2qW/Mb6zNsDm+u5vFQ8DxXfbM09z69p5Z6+mE1ikP2jUXw+j42Pf1XTYED2Rni5f95npYeuwMDQA==}
+ engines: {node: '>=6.9.0'}
+
+ '@babel/generator@7.29.1':
+ resolution: {integrity: sha512-qsaF+9Qcm2Qv8SRIMMscAvG4O3lJ0F1GuMo5HR/Bp02LopNgnZBC/EkbevHFeGs4ls/oPz9v+Bsmzbkbe+0dUw==}
+ engines: {node: '>=6.9.0'}
+
+ '@babel/helper-annotate-as-pure@7.27.3':
+ resolution: {integrity: sha512-fXSwMQqitTGeHLBC08Eq5yXz2m37E4pJX1qAU1+2cNedz/ifv/bVXft90VeSav5nFO61EcNgwr0aJxbyPaWBPg==}
+ engines: {node: '>=6.9.0'}
+
+ '@babel/helper-compilation-targets@7.28.6':
+ resolution: {integrity: sha512-JYtls3hqi15fcx5GaSNL7SCTJ2MNmjrkHXg4FSpOA/grxK8KwyZ5bubHsCq8FXCkua6xhuaaBit+3b7+VZRfcA==}
+ engines: {node: '>=6.9.0'}
+
+ '@babel/helper-create-class-features-plugin@7.28.6':
+ resolution: {integrity: sha512-dTOdvsjnG3xNT9Y0AUg1wAl38y+4Rl4sf9caSQZOXdNqVn+H+HbbJ4IyyHaIqNR6SW9oJpA/RuRjsjCw2IdIow==}
+ engines: {node: '>=6.9.0'}
+ peerDependencies:
+ '@babel/core': ^7.0.0
+
+ '@babel/helper-globals@7.28.0':
+ resolution: {integrity: sha512-+W6cISkXFa1jXsDEdYA8HeevQT/FULhxzR99pxphltZcVaugps53THCeiWA8SguxxpSp3gKPiuYfSWopkLQ4hw==}
+ engines: {node: '>=6.9.0'}
+
+ '@babel/helper-member-expression-to-functions@7.28.5':
+ resolution: {integrity: sha512-cwM7SBRZcPCLgl8a7cY0soT1SptSzAlMH39vwiRpOQkJlh53r5hdHwLSCZpQdVLT39sZt+CRpNwYG4Y2v77atg==}
+ engines: {node: '>=6.9.0'}
+
+ '@babel/helper-module-imports@7.28.6':
+ resolution: {integrity: sha512-l5XkZK7r7wa9LucGw9LwZyyCUscb4x37JWTPz7swwFE/0FMQAGpiWUZn8u9DzkSBWEcK25jmvubfpw2dnAMdbw==}
+ engines: {node: '>=6.9.0'}
+
+ '@babel/helper-module-transforms@7.28.6':
+ resolution: {integrity: sha512-67oXFAYr2cDLDVGLXTEABjdBJZ6drElUSI7WKp70NrpyISso3plG9SAGEF6y7zbha/wOzUByWWTJvEDVNIUGcA==}
+ engines: {node: '>=6.9.0'}
+ peerDependencies:
+ '@babel/core': ^7.0.0
+
+ '@babel/helper-optimise-call-expression@7.27.1':
+ resolution: {integrity: sha512-URMGH08NzYFhubNSGJrpUEphGKQwMQYBySzat5cAByY1/YgIRkULnIy3tAMeszlL/so2HbeilYloUmSpd7GdVw==}
+ engines: {node: '>=6.9.0'}
+
+ '@babel/helper-plugin-utils@7.28.6':
+ resolution: {integrity: sha512-S9gzZ/bz83GRysI7gAD4wPT/AI3uCnY+9xn+Mx/KPs2JwHJIz1W8PZkg2cqyt3RNOBM8ejcXhV6y8Og7ly/Dug==}
+ engines: {node: '>=6.9.0'}
+
+ '@babel/helper-replace-supers@7.28.6':
+ resolution: {integrity: sha512-mq8e+laIk94/yFec3DxSjCRD2Z0TAjhVbEJY3UQrlwVo15Lmt7C2wAUbK4bjnTs4APkwsYLTahXRraQXhb1WCg==}
+ engines: {node: '>=6.9.0'}
+ peerDependencies:
+ '@babel/core': ^7.0.0
+
+ '@babel/helper-skip-transparent-expression-wrappers@7.27.1':
+ resolution: {integrity: sha512-Tub4ZKEXqbPjXgWLl2+3JpQAYBJ8+ikpQ2Ocj/q/r0LwE3UhENh7EUabyHjz2kCEsrRY83ew2DQdHluuiDQFzg==}
+ engines: {node: '>=6.9.0'}
+
+ '@babel/helper-string-parser@7.27.1':
+ resolution: {integrity: sha512-qMlSxKbpRlAridDExk92nSobyDdpPijUq2DW6oDnUqd0iOGxmQjyqhMIihI9+zv4LPyZdRje2cavWPbCbWm3eA==}
+ engines: {node: '>=6.9.0'}
+
+ '@babel/helper-validator-identifier@7.28.5':
+ resolution: {integrity: sha512-qSs4ifwzKJSV39ucNjsvc6WVHs6b7S03sOh2OcHF9UHfVPqWWALUsNUVzhSBiItjRZoLHx7nIarVjqKVusUZ1Q==}
+ engines: {node: '>=6.9.0'}
+
+ '@babel/helper-validator-option@7.27.1':
+ resolution: {integrity: sha512-YvjJow9FxbhFFKDSuFnVCe2WxXk1zWc22fFePVNEaWJEu8IrZVlda6N0uHwzZrUM1il7NC9Mlp4MaJYbYd9JSg==}
+ engines: {node: '>=6.9.0'}
+
+ '@babel/helpers@7.29.2':
+ resolution: {integrity: sha512-HoGuUs4sCZNezVEKdVcwqmZN8GoHirLUcLaYVNBK2J0DadGtdcqgr3BCbvH8+XUo4NGjNl3VOtSjEKNzqfFgKw==}
+ engines: {node: '>=6.9.0'}
+
+ '@babel/parser@7.29.2':
+ resolution: {integrity: sha512-4GgRzy/+fsBa72/RZVJmGKPmZu9Byn8o4MoLpmNe1m8ZfYnz5emHLQz3U4gLud6Zwl0RZIcgiLD7Uq7ySFuDLA==}
+ engines: {node: '>=6.0.0'}
+ hasBin: true
+
+ '@babel/plugin-syntax-typescript@7.28.6':
+ resolution: {integrity: sha512-+nDNmQye7nlnuuHDboPbGm00Vqg3oO8niRRL27/4LYHUsHYh0zJ1xWOz0uRwNFmM1Avzk8wZbc6rdiYhomzv/A==}
+ engines: {node: '>=6.9.0'}
+ peerDependencies:
+ '@babel/core': ^7.0.0-0
+
+ '@babel/plugin-transform-typescript@7.28.6':
+ resolution: {integrity: sha512-0YWL2RFxOqEm9Efk5PvreamxPME8OyY0wM5wh5lHjF+VtVhdneCWGzZeSqzOfiobVqQaNCd2z0tQvnI9DaPWPw==}
+ engines: {node: '>=6.9.0'}
+ peerDependencies:
+ '@babel/core': ^7.0.0-0
+
+ '@babel/runtime@7.29.2':
+ resolution: {integrity: sha512-JiDShH45zKHWyGe4ZNVRrCjBz8Nh9TMmZG1kh4QTK8hCBTWBi8Da+i7s1fJw7/lYpM4ccepSNfqzZ/QvABBi5g==}
+ engines: {node: '>=6.9.0'}
+
+ '@babel/template@7.28.6':
+ resolution: {integrity: sha512-YA6Ma2KsCdGb+WC6UpBVFJGXL58MDA6oyONbjyF/+5sBgxY/dwkhLogbMT2GXXyU84/IhRw/2D1Os1B/giz+BQ==}
+ engines: {node: '>=6.9.0'}
+
+ '@babel/traverse@7.29.0':
+ resolution: {integrity: sha512-4HPiQr0X7+waHfyXPZpWPfWL/J7dcN1mx9gL6WdQVMbPnF3+ZhSMs8tCxN7oHddJE9fhNE7+lxdnlyemKfJRuA==}
+ engines: {node: '>=6.9.0'}
+
+ '@babel/types@7.29.0':
+ resolution: {integrity: sha512-LwdZHpScM4Qz8Xw2iKSzS+cfglZzJGvofQICy7W7v4caru4EaAmyUuO6BGrbyQ2mYV11W0U8j5mBhd14dd3B0A==}
+ engines: {node: '>=6.9.0'}
+
+ '@emnapi/core@1.9.2':
+ resolution: {integrity: sha512-UC+ZhH3XtczQYfOlu3lNEkdW/p4dsJ1r/bP7H8+rhao3TTTMO1ATq/4DdIi23XuGoFY+Cz0JmCbdVl0hz9jZcA==}
+
+ '@emnapi/runtime@1.9.2':
+ resolution: {integrity: sha512-3U4+MIWHImeyu1wnmVygh5WlgfYDtyf0k8AbLhMFxOipihf6nrWC4syIm/SwEeec0mNSafiiNnMJwbza/Is6Lw==}
+
+ '@emnapi/wasi-threads@1.2.1':
+ resolution: {integrity: sha512-uTII7OYF+/Mes/MrcIOYp5yOtSMLBWSIoLPpcgwipoiKbli6k322tcoFsxoIIxPDqW01SQGAgko4EzZi2BNv2w==}
+
+ '@eslint-community/eslint-utils@4.9.1':
+ resolution: {integrity: sha512-phrYmNiYppR7znFEdqgfWHXR6NCkZEK7hwWDHZUjit/2/U0r6XvkDl0SYnoM51Hq7FhCGdLDT6zxCCOY1hexsQ==}
+ engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0}
+ peerDependencies:
+ eslint: ^6.0.0 || ^7.0.0 || >=8.0.0
+
+ '@eslint-community/regexpp@4.12.2':
+ resolution: {integrity: sha512-EriSTlt5OC9/7SXkRSCAhfSxxoSUgBm33OH+IkwbdpgoqsSsUg7y3uh+IICI/Qg4BBWr3U2i39RpmycbxMq4ew==}
+ engines: {node: ^12.0.0 || ^14.0.0 || >=16.0.0}
+
+ '@eslint/eslintrc@2.1.4':
+ resolution: {integrity: sha512-269Z39MS6wVJtsoUl10L60WdkhJVdPG24Q4eZTH3nnF6lpvSShEK3wQjDX9JRWAUPvPh7COouPpU9IrqaZFvtQ==}
+ engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0}
+
+ '@eslint/js@8.57.1':
+ resolution: {integrity: sha512-d9zaMRSTIKDLhctzH12MtXvJKSSUhaHcjV+2Z+GK+EEY7XKpP5yR4x+N3TAcHTcu963nIr+TMcCb4DBCYX1z6Q==}
+ engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0}
+
+ '@floating-ui/core@1.7.5':
+ resolution: {integrity: sha512-1Ih4WTWyw0+lKyFMcBHGbb5U5FtuHJuujoyyr5zTaWS5EYMeT6Jb2AuDeftsCsEuchO+mM2ij5+q9crhydzLhQ==}
+
+ '@floating-ui/dom@1.7.6':
+ resolution: {integrity: sha512-9gZSAI5XM36880PPMm//9dfiEngYoC6Am2izES1FF406YFsjvyBMmeJ2g4SAju3xWwtuynNRFL2s9hgxpLI5SQ==}
+
+ '@floating-ui/react-dom@2.1.8':
+ resolution: {integrity: sha512-cC52bHwM/n/CxS87FH0yWdngEZrjdtLW/qVruo68qg+prK7ZQ4YGdut2GyDVpoGeAYe/h899rVeOVm6Oi40k2A==}
+ peerDependencies:
+ react: '>=16.8.0'
+ react-dom: '>=16.8.0'
+
+ '@floating-ui/utils@0.2.11':
+ resolution: {integrity: sha512-RiB/yIh78pcIxl6lLMG0CgBXAZ2Y0eVHqMPYugu+9U0AeT6YBeiJpf7lbdJNIugFP5SIjwNRgo4DhR1Qxi26Gg==}
+
+ '@humanwhocodes/config-array@0.13.0':
+ resolution: {integrity: sha512-DZLEEqFWQFiyK6h5YIeynKx7JlvCYWL0cImfSRXZ9l4Sg2efkFGTuFf6vzXjK1cq6IYkU+Eg/JizXw+TD2vRNw==}
+ engines: {node: '>=10.10.0'}
+ deprecated: Use @eslint/config-array instead
+
+ '@humanwhocodes/module-importer@1.0.1':
+ resolution: {integrity: sha512-bxveV4V8v5Yb4ncFTT3rPSgZBOpCkjfK0y4oVVVJwIuDVBRMDXrPyXRL988i5ap9m9bnyEEjWfm5WkBmtffLfA==}
+ engines: {node: '>=12.22'}
+
+ '@humanwhocodes/object-schema@2.0.3':
+ resolution: {integrity: sha512-93zYdMES/c1D69yZiKDBj0V24vqNzB/koF26KPaagAfd3P/4gUlh3Dys5ogAK+Exi9QyzlD8x/08Zt7wIKcDcA==}
+ deprecated: Use @eslint/object-schema instead
+
+ '@isaacs/cliui@8.0.2':
+ resolution: {integrity: sha512-O8jcjabXaleOG9DQ0+ARXWZBTfnP4WNAqzuiJK7ll44AmxGKv/J2M4TPjxjY3znBCfvBXFzucm1twdyFybFqEA==}
+ engines: {node: '>=12'}
+
+ '@jridgewell/gen-mapping@0.3.13':
+ resolution: {integrity: sha512-2kkt/7niJ6MgEPxF0bYdQ6etZaA+fQvDcLKckhy1yIQOzaoKjBBjSj63/aLVjYE3qhRt5dvM+uUyfCg6UKCBbA==}
+
+ '@jridgewell/remapping@2.3.5':
+ resolution: {integrity: sha512-LI9u/+laYG4Ds1TDKSJW2YPrIlcVYOwi2fUC6xB43lueCjgxV4lffOCZCtYFiH6TNOX+tQKXx97T4IKHbhyHEQ==}
+
+ '@jridgewell/resolve-uri@3.1.2':
+ resolution: {integrity: sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==}
+ engines: {node: '>=6.0.0'}
+
+ '@jridgewell/sourcemap-codec@1.5.5':
+ resolution: {integrity: sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og==}
+
+ '@jridgewell/trace-mapping@0.3.31':
+ resolution: {integrity: sha512-zzNR+SdQSDJzc8joaeP8QQoCQr8NuYx2dIIytl1QeBEZHJ9uW6hebsrYgbz8hJwUQao3TWCMtmfV8Nu1twOLAw==}
+
+ '@napi-rs/wasm-runtime@0.2.12':
+ resolution: {integrity: sha512-ZVWUcfwY4E/yPitQJl481FjFo3K22D6qF0DuFH6Y/nbnE11GY5uguDxZMGXPQ8WQ0128MXQD7TnfHyK4oWoIJQ==}
+
+ '@next/env@14.2.32':
+ resolution: {integrity: sha512-n9mQdigI6iZ/DF6pCTwMKeWgF2e8lg7qgt5M7HXMLtyhZYMnf/u905M18sSpPmHL9MKp9JHo56C6jrD2EvWxng==}
+
+ '@next/eslint-plugin-next@14.1.0':
+ resolution: {integrity: sha512-x4FavbNEeXx/baD/zC/SdrvkjSby8nBn8KcCREqk6UuwvwoAPZmaV8TFCAuo/cpovBRTIY67mHhe86MQQm/68Q==}
+
+ '@next/swc-darwin-arm64@14.2.32':
+ resolution: {integrity: sha512-osHXveM70zC+ilfuFa/2W6a1XQxJTvEhzEycnjUaVE8kpUS09lDpiDDX2YLdyFCzoUbvbo5r0X1Kp4MllIOShw==}
+ engines: {node: '>= 10'}
+ cpu: [arm64]
+ os: [darwin]
+
+ '@next/swc-darwin-x64@14.2.32':
+ resolution: {integrity: sha512-P9NpCAJuOiaHHpqtrCNncjqtSBi1f6QUdHK/+dNabBIXB2RUFWL19TY1Hkhu74OvyNQEYEzzMJCMQk5agjw1Qg==}
+ engines: {node: '>= 10'}
+ cpu: [x64]
+ os: [darwin]
+
+ '@next/swc-linux-arm64-gnu@14.2.32':
+ resolution: {integrity: sha512-v7JaO0oXXt6d+cFjrrKqYnR2ubrD+JYP7nQVRZgeo5uNE5hkCpWnHmXm9vy3g6foMO8SPwL0P3MPw1c+BjbAzA==}
+ engines: {node: '>= 10'}
+ cpu: [arm64]
+ os: [linux]
+ libc: [glibc]
+
+ '@next/swc-linux-arm64-musl@14.2.32':
+ resolution: {integrity: sha512-tA6sIKShXtSJBTH88i0DRd6I9n3ZTirmwpwAqH5zdJoQF7/wlJXR8DkPmKwYl5mFWhEKr5IIa3LfpMW9RRwKmQ==}
+ engines: {node: '>= 10'}
+ cpu: [arm64]
+ os: [linux]
+ libc: [musl]
+
+ '@next/swc-linux-x64-gnu@14.2.32':
+ resolution: {integrity: sha512-7S1GY4TdnlGVIdeXXKQdDkfDysoIVFMD0lJuVVMeb3eoVjrknQ0JNN7wFlhCvea0hEk0Sd4D1hedVChDKfV2jw==}
+ engines: {node: '>= 10'}
+ cpu: [x64]
+ os: [linux]
+ libc: [glibc]
+
+ '@next/swc-linux-x64-musl@14.2.32':
+ resolution: {integrity: sha512-OHHC81P4tirVa6Awk6eCQ6RBfWl8HpFsZtfEkMpJ5GjPsJ3nhPe6wKAJUZ/piC8sszUkAgv3fLflgzPStIwfWg==}
+ engines: {node: '>= 10'}
+ cpu: [x64]
+ os: [linux]
+ libc: [musl]
+
+ '@next/swc-win32-arm64-msvc@14.2.32':
+ resolution: {integrity: sha512-rORQjXsAFeX6TLYJrCG5yoIDj+NKq31Rqwn8Wpn/bkPNy5rTHvOXkW8mLFonItS7QC6M+1JIIcLe+vOCTOYpvg==}
+ engines: {node: '>= 10'}
+ cpu: [arm64]
+ os: [win32]
+
+ '@next/swc-win32-ia32-msvc@14.2.32':
+ resolution: {integrity: sha512-jHUeDPVHrgFltqoAqDB6g6OStNnFxnc7Aks3p0KE0FbwAvRg6qWKYF5mSTdCTxA3axoSAUwxYdILzXJfUwlHhA==}
+ engines: {node: '>= 10'}
+ cpu: [ia32]
+ os: [win32]
+
+ '@next/swc-win32-x64-msvc@14.2.32':
+ resolution: {integrity: sha512-2N0lSoU4GjfLSO50wvKpMQgKd4HdI2UHEhQPPPnlgfBJlOgJxkjpkYBqzk08f1gItBB6xF/n+ykso2hgxuydsA==}
+ engines: {node: '>= 10'}
+ cpu: [x64]
+ os: [win32]
+
+ '@nodelib/fs.scandir@2.1.5':
+ resolution: {integrity: sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==}
+ engines: {node: '>= 8'}
+
+ '@nodelib/fs.stat@2.0.5':
+ resolution: {integrity: sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==}
+ engines: {node: '>= 8'}
+
+ '@nodelib/fs.walk@1.2.8':
+ resolution: {integrity: sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==}
+ engines: {node: '>= 8'}
+
+ '@nolyfill/is-core-module@1.0.39':
+ resolution: {integrity: sha512-nn5ozdjYQpUCZlWGuxcJY/KpxkWQs4DcbMCmKojjyrYDEAGy4Ce19NN4v5MduafTwJlbKc99UA8YhSVqq9yPZA==}
+ engines: {node: '>=12.4.0'}
+
+ '@opentelemetry/api@1.9.0':
+ resolution: {integrity: sha512-3giAOQvZiH5F9bMlMiv8+GSPMeqg0dbaeo58/0SlA9sxSqZhnUtxzX9/2FzyhS9sWQf5S0GJE0AKBrFqjpeYcg==}
+ engines: {node: '>=8.0.0'}
+
+ '@pkgjs/parseargs@0.11.0':
+ resolution: {integrity: sha512-+1VkjdD0QBLPodGrJUeqarH8VAIvQODIbwh9XpP5Syisf7YoQgsJKPNFoqqLQlu+VQ/tVSshMR6loPMn8U+dPg==}
+ engines: {node: '>=14'}
+
+ '@radix-ui/number@1.1.1':
+ resolution: {integrity: sha512-MkKCwxlXTgz6CFoJx3pCwn07GKp36+aZyu/u2Ln2VrA5DcdyCZkASEDBTd8x5whTQQL5CiYf4prXKLcgQdv29g==}
+
+ '@radix-ui/primitive@1.1.3':
+ resolution: {integrity: sha512-JTF99U/6XIjCBo0wqkU5sK10glYe27MRRsfwoiq5zzOEZLHU3A3KCMa5X/azekYRCJ0HlwI0crAXS/5dEHTzDg==}
+
+ '@radix-ui/react-accordion@1.2.12':
+ resolution: {integrity: sha512-T4nygeh9YE9dLRPhAHSeOZi7HBXo+0kYIPJXayZfvWOWA0+n3dESrZbjfDPUABkUNym6Hd+f2IR113To8D2GPA==}
+ peerDependencies:
+ '@types/react': '*'
+ '@types/react-dom': '*'
+ react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ react-dom: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ peerDependenciesMeta:
+ '@types/react':
+ optional: true
+ '@types/react-dom':
+ optional: true
+
+ '@radix-ui/react-alert-dialog@1.1.15':
+ resolution: {integrity: sha512-oTVLkEw5GpdRe29BqJ0LSDFWI3qu0vR1M0mUkOQWDIUnY/QIkLpgDMWuKxP94c2NAC2LGcgVhG1ImF3jkZ5wXw==}
+ peerDependencies:
+ '@types/react': '*'
+ '@types/react-dom': '*'
+ react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ react-dom: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ peerDependenciesMeta:
+ '@types/react':
+ optional: true
+ '@types/react-dom':
+ optional: true
+
+ '@radix-ui/react-arrow@1.1.7':
+ resolution: {integrity: sha512-F+M1tLhO+mlQaOWspE8Wstg+z6PwxwRd8oQ8IXceWz92kfAmalTRf0EjrouQeo7QssEPfCn05B4Ihs1K9WQ/7w==}
+ peerDependencies:
+ '@types/react': '*'
+ '@types/react-dom': '*'
+ react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ react-dom: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ peerDependenciesMeta:
+ '@types/react':
+ optional: true
+ '@types/react-dom':
+ optional: true
+
+ '@radix-ui/react-collapsible@1.1.12':
+ resolution: {integrity: sha512-Uu+mSh4agx2ib1uIGPP4/CKNULyajb3p92LsVXmH2EHVMTfZWpll88XJ0j4W0z3f8NK1eYl1+Mf/szHPmcHzyA==}
+ peerDependencies:
+ '@types/react': '*'
+ '@types/react-dom': '*'
+ react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ react-dom: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ peerDependenciesMeta:
+ '@types/react':
+ optional: true
+ '@types/react-dom':
+ optional: true
+
+ '@radix-ui/react-collection@1.1.7':
+ resolution: {integrity: sha512-Fh9rGN0MoI4ZFUNyfFVNU4y9LUz93u9/0K+yLgA2bwRojxM8JU1DyvvMBabnZPBgMWREAJvU2jjVzq+LrFUglw==}
+ peerDependencies:
+ '@types/react': '*'
+ '@types/react-dom': '*'
+ react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ react-dom: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ peerDependenciesMeta:
+ '@types/react':
+ optional: true
+ '@types/react-dom':
+ optional: true
+
+ '@radix-ui/react-compose-refs@1.1.2':
+ resolution: {integrity: sha512-z4eqJvfiNnFMHIIvXP3CY57y2WJs5g2v3X0zm9mEJkrkNv4rDxu+sg9Jh8EkXyeqBkB7SOcboo9dMVqhyrACIg==}
+ peerDependencies:
+ '@types/react': '*'
+ react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ peerDependenciesMeta:
+ '@types/react':
+ optional: true
+
+ '@radix-ui/react-context@1.1.2':
+ resolution: {integrity: sha512-jCi/QKUM2r1Ju5a3J64TH2A5SpKAgh0LpknyqdQ4m6DCV0xJ2HG1xARRwNGPQfi1SLdLWZ1OJz6F4OMBBNiGJA==}
+ peerDependencies:
+ '@types/react': '*'
+ react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ peerDependenciesMeta:
+ '@types/react':
+ optional: true
+
+ '@radix-ui/react-context@1.1.3':
+ resolution: {integrity: sha512-ieIFACdMpYfMEjF0rEf5KLvfVyIkOz6PDGyNnP+u+4xQ6jny3VCgA4OgXOwNx2aUkxn8zx9fiVcM8CfFYv9Lxw==}
+ peerDependencies:
+ '@types/react': '*'
+ react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ peerDependenciesMeta:
+ '@types/react':
+ optional: true
+
+ '@radix-ui/react-dialog@1.1.15':
+ resolution: {integrity: sha512-TCglVRtzlffRNxRMEyR36DGBLJpeusFcgMVD9PZEzAKnUs1lKCgX5u9BmC2Yg+LL9MgZDugFFs1Vl+Jp4t/PGw==}
+ peerDependencies:
+ '@types/react': '*'
+ '@types/react-dom': '*'
+ react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ react-dom: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ peerDependenciesMeta:
+ '@types/react':
+ optional: true
+ '@types/react-dom':
+ optional: true
+
+ '@radix-ui/react-direction@1.1.1':
+ resolution: {integrity: sha512-1UEWRX6jnOA2y4H5WczZ44gOOjTEmlqv1uNW4GAJEO5+bauCBhv8snY65Iw5/VOS/ghKN9gr2KjnLKxrsvoMVw==}
+ peerDependencies:
+ '@types/react': '*'
+ react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ peerDependenciesMeta:
+ '@types/react':
+ optional: true
+
+ '@radix-ui/react-dismissable-layer@1.1.11':
+ resolution: {integrity: sha512-Nqcp+t5cTB8BinFkZgXiMJniQH0PsUt2k51FUhbdfeKvc4ACcG2uQniY/8+h1Yv6Kza4Q7lD7PQV0z0oicE0Mg==}
+ peerDependencies:
+ '@types/react': '*'
+ '@types/react-dom': '*'
+ react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ react-dom: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ peerDependenciesMeta:
+ '@types/react':
+ optional: true
+ '@types/react-dom':
+ optional: true
+
+ '@radix-ui/react-dropdown-menu@2.1.16':
+ resolution: {integrity: sha512-1PLGQEynI/3OX/ftV54COn+3Sud/Mn8vALg2rWnBLnRaGtJDduNW/22XjlGgPdpcIbiQxjKtb7BkcjP00nqfJw==}
+ peerDependencies:
+ '@types/react': '*'
+ '@types/react-dom': '*'
+ react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ react-dom: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ peerDependenciesMeta:
+ '@types/react':
+ optional: true
+ '@types/react-dom':
+ optional: true
+
+ '@radix-ui/react-focus-guards@1.1.3':
+ resolution: {integrity: sha512-0rFg/Rj2Q62NCm62jZw0QX7a3sz6QCQU0LpZdNrJX8byRGaGVTqbrW9jAoIAHyMQqsNpeZ81YgSizOt5WXq0Pw==}
+ peerDependencies:
+ '@types/react': '*'
+ react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ peerDependenciesMeta:
+ '@types/react':
+ optional: true
+
+ '@radix-ui/react-focus-scope@1.1.7':
+ resolution: {integrity: sha512-t2ODlkXBQyn7jkl6TNaw/MtVEVvIGelJDCG41Okq/KwUsJBwQ4XVZsHAVUkK4mBv3ewiAS3PGuUWuY2BoK4ZUw==}
+ peerDependencies:
+ '@types/react': '*'
+ '@types/react-dom': '*'
+ react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ react-dom: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ peerDependenciesMeta:
+ '@types/react':
+ optional: true
+ '@types/react-dom':
+ optional: true
+
+ '@radix-ui/react-id@1.1.1':
+ resolution: {integrity: sha512-kGkGegYIdQsOb4XjsfM97rXsiHaBwco+hFI66oO4s9LU+PLAC5oJ7khdOVFxkhsmlbpUqDAvXw11CluXP+jkHg==}
+ peerDependencies:
+ '@types/react': '*'
+ react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ peerDependenciesMeta:
+ '@types/react':
+ optional: true
+
+ '@radix-ui/react-label@2.1.8':
+ resolution: {integrity: sha512-FmXs37I6hSBVDlO4y764TNz1rLgKwjJMQ0EGte6F3Cb3f4bIuHB/iLa/8I9VKkmOy+gNHq8rql3j686ACVV21A==}
+ peerDependencies:
+ '@types/react': '*'
+ '@types/react-dom': '*'
+ react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ react-dom: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ peerDependenciesMeta:
+ '@types/react':
+ optional: true
+ '@types/react-dom':
+ optional: true
+
+ '@radix-ui/react-menu@2.1.16':
+ resolution: {integrity: sha512-72F2T+PLlphrqLcAotYPp0uJMr5SjP5SL01wfEspJbru5Zs5vQaSHb4VB3ZMJPimgHHCHG7gMOeOB9H3Hdmtxg==}
+ peerDependencies:
+ '@types/react': '*'
+ '@types/react-dom': '*'
+ react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ react-dom: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ peerDependenciesMeta:
+ '@types/react':
+ optional: true
+ '@types/react-dom':
+ optional: true
+
+ '@radix-ui/react-popover@1.1.15':
+ resolution: {integrity: sha512-kr0X2+6Yy/vJzLYJUPCZEc8SfQcf+1COFoAqauJm74umQhta9M7lNJHP7QQS3vkvcGLQUbWpMzwrXYwrYztHKA==}
+ peerDependencies:
+ '@types/react': '*'
+ '@types/react-dom': '*'
+ react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ react-dom: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ peerDependenciesMeta:
+ '@types/react':
+ optional: true
+ '@types/react-dom':
+ optional: true
+
+ '@radix-ui/react-popper@1.2.8':
+ resolution: {integrity: sha512-0NJQ4LFFUuWkE7Oxf0htBKS6zLkkjBH+hM1uk7Ng705ReR8m/uelduy1DBo0PyBXPKVnBA6YBlU94MBGXrSBCw==}
+ peerDependencies:
+ '@types/react': '*'
+ '@types/react-dom': '*'
+ react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ react-dom: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ peerDependenciesMeta:
+ '@types/react':
+ optional: true
+ '@types/react-dom':
+ optional: true
+
+ '@radix-ui/react-portal@1.1.9':
+ resolution: {integrity: sha512-bpIxvq03if6UNwXZ+HTK71JLh4APvnXntDc6XOX8UVq4XQOVl7lwok0AvIl+b8zgCw3fSaVTZMpAPPagXbKmHQ==}
+ peerDependencies:
+ '@types/react': '*'
+ '@types/react-dom': '*'
+ react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ react-dom: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ peerDependenciesMeta:
+ '@types/react':
+ optional: true
+ '@types/react-dom':
+ optional: true
+
+ '@radix-ui/react-presence@1.1.5':
+ resolution: {integrity: sha512-/jfEwNDdQVBCNvjkGit4h6pMOzq8bHkopq458dPt2lMjx+eBQUohZNG9A7DtO/O5ukSbxuaNGXMjHicgwy6rQQ==}
+ peerDependencies:
+ '@types/react': '*'
+ '@types/react-dom': '*'
+ react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ react-dom: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ peerDependenciesMeta:
+ '@types/react':
+ optional: true
+ '@types/react-dom':
+ optional: true
+
+ '@radix-ui/react-primitive@2.1.3':
+ resolution: {integrity: sha512-m9gTwRkhy2lvCPe6QJp4d3G1TYEUHn/FzJUtq9MjH46an1wJU+GdoGC5VLof8RX8Ft/DlpshApkhswDLZzHIcQ==}
+ peerDependencies:
+ '@types/react': '*'
+ '@types/react-dom': '*'
+ react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ react-dom: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ peerDependenciesMeta:
+ '@types/react':
+ optional: true
+ '@types/react-dom':
+ optional: true
+
+ '@radix-ui/react-primitive@2.1.4':
+ resolution: {integrity: sha512-9hQc4+GNVtJAIEPEqlYqW5RiYdrr8ea5XQ0ZOnD6fgru+83kqT15mq2OCcbe8KnjRZl5vF3ks69AKz3kh1jrhg==}
+ peerDependencies:
+ '@types/react': '*'
+ '@types/react-dom': '*'
+ react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ react-dom: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ peerDependenciesMeta:
+ '@types/react':
+ optional: true
+ '@types/react-dom':
+ optional: true
+
+ '@radix-ui/react-progress@1.1.8':
+ resolution: {integrity: sha512-+gISHcSPUJ7ktBy9RnTqbdKW78bcGke3t6taawyZ71pio1JewwGSJizycs7rLhGTvMJYCQB1DBK4KQsxs7U8dA==}
+ peerDependencies:
+ '@types/react': '*'
+ '@types/react-dom': '*'
+ react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ react-dom: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ peerDependenciesMeta:
+ '@types/react':
+ optional: true
+ '@types/react-dom':
+ optional: true
+
+ '@radix-ui/react-roving-focus@1.1.11':
+ resolution: {integrity: sha512-7A6S9jSgm/S+7MdtNDSb+IU859vQqJ/QAtcYQcfFC6W8RS4IxIZDldLR0xqCFZ6DCyrQLjLPsxtTNch5jVA4lA==}
+ peerDependencies:
+ '@types/react': '*'
+ '@types/react-dom': '*'
+ react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ react-dom: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ peerDependenciesMeta:
+ '@types/react':
+ optional: true
+ '@types/react-dom':
+ optional: true
+
+ '@radix-ui/react-select@2.2.6':
+ resolution: {integrity: sha512-I30RydO+bnn2PQztvo25tswPH+wFBjehVGtmagkU78yMdwTwVf12wnAOF+AeP8S2N8xD+5UPbGhkUfPyvT+mwQ==}
+ peerDependencies:
+ '@types/react': '*'
+ '@types/react-dom': '*'
+ react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ react-dom: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ peerDependenciesMeta:
+ '@types/react':
+ optional: true
+ '@types/react-dom':
+ optional: true
+
+ '@radix-ui/react-slot@1.2.3':
+ resolution: {integrity: sha512-aeNmHnBxbi2St0au6VBVC7JXFlhLlOnvIIlePNniyUNAClzmtAUEY8/pBiK3iHjufOlwA+c20/8jngo7xcrg8A==}
+ peerDependencies:
+ '@types/react': '*'
+ react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ peerDependenciesMeta:
+ '@types/react':
+ optional: true
+
+ '@radix-ui/react-slot@1.2.4':
+ resolution: {integrity: sha512-Jl+bCv8HxKnlTLVrcDE8zTMJ09R9/ukw4qBs/oZClOfoQk/cOTbDn+NceXfV7j09YPVQUryJPHurafcSg6EVKA==}
+ peerDependencies:
+ '@types/react': '*'
+ react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ peerDependenciesMeta:
+ '@types/react':
+ optional: true
+
+ '@radix-ui/react-switch@1.2.6':
+ resolution: {integrity: sha512-bByzr1+ep1zk4VubeEVViV592vu2lHE2BZY5OnzehZqOOgogN80+mNtCqPkhn2gklJqOpxWgPoYTSnhBCqpOXQ==}
+ peerDependencies:
+ '@types/react': '*'
+ '@types/react-dom': '*'
+ react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ react-dom: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ peerDependenciesMeta:
+ '@types/react':
+ optional: true
+ '@types/react-dom':
+ optional: true
+
+ '@radix-ui/react-tabs@1.1.13':
+ resolution: {integrity: sha512-7xdcatg7/U+7+Udyoj2zodtI9H/IIopqo+YOIcZOq1nJwXWBZ9p8xiu5llXlekDbZkca79a/fozEYQXIA4sW6A==}
+ peerDependencies:
+ '@types/react': '*'
+ '@types/react-dom': '*'
+ react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ react-dom: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ peerDependenciesMeta:
+ '@types/react':
+ optional: true
+ '@types/react-dom':
+ optional: true
+
+ '@radix-ui/react-toast@1.2.15':
+ resolution: {integrity: sha512-3OSz3TacUWy4WtOXV38DggwxoqJK4+eDkNMl5Z/MJZaoUPaP4/9lf81xXMe1I2ReTAptverZUpbPY4wWwWyL5g==}
+ peerDependencies:
+ '@types/react': '*'
+ '@types/react-dom': '*'
+ react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ react-dom: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ peerDependenciesMeta:
+ '@types/react':
+ optional: true
+ '@types/react-dom':
+ optional: true
+
+ '@radix-ui/react-use-callback-ref@1.1.1':
+ resolution: {integrity: sha512-FkBMwD+qbGQeMu1cOHnuGB6x4yzPjho8ap5WtbEJ26umhgqVXbhekKUQO+hZEL1vU92a3wHwdp0HAcqAUF5iDg==}
+ peerDependencies:
+ '@types/react': '*'
+ react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ peerDependenciesMeta:
+ '@types/react':
+ optional: true
+
+ '@radix-ui/react-use-controllable-state@1.2.2':
+ resolution: {integrity: sha512-BjasUjixPFdS+NKkypcyyN5Pmg83Olst0+c6vGov0diwTEo6mgdqVR6hxcEgFuh4QrAs7Rc+9KuGJ9TVCj0Zzg==}
+ peerDependencies:
+ '@types/react': '*'
+ react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ peerDependenciesMeta:
+ '@types/react':
+ optional: true
+
+ '@radix-ui/react-use-effect-event@0.0.2':
+ resolution: {integrity: sha512-Qp8WbZOBe+blgpuUT+lw2xheLP8q0oatc9UpmiemEICxGvFLYmHm9QowVZGHtJlGbS6A6yJ3iViad/2cVjnOiA==}
+ peerDependencies:
+ '@types/react': '*'
+ react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ peerDependenciesMeta:
+ '@types/react':
+ optional: true
+
+ '@radix-ui/react-use-escape-keydown@1.1.1':
+ resolution: {integrity: sha512-Il0+boE7w/XebUHyBjroE+DbByORGR9KKmITzbR7MyQ4akpORYP/ZmbhAr0DG7RmmBqoOnZdy2QlvajJ2QA59g==}
+ peerDependencies:
+ '@types/react': '*'
+ react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ peerDependenciesMeta:
+ '@types/react':
+ optional: true
+
+ '@radix-ui/react-use-layout-effect@1.1.1':
+ resolution: {integrity: sha512-RbJRS4UWQFkzHTTwVymMTUv8EqYhOp8dOOviLj2ugtTiXRaRQS7GLGxZTLL1jWhMeoSCf5zmcZkqTl9IiYfXcQ==}
+ peerDependencies:
+ '@types/react': '*'
+ react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ peerDependenciesMeta:
+ '@types/react':
+ optional: true
+
+ '@radix-ui/react-use-previous@1.1.1':
+ resolution: {integrity: sha512-2dHfToCj/pzca2Ck724OZ5L0EVrr3eHRNsG/b3xQJLA2hZpVCS99bLAX+hm1IHXDEnzU6by5z/5MIY794/a8NQ==}
+ peerDependencies:
+ '@types/react': '*'
+ react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ peerDependenciesMeta:
+ '@types/react':
+ optional: true
+
+ '@radix-ui/react-use-rect@1.1.1':
+ resolution: {integrity: sha512-QTYuDesS0VtuHNNvMh+CjlKJ4LJickCMUAqjlE3+j8w+RlRpwyX3apEQKGFzbZGdo7XNG1tXa+bQqIE7HIXT2w==}
+ peerDependencies:
+ '@types/react': '*'
+ react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ peerDependenciesMeta:
+ '@types/react':
+ optional: true
+
+ '@radix-ui/react-use-size@1.1.1':
+ resolution: {integrity: sha512-ewrXRDTAqAXlkl6t/fkXWNAhFX9I+CkKlw6zjEwk86RSPKwZr3xpBRso655aqYafwtnbpHLj6toFzmd6xdVptQ==}
+ peerDependencies:
+ '@types/react': '*'
+ react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ peerDependenciesMeta:
+ '@types/react':
+ optional: true
+
+ '@radix-ui/react-visually-hidden@1.2.3':
+ resolution: {integrity: sha512-pzJq12tEaaIhqjbzpCuv/OypJY/BPavOofm+dbab+MHLajy277+1lLm6JFcGgF5eskJ6mquGirhXY2GD/8u8Ug==}
+ peerDependencies:
+ '@types/react': '*'
+ '@types/react-dom': '*'
+ react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ react-dom: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ peerDependenciesMeta:
+ '@types/react':
+ optional: true
+ '@types/react-dom':
+ optional: true
+
+ '@radix-ui/rect@1.1.1':
+ resolution: {integrity: sha512-HPwpGIzkl28mWyZqG52jiqDJ12waP11Pa1lGoiyUkIEuMLBP0oeK/C89esbXrxsky5we7dfd8U58nm0SgAWpVw==}
+
+ '@rtsao/scc@1.1.0':
+ resolution: {integrity: sha512-zt6OdqaDoOnJ1ZYsCYGt9YmWzDXl4vQdKTyJev62gFhRGKdx7mcT54V9KIjg+d2wi9EXsPvAPKe7i7WjfVWB8g==}
+
+ '@rushstack/eslint-patch@1.16.1':
+ resolution: {integrity: sha512-TvZbIpeKqGQQ7X0zSCvPH9riMSFQFSggnfBjFZ1mEoILW+UuXCKwOoPcgjMwiUtRqFZ8jWhPJc4um14vC6I4ag==}
+
+ '@swc/counter@0.1.3':
+ resolution: {integrity: sha512-e2BR4lsJkkRlKZ/qCHPw9ZaSxc0MVUd7gtbtaB7aMvHeJVYe8sOB8DBZkP2DtISHGSku9sCK6T6cnY0CtXrOCQ==}
+
+ '@swc/helpers@0.5.5':
+ resolution: {integrity: sha512-KGYxvIOXcceOAbEk4bi/dVLEK9z8sZ0uBB3Il5b1rhfClSpcX0yfRO0KmTkqR2cnQDymwLB+25ZyMzICg/cm/A==}
+
+ '@tailwindcss/line-clamp@0.4.4':
+ resolution: {integrity: sha512-5U6SY5z8N42VtrCrKlsTAA35gy2VSyYtHWCsg1H87NU1SXnEfekTVlrga9fzUDrrHcGi2Lb5KenUWb4lRQT5/g==}
+ peerDependencies:
+ tailwindcss: '>=2.0.0 || >=3.0.0 || >=3.0.0-alpha.1'
+
+ '@ts-morph/common@0.19.0':
+ resolution: {integrity: sha512-Unz/WHmd4pGax91rdIKWi51wnVUW11QttMEPpBiBgIewnc9UQIX7UDLxr5vRlqeByXCwhkF6VabSsI0raWcyAQ==}
+
+ '@tybys/wasm-util@0.10.1':
+ resolution: {integrity: sha512-9tTaPJLSiejZKx+Bmog4uSubteqTvFrVrURwkmHixBo0G4seD0zUxp98E1DzUBJxLQ3NPwXrGKDiVjwx/DpPsg==}
+
+ '@types/debug@4.1.13':
+ resolution: {integrity: sha512-KSVgmQmzMwPlmtljOomayoR89W4FynCAi3E8PPs7vmDVPe84hT+vGPKkJfThkmXs0x0jAaa9U8uW8bbfyS2fWw==}
+
+ '@types/diff-match-patch@1.0.36':
+ resolution: {integrity: sha512-xFdR6tkm0MWvBfO8xXCSsinYxHcqkQUlcHeSpMC2ukzOb6lwQAfDmW+Qt0AvlGd8HpsS28qKsB+oPeJn9I39jg==}
+
+ '@types/estree-jsx@1.0.5':
+ resolution: {integrity: sha512-52CcUVNFyfb1A2ALocQw/Dd1BQFNmSdkuC3BkZ6iqhdMfQz7JWOFRuJFloOzjk+6WijU56m9oKXFAXc7o3Towg==}
+
+ '@types/estree@1.0.8':
+ resolution: {integrity: sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w==}
+
+ '@types/hast@2.3.10':
+ resolution: {integrity: sha512-McWspRw8xx8J9HurkVBfYj0xKoE25tOFlHGdx4MJ5xORQrMGZNqJhVQWaIbm6Oyla5kYOXtDiopzKRJzEOkwJw==}
+
+ '@types/hast@3.0.4':
+ resolution: {integrity: sha512-WPs+bbQw5aCj+x6laNGWLH3wviHtoCv/P3+otBhbOhJgG8qtpdAMlTCxLtsTWA7LH1Oh/bFCHsBn0TPS5m30EQ==}
+
+ '@types/json5@0.0.29':
+ resolution: {integrity: sha512-dRLjCWHYg4oaA77cxO64oO+7JwCwnIzkZPdrrC71jQmQtlhM556pwKo5bUzqvZndkVbeFLIIi+9TC40JNF5hNQ==}
+
+ '@types/mdast@4.0.4':
+ resolution: {integrity: sha512-kGaNbPh1k7AFzgpud/gMdvIm5xuECykRR+JnWKQno9TAXVa6WIVCGTPvYGekIDL4uwCZQSYbUxNBSb1aUo79oA==}
+
+ '@types/ms@2.1.0':
+ resolution: {integrity: sha512-GsCCIZDE/p3i96vtEqx+7dBUGXrc7zeSK3wwPHIaRThS+9OhWIXRqzs4d6k1SVU8g91DrNRWxWUGhp5KXQb2VA==}
+
+ '@types/node@20.19.39':
+ resolution: {integrity: sha512-orrrD74MBUyK8jOAD/r0+lfa1I2MO6I+vAkmAWzMYbCcgrN4lCrmK52gRFQq/JRxfYPfonkr4b0jcY7Olqdqbw==}
+
+ '@types/prop-types@15.7.15':
+ resolution: {integrity: sha512-F6bEyamV9jKGAFBEmlQnesRPGOQqS2+Uwi0Em15xenOxHaf2hv6L8YCVn3rPdPJOiJfPiCnLIRyvwVaqMY3MIw==}
+
+ '@types/react-dom@18.3.7':
+ resolution: {integrity: sha512-MEe3UeoENYVFXzoXEWsvcpg6ZvlrFNlOQ7EOsvhI3CfAXwzPfO8Qwuxd40nepsYKqyyVQnTdEfv68q91yLcKrQ==}
+ peerDependencies:
+ '@types/react': ^18.0.0
+
+ '@types/react-file-icon@1.0.5':
+ resolution: {integrity: sha512-2QkghuxsQhwJ7J1QdkFxMEciPyh/H/fX/ShAHAje1iuwcOnCI3AwnMZ3LHYvNv/3XYArV5KCbMHOtqiHIYJg0Q==}
+
+ '@types/react@18.3.28':
+ resolution: {integrity: sha512-z9VXpC7MWrhfWipitjNdgCauoMLRdIILQsAEV+ZesIzBq/oUlxk0m3ApZuMFCXdnS4U7KrI+l3WRUEGQ8K1QKw==}
+
+ '@types/unist@2.0.11':
+ resolution: {integrity: sha512-CmBKiL6NNo/OqgmMn95Fk9Whlp2mtvIv+KNpQKN2F4SjvrEesubTRWGYSg+BnWZOnlCaSTU1sMpsBOzgbYhnsA==}
+
+ '@types/unist@3.0.3':
+ resolution: {integrity: sha512-ko/gIFJRv177XgZsZcBwnqJN5x/Gien8qNOn0D5bQU/zAzVf9Zt3BlcUiLqhV9y4ARk0GbT3tnUiPNgnTXzc/Q==}
+
+ '@typescript-eslint/parser@6.21.0':
+ resolution: {integrity: sha512-tbsV1jPne5CkFQCgPBcDOt30ItF7aJoZL997JSF7MhGQqOeT3svWRYxiqlfA5RUdlHN6Fi+EI9bxqbdyAUZjYQ==}
+ engines: {node: ^16.0.0 || >=18.0.0}
+ peerDependencies:
+ eslint: ^7.0.0 || ^8.0.0
+ typescript: '*'
+ peerDependenciesMeta:
+ typescript:
+ optional: true
+
+ '@typescript-eslint/scope-manager@6.21.0':
+ resolution: {integrity: sha512-OwLUIWZJry80O99zvqXVEioyniJMa+d2GrqpUTqi5/v5D5rOrppJVBPa0yKCblcigC0/aYAzxxqQ1B+DS2RYsg==}
+ engines: {node: ^16.0.0 || >=18.0.0}
+
+ '@typescript-eslint/types@6.21.0':
+ resolution: {integrity: sha512-1kFmZ1rOm5epu9NZEZm1kckCDGj5UJEf7P1kliH4LKu/RkwpsfqqGmY2OOcUs18lSlQBKLDYBOGxRVtrMN5lpg==}
+ engines: {node: ^16.0.0 || >=18.0.0}
+
+ '@typescript-eslint/typescript-estree@6.21.0':
+ resolution: {integrity: sha512-6npJTkZcO+y2/kr+z0hc4HwNfrrP4kNYh57ek7yCNlrBjWQ1Y0OS7jiZTkgumrvkX5HkEKXFZkkdFNkaW2wmUQ==}
+ engines: {node: ^16.0.0 || >=18.0.0}
+ peerDependencies:
+ typescript: '*'
+ peerDependenciesMeta:
+ typescript:
+ optional: true
+
+ '@typescript-eslint/visitor-keys@6.21.0':
+ resolution: {integrity: sha512-JJtkDduxLi9bivAB+cYOVMtbkqdPOhZ+ZI5LC47MIRrDV4Yn2o+ZnW10Nkmr28xRpSpdJ6Sm42Hjf2+REYXm0A==}
+ engines: {node: ^16.0.0 || >=18.0.0}
+
+ '@ungap/structured-clone@1.3.0':
+ resolution: {integrity: sha512-WmoN8qaIAo7WTYWbAZuG8PYEhn5fkz7dZrqTBZ7dtt//lL2Gwms1IcnQ5yHqjDfX8Ft5j4YzDM23f87zBfDe9g==}
+
+ '@unrs/resolver-binding-android-arm-eabi@1.11.1':
+ resolution: {integrity: sha512-ppLRUgHVaGRWUx0R0Ut06Mjo9gBaBkg3v/8AxusGLhsIotbBLuRk51rAzqLC8gq6NyyAojEXglNjzf6R948DNw==}
+ cpu: [arm]
+ os: [android]
+
+ '@unrs/resolver-binding-android-arm64@1.11.1':
+ resolution: {integrity: sha512-lCxkVtb4wp1v+EoN+HjIG9cIIzPkX5OtM03pQYkG+U5O/wL53LC4QbIeazgiKqluGeVEeBlZahHalCaBvU1a2g==}
+ cpu: [arm64]
+ os: [android]
+
+ '@unrs/resolver-binding-darwin-arm64@1.11.1':
+ resolution: {integrity: sha512-gPVA1UjRu1Y/IsB/dQEsp2V1pm44Of6+LWvbLc9SDk1c2KhhDRDBUkQCYVWe6f26uJb3fOK8saWMgtX8IrMk3g==}
+ cpu: [arm64]
+ os: [darwin]
+
+ '@unrs/resolver-binding-darwin-x64@1.11.1':
+ resolution: {integrity: sha512-cFzP7rWKd3lZaCsDze07QX1SC24lO8mPty9vdP+YVa3MGdVgPmFc59317b2ioXtgCMKGiCLxJ4HQs62oz6GfRQ==}
+ cpu: [x64]
+ os: [darwin]
+
+ '@unrs/resolver-binding-freebsd-x64@1.11.1':
+ resolution: {integrity: sha512-fqtGgak3zX4DCB6PFpsH5+Kmt/8CIi4Bry4rb1ho6Av2QHTREM+47y282Uqiu3ZRF5IQioJQ5qWRV6jduA+iGw==}
+ cpu: [x64]
+ os: [freebsd]
+
+ '@unrs/resolver-binding-linux-arm-gnueabihf@1.11.1':
+ resolution: {integrity: sha512-u92mvlcYtp9MRKmP+ZvMmtPN34+/3lMHlyMj7wXJDeXxuM0Vgzz0+PPJNsro1m3IZPYChIkn944wW8TYgGKFHw==}
+ cpu: [arm]
+ os: [linux]
+
+ '@unrs/resolver-binding-linux-arm-musleabihf@1.11.1':
+ resolution: {integrity: sha512-cINaoY2z7LVCrfHkIcmvj7osTOtm6VVT16b5oQdS4beibX2SYBwgYLmqhBjA1t51CarSaBuX5YNsWLjsqfW5Cw==}
+ cpu: [arm]
+ os: [linux]
+
+ '@unrs/resolver-binding-linux-arm64-gnu@1.11.1':
+ resolution: {integrity: sha512-34gw7PjDGB9JgePJEmhEqBhWvCiiWCuXsL9hYphDF7crW7UgI05gyBAi6MF58uGcMOiOqSJ2ybEeCvHcq0BCmQ==}
+ cpu: [arm64]
+ os: [linux]
+ libc: [glibc]
+
+ '@unrs/resolver-binding-linux-arm64-musl@1.11.1':
+ resolution: {integrity: sha512-RyMIx6Uf53hhOtJDIamSbTskA99sPHS96wxVE/bJtePJJtpdKGXO1wY90oRdXuYOGOTuqjT8ACccMc4K6QmT3w==}
+ cpu: [arm64]
+ os: [linux]
+ libc: [musl]
+
+ '@unrs/resolver-binding-linux-ppc64-gnu@1.11.1':
+ resolution: {integrity: sha512-D8Vae74A4/a+mZH0FbOkFJL9DSK2R6TFPC9M+jCWYia/q2einCubX10pecpDiTmkJVUH+y8K3BZClycD8nCShA==}
+ cpu: [ppc64]
+ os: [linux]
+ libc: [glibc]
+
+ '@unrs/resolver-binding-linux-riscv64-gnu@1.11.1':
+ resolution: {integrity: sha512-frxL4OrzOWVVsOc96+V3aqTIQl1O2TjgExV4EKgRY09AJ9leZpEg8Ak9phadbuX0BA4k8U5qtvMSQQGGmaJqcQ==}
+ cpu: [riscv64]
+ os: [linux]
+ libc: [glibc]
+
+ '@unrs/resolver-binding-linux-riscv64-musl@1.11.1':
+ resolution: {integrity: sha512-mJ5vuDaIZ+l/acv01sHoXfpnyrNKOk/3aDoEdLO/Xtn9HuZlDD6jKxHlkN8ZhWyLJsRBxfv9GYM2utQ1SChKew==}
+ cpu: [riscv64]
+ os: [linux]
+ libc: [musl]
+
+ '@unrs/resolver-binding-linux-s390x-gnu@1.11.1':
+ resolution: {integrity: sha512-kELo8ebBVtb9sA7rMe1Cph4QHreByhaZ2QEADd9NzIQsYNQpt9UkM9iqr2lhGr5afh885d/cB5QeTXSbZHTYPg==}
+ cpu: [s390x]
+ os: [linux]
+ libc: [glibc]
+
+ '@unrs/resolver-binding-linux-x64-gnu@1.11.1':
+ resolution: {integrity: sha512-C3ZAHugKgovV5YvAMsxhq0gtXuwESUKc5MhEtjBpLoHPLYM+iuwSj3lflFwK3DPm68660rZ7G8BMcwSro7hD5w==}
+ cpu: [x64]
+ os: [linux]
+ libc: [glibc]
+
+ '@unrs/resolver-binding-linux-x64-musl@1.11.1':
+ resolution: {integrity: sha512-rV0YSoyhK2nZ4vEswT/QwqzqQXw5I6CjoaYMOX0TqBlWhojUf8P94mvI7nuJTeaCkkds3QE4+zS8Ko+GdXuZtA==}
+ cpu: [x64]
+ os: [linux]
+ libc: [musl]
+
+ '@unrs/resolver-binding-wasm32-wasi@1.11.1':
+ resolution: {integrity: sha512-5u4RkfxJm+Ng7IWgkzi3qrFOvLvQYnPBmjmZQ8+szTK/b31fQCnleNl1GgEt7nIsZRIf5PLhPwT0WM+q45x/UQ==}
+ engines: {node: '>=14.0.0'}
+ cpu: [wasm32]
+
+ '@unrs/resolver-binding-win32-arm64-msvc@1.11.1':
+ resolution: {integrity: sha512-nRcz5Il4ln0kMhfL8S3hLkxI85BXs3o8EYoattsJNdsX4YUU89iOkVn7g0VHSRxFuVMdM4Q1jEpIId1Ihim/Uw==}
+ cpu: [arm64]
+ os: [win32]
+
+ '@unrs/resolver-binding-win32-ia32-msvc@1.11.1':
+ resolution: {integrity: sha512-DCEI6t5i1NmAZp6pFonpD5m7i6aFrpofcp4LA2i8IIq60Jyo28hamKBxNrZcyOwVOZkgsRp9O2sXWBWP8MnvIQ==}
+ cpu: [ia32]
+ os: [win32]
+
+ '@unrs/resolver-binding-win32-x64-msvc@1.11.1':
+ resolution: {integrity: sha512-lrW200hZdbfRtztbygyaq/6jP6AKE8qQN2KvPcJ+x7wiD038YtnYtZ82IMNJ69GJibV7bwL3y9FgK+5w/pYt6g==}
+ cpu: [x64]
+ os: [win32]
+
+ acorn-jsx@5.3.2:
+ resolution: {integrity: sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==}
+ peerDependencies:
+ acorn: ^6.0.0 || ^7.0.0 || ^8.0.0
+
+ acorn@8.16.0:
+ resolution: {integrity: sha512-UVJyE9MttOsBQIDKw1skb9nAwQuR5wuGD3+82K6JgJlm/Y+KI92oNsMNGZCYdDsVtRHSak0pcV5Dno5+4jh9sw==}
+ engines: {node: '>=0.4.0'}
+ hasBin: true
+
+ agent-base@7.1.4:
+ resolution: {integrity: sha512-MnA+YT8fwfJPgBx3m60MNqakm30XOkyIoH1y6huTQvC0PwZG7ki8NacLBcrPbNoo8vEZy7Jpuk7+jMO+CUovTQ==}
+ engines: {node: '>= 14'}
+
+ ai@4.3.19:
+ resolution: {integrity: sha512-dIE2bfNpqHN3r6IINp9znguYdhIOheKW2LDigAMrgt/upT3B8eBGPSCblENvaZGoq+hxaN9fSMzjWpbqloP+7Q==}
+ engines: {node: '>=18'}
+ peerDependencies:
+ react: ^18 || ^19 || ^19.0.0-rc
+ zod: ^3.23.8
+ peerDependenciesMeta:
+ react:
+ optional: true
+
+ ajv@6.14.0:
+ resolution: {integrity: sha512-IWrosm/yrn43eiKqkfkHis7QioDleaXQHdDVPKg0FSwwd/DuvyX79TZnFOnYpB7dcsFAMmtFztZuXPDvSePkFw==}
+
+ ansi-regex@5.0.1:
+ resolution: {integrity: sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==}
+ engines: {node: '>=8'}
+
+ ansi-regex@6.2.2:
+ resolution: {integrity: sha512-Bq3SmSpyFHaWjPk8If9yc6svM8c56dB5BAtW4Qbw5jHTwwXXcTLoRMkpDJp6VL0XzlWaCHTXrkFURMYmD0sLqg==}
+ engines: {node: '>=12'}
+
+ ansi-styles@4.3.0:
+ resolution: {integrity: sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==}
+ engines: {node: '>=8'}
+
+ ansi-styles@6.2.3:
+ resolution: {integrity: sha512-4Dj6M28JB+oAH8kFkTLUo+a2jwOFkuqb3yucU0CANcRRUbxS0cP0nZYCGjcc3BNXwRIsUVmDGgzawme7zvJHvg==}
+ engines: {node: '>=12'}
+
+ any-promise@1.3.0:
+ resolution: {integrity: sha512-7UvmKalWRt1wgjL1RrGxoSJW/0QZFIegpeGvZG9kjp8vrRu55XTHbwnqq2GpXm9uLbcuhxm3IqX9OB4MZR1b2A==}
+
+ anymatch@3.1.3:
+ resolution: {integrity: sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==}
+ engines: {node: '>= 8'}
+
+ arg@5.0.2:
+ resolution: {integrity: sha512-PYjyFOLKQ9y57JvQ6QLo8dAgNqswh8M1RMJYdQduT6xbWSgK36P/Z/v+p888pM69jMMfS8Xd8F6I1kQ/I9HUGg==}
+
+ argparse@2.0.1:
+ resolution: {integrity: sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==}
+
+ aria-hidden@1.2.6:
+ resolution: {integrity: sha512-ik3ZgC9dY/lYVVM++OISsaYDeg1tb0VtP5uL3ouh1koGOaUMDPpbFIei4JkFimWUFPn90sbMNMXQAIVOlnYKJA==}
+ engines: {node: '>=10'}
+
+ aria-query@5.3.2:
+ resolution: {integrity: sha512-COROpnaoap1E2F000S62r6A60uHZnmlvomhfyT2DlTcrY1OrBKn2UhH7qn5wTC9zMvD0AY7csdPSNwKP+7WiQw==}
+ engines: {node: '>= 0.4'}
+
+ array-buffer-byte-length@1.0.2:
+ resolution: {integrity: sha512-LHE+8BuR7RYGDKvnrmcuSq3tDcKv9OFEXQt/HpbZhY7V6h0zlUXutnAD82GiFx9rdieCMjkvtcsPqBwgUl1Iiw==}
+ engines: {node: '>= 0.4'}
+
+ array-includes@3.1.9:
+ resolution: {integrity: sha512-FmeCCAenzH0KH381SPT5FZmiA/TmpndpcaShhfgEN9eCVjnFBqq3l1xrI42y8+PPLI6hypzou4GXw00WHmPBLQ==}
+ engines: {node: '>= 0.4'}
+
+ array-union@2.1.0:
+ resolution: {integrity: sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw==}
+ engines: {node: '>=8'}
+
+ array.prototype.findlast@1.2.5:
+ resolution: {integrity: sha512-CVvd6FHg1Z3POpBLxO6E6zr+rSKEQ9L6rZHAaY7lLfhKsWYUBBOuMs0e9o24oopj6H+geRCX0YJ+TJLBK2eHyQ==}
+ engines: {node: '>= 0.4'}
+
+ array.prototype.findlastindex@1.2.6:
+ resolution: {integrity: sha512-F/TKATkzseUExPlfvmwQKGITM3DGTK+vkAsCZoDc5daVygbJBnjEUCbgkAvVFsgfXfX4YIqZ/27G3k3tdXrTxQ==}
+ engines: {node: '>= 0.4'}
+
+ array.prototype.flat@1.3.3:
+ resolution: {integrity: sha512-rwG/ja1neyLqCuGZ5YYrznA62D4mZXg0i1cIskIUKSiqF3Cje9/wXAls9B9s1Wa2fomMsIv8czB8jZcPmxCXFg==}
+ engines: {node: '>= 0.4'}
+
+ array.prototype.flatmap@1.3.3:
+ resolution: {integrity: sha512-Y7Wt51eKJSyi80hFrJCePGGNo5ktJCslFuboqJsbf57CCPcm5zztluPlc4/aD8sWsKvlwatezpV4U1efk8kpjg==}
+ engines: {node: '>= 0.4'}
+
+ array.prototype.tosorted@1.1.4:
+ resolution: {integrity: sha512-p6Fx8B7b7ZhL/gmUsAy0D15WhvDccw3mnGNbZpi3pmeJdxtWsj2jEaI4Y6oo3XiHfzuSgPwKc04MYt6KgvC/wA==}
+ engines: {node: '>= 0.4'}
+
+ arraybuffer.prototype.slice@1.0.4:
+ resolution: {integrity: sha512-BNoCY6SXXPQ7gF2opIP4GBE+Xw7U+pHMYKuzjgCN3GwiaIR09UUeKfheyIry77QtrCBlC0KK0q5/TER/tYh3PQ==}
+ engines: {node: '>= 0.4'}
+
+ ast-types-flow@0.0.8:
+ resolution: {integrity: sha512-OH/2E5Fg20h2aPrbe+QL8JZQFko0YZaF+j4mnQ7BGhfavO7OpSLa8a0y9sBwomHdSbkhTS8TQNayBfnW5DwbvQ==}
+
+ ast-types@0.16.1:
+ resolution: {integrity: sha512-6t10qk83GOG8p0vKmaCr8eiilZwO171AvbROMtvvNiwrTly62t+7XkA8RdIIVbpMhCASAsxgAzdRSwh6nw/5Dg==}
+ engines: {node: '>=4'}
+
+ async-function@1.0.0:
+ resolution: {integrity: sha512-hsU18Ae8CDTR6Kgu9DYf0EbCr/a5iGL0rytQDobUcdpYOKokk8LEjVphnXkDkgpi0wYVsqrXuP0bZxJaTqdgoA==}
+ engines: {node: '>= 0.4'}
+
+ attr-accept@2.2.5:
+ resolution: {integrity: sha512-0bDNnY/u6pPwHDMoF0FieU354oBi0a8rD9FcsLwzcGWbc8KS8KPIi7y+s13OlVY+gMWc/9xEMUgNE6Qm8ZllYQ==}
+ engines: {node: '>=4'}
+
+ autoprefixer@10.4.27:
+ resolution: {integrity: sha512-NP9APE+tO+LuJGn7/9+cohklunJsXWiaWEfV3si4Gi/XHDwVNgkwr1J3RQYFIvPy76GmJ9/bW8vyoU1LcxwKHA==}
+ engines: {node: ^10 || ^12 || >=14}
+ hasBin: true
+ peerDependencies:
+ postcss: ^8.1.0
+
+ available-typed-arrays@1.0.7:
+ resolution: {integrity: sha512-wvUjBtSGN7+7SjNpq/9M2Tg350UZD3q62IFZLbRAR1bSMlCo1ZaeW+BJ+D090e4hIIZLBcTDWe4Mh4jvUDajzQ==}
+ engines: {node: '>= 0.4'}
+
+ axe-core@4.11.2:
+ resolution: {integrity: sha512-byD6KPdvo72y/wj2T/4zGEvvlis+PsZsn/yPS3pEO+sFpcrqRpX/TJCxvVaEsNeMrfQbCr7w163YqoD9IYwHXw==}
+ engines: {node: '>=4'}
+
+ axobject-query@4.1.0:
+ resolution: {integrity: sha512-qIj0G9wZbMGNLjLmg1PT6v2mE9AH2zlnADJD/2tC6E00hgmhUOfEB6greHPAfLRSufHqROIUTkw6E+M3lH0PTQ==}
+ engines: {node: '>= 0.4'}
+
+ bail@2.0.2:
+ resolution: {integrity: sha512-0xO6mYd7JB2YesxDKplafRpsiOzPt9V02ddPCLbY1xYGPOX24NTyN50qnUxgCPcSoYMhKpAuBTjQoRZCAkUDRw==}
+
+ balanced-match@1.0.2:
+ resolution: {integrity: sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==}
+
+ base64-js@1.5.1:
+ resolution: {integrity: sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==}
+
+ baseline-browser-mapping@2.10.16:
+ resolution: {integrity: sha512-Lyf3aK28zpsD1yQMiiHD4RvVb6UdMoo8xzG2XzFIfR9luPzOpcBlAsT/qfB1XWS1bxWT+UtE4WmQgsp297FYOA==}
+ engines: {node: '>=6.0.0'}
+ hasBin: true
+
+ binary-extensions@2.3.0:
+ resolution: {integrity: sha512-Ceh+7ox5qe7LJuLHoY0feh3pHuUDHAcRUeyL2VYghZwfpkNIy/+8Ocg0a3UuSoYzavmylwuLWQOf3hl0jjMMIw==}
+ engines: {node: '>=8'}
+
+ bl@5.1.0:
+ resolution: {integrity: sha512-tv1ZJHLfTDnXE6tMHv73YgSJaWR2AFuPwMntBe7XL/GBFHnT0CLnsHMogfk5+GzCDC5ZWarSCYaIGATZt9dNsQ==}
+
+ brace-expansion@1.1.13:
+ resolution: {integrity: sha512-9ZLprWS6EENmhEOpjCYW2c8VkmOvckIJZfkr7rBW6dObmfgJ/L1GpSYW5Hpo9lDz4D1+n0Ckz8rU7FwHDQiG/w==}
+
+ brace-expansion@2.0.3:
+ resolution: {integrity: sha512-MCV/fYJEbqx68aE58kv2cA/kiky1G8vux3OR6/jbS+jIMe/6fJWa0DTzJU7dqijOWYwHi1t29FlfYI9uytqlpA==}
+
+ braces@3.0.3:
+ resolution: {integrity: sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==}
+ engines: {node: '>=8'}
+
+ browserslist@4.28.2:
+ resolution: {integrity: sha512-48xSriZYYg+8qXna9kwqjIVzuQxi+KYWp2+5nCYnYKPTr0LvD89Jqk2Or5ogxz0NUMfIjhh2lIUX/LyX9B4oIg==}
+ engines: {node: ^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7}
+ hasBin: true
+
+ buffer@6.0.3:
+ resolution: {integrity: sha512-FTiCpNxtwiZZHEZbcbTIcZjERVICn9yq/pDFkTl95/AxzD1naBctN7YO68riM/gLSDY7sdrMby8hofADYuuqOA==}
+
+ busboy@1.6.0:
+ resolution: {integrity: sha512-8SFQbg/0hQ9xy3UNTB0YEnsNBbWfhf7RtnzpL7TkBiTBRfrQ9Fxcnz7VJsleJpyp6rVLvXiuORqjlHi5q+PYuA==}
+ engines: {node: '>=10.16.0'}
+
+ call-bind-apply-helpers@1.0.2:
+ resolution: {integrity: sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==}
+ engines: {node: '>= 0.4'}
+
+ call-bind@1.0.8:
+ resolution: {integrity: sha512-oKlSFMcMwpUg2ednkhQ454wfWiU/ul3CkJe/PEHcTKuiX6RpbehUiFMXu13HalGZxfUwCQzZG747YXBn1im9ww==}
+ engines: {node: '>= 0.4'}
+
+ call-bound@1.0.4:
+ resolution: {integrity: sha512-+ys997U96po4Kx/ABpBCqhA9EuxJaQWDQg7295H4hBphv3IZg0boBKuwYpt4YXp6MZ5AmZQnU/tyMTlRpaSejg==}
+ engines: {node: '>= 0.4'}
+
+ callsites@3.1.0:
+ resolution: {integrity: sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==}
+ engines: {node: '>=6'}
+
+ camelcase-css@2.0.1:
+ resolution: {integrity: sha512-QOSvevhslijgYwRx6Rv7zKdMF8lbRmx+uQGx2+vDc+KI/eBnsy9kit5aj23AgGu3pa4t9AgwbnXWqS+iOY+2aA==}
+ engines: {node: '>= 6'}
+
+ caniuse-lite@1.0.30001786:
+ resolution: {integrity: sha512-4oxTZEvqmLLrERwxO76yfKM7acZo310U+v4kqexI2TL1DkkUEMT8UijrxxcnVdxR3qkVf5awGRX+4Z6aPHVKrA==}
+
+ ccount@2.0.1:
+ resolution: {integrity: sha512-eyrF0jiFpY+3drT6383f1qhkbGsLSifNAjA61IUjZjmLCWjItY6LB9ft9YhoDgwfmclB2zhu51Lc7+95b8NRAg==}
+
+ chalk@4.1.2:
+ resolution: {integrity: sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==}
+ engines: {node: '>=10'}
+
+ chalk@5.2.0:
+ resolution: {integrity: sha512-ree3Gqw/nazQAPuJJEy+avdl7QfZMcUvmHIKgEZkGL+xOBzRvup5Hxo6LHuMceSxOabuJLJm5Yp/92R9eMmMvA==}
+ engines: {node: ^12.17.0 || ^14.13 || >=16.0.0}
+
+ chalk@5.6.2:
+ resolution: {integrity: sha512-7NzBL0rN6fMUW+f7A6Io4h40qQlG+xGmtMxfbnH/K7TAtt8JQWVQK+6g0UXKMeVJoyV5EkkNsErQ8pVD3bLHbA==}
+ engines: {node: ^12.17.0 || ^14.13 || >=16.0.0}
+
+ character-entities-html4@2.1.0:
+ resolution: {integrity: sha512-1v7fgQRj6hnSwFpq1Eu0ynr/CDEw0rXo2B61qXrLNdHZmPKgb7fqS1a2JwF0rISo9q77jDI8VMEHoApn8qDoZA==}
+
+ character-entities-legacy@1.1.4:
+ resolution: {integrity: sha512-3Xnr+7ZFS1uxeiUDvV02wQ+QDbc55o97tIV5zHScSPJpcLm/r0DFPcoY3tYRp+VZukxuMeKgXYmsXQHO05zQeA==}
+
+ character-entities-legacy@3.0.0:
+ resolution: {integrity: sha512-RpPp0asT/6ufRm//AJVwpViZbGM/MkjQFxJccQRHmISF/22NBtsHqAWmL+/pmkPWoIUJdWyeVleTl1wydHATVQ==}
+
+ character-entities@1.2.4:
+ resolution: {integrity: sha512-iBMyeEHxfVnIakwOuDXpVkc54HijNgCyQB2w0VfGQThle6NXn50zU6V/u+LDhxHcDUPojn6Kpga3PTAD8W1bQw==}
+
+ character-entities@2.0.2:
+ resolution: {integrity: sha512-shx7oQ0Awen/BRIdkjkvz54PnEEI/EjwXDSIZp86/KKdbafHh1Df/RYGBhn4hbe2+uKC9FnT5UCEdyPz3ai9hQ==}
+
+ character-reference-invalid@1.1.4:
+ resolution: {integrity: sha512-mKKUkUbhPpQlCOfIuZkvSEgktjPFIsZKRRbC6KWVEMvlzblj3i3asQv5ODsrwt0N3pHAEvjP8KTQPHkp0+6jOg==}
+
+ character-reference-invalid@2.0.1:
+ resolution: {integrity: sha512-iBZ4F4wRbyORVsu0jPV7gXkOsGYjGHPmAyv+HiHG8gi5PtC9KI2j1+v8/tlibRvjoWX027ypmG/n0HtO5t7unw==}
+
+ chokidar@3.6.0:
+ resolution: {integrity: sha512-7VT13fmjotKpGipCW9JEQAusEPE+Ei8nl6/g4FBAmIm0GOOLMua9NDDo/DWp0ZAxCr3cPq5ZpBqmPAQgDda2Pw==}
+ engines: {node: '>= 8.10.0'}
+
+ class-variance-authority@0.7.1:
+ resolution: {integrity: sha512-Ka+9Trutv7G8M6WT6SeiRWz792K5qEqIGEGzXKhAE6xOWAY6pPH8U+9IY3oCMv6kqTmLsv7Xh/2w2RigkePMsg==}
+
+ cli-cursor@4.0.0:
+ resolution: {integrity: sha512-VGtlMu3x/4DOtIUwEkRezxUZ2lBacNJCHash0N0WeZDBS+7Ux1dm3XWAgWYxLJFMMdOeXMHXorshEFhbMSGelg==}
+ engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0}
+
+ cli-spinners@2.9.2:
+ resolution: {integrity: sha512-ywqV+5MmyL4E7ybXgKys4DugZbX0FC6LnwrhjuykIjnK9k8OQacQ7axGKnjDXWNhns0xot3bZI5h55H8yo9cJg==}
+ engines: {node: '>=6'}
+
+ client-only@0.0.1:
+ resolution: {integrity: sha512-IV3Ou0jSMzZrd3pZ48nLkT9DA7Ag1pnPzaiQhpW7c3RbcqqzvzzVu+L8gfqMp/8IM2MQtSiqaCxrrcfu8I8rMA==}
+
+ clone@1.0.4:
+ resolution: {integrity: sha512-JQHZ2QMW6l3aH/j6xCqQThY/9OH4D/9ls34cgkUBiEeocRTU04tHfKPBsUK1PqZCUQM7GiA0IIXJSuXHI64Kbg==}
+ engines: {node: '>=0.8'}
+
+ clsx@2.1.1:
+ resolution: {integrity: sha512-eYm0QWBtUrBWZWG0d386OGAw16Z995PiOVo2B7bjWSbHedGl5e0ZWaq65kOGgUSNesEIDkB9ISbTg/JK9dhCZA==}
+ engines: {node: '>=6'}
+
+ code-block-writer@12.0.0:
+ resolution: {integrity: sha512-q4dMFMlXtKR3XNBHyMHt/3pwYNA69EDk00lloMOaaUMKPUXBw6lpXtbu3MMVG6/uOihGnRDOlkyqsONEUj60+w==}
+
+ color-convert@2.0.1:
+ resolution: {integrity: sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==}
+ engines: {node: '>=7.0.0'}
+
+ color-name@1.1.4:
+ resolution: {integrity: sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==}
+
+ colord@2.9.3:
+ resolution: {integrity: sha512-jeC1axXpnb0/2nn/Y1LPuLdgXBLH7aDcHu4KEKfqw3CUhX7ZpfBSlPKyqXE6btIgEzfWtrX3/tyBCaCvXvMkOw==}
+
+ comma-separated-tokens@1.0.8:
+ resolution: {integrity: sha512-GHuDRO12Sypu2cV70d1dkA2EUmXHgntrzbpvOB+Qy+49ypNfGgFQIC2fhhXbnyrJRynDCAARsT7Ou0M6hirpfw==}
+
+ comma-separated-tokens@2.0.3:
+ resolution: {integrity: sha512-Fu4hJdvzeylCfQPp9SGWidpzrMs7tTrlu6Vb8XGaRGck8QSNZJJp538Wrb60Lax4fPwR64ViY468OIUTbRlGZg==}
+
+ commander@10.0.1:
+ resolution: {integrity: sha512-y4Mg2tXshplEbSGzx7amzPwKKOCGuoSRP/CjEdwwk0FOGlUbq6lKuoyDZTNZkmxHdJtp54hdfY/JUrdL7Xfdug==}
+ engines: {node: '>=14'}
+
+ commander@4.1.1:
+ resolution: {integrity: sha512-NOKm8xhkzAjzFx8B2v5OAHT+u5pRQc2UCa2Vq9jYL/31o2wi9mxBA7LIFs3sV5VSC49z6pEhfbMULvShKj26WA==}
+ engines: {node: '>= 6'}
+
+ concat-map@0.0.1:
+ resolution: {integrity: sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==}
+
+ convert-source-map@2.0.0:
+ resolution: {integrity: sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg==}
+
+ cosmiconfig@8.3.6:
+ resolution: {integrity: sha512-kcZ6+W5QzcJ3P1Mt+83OUv/oHFqZHIx8DuxG6eZ5RGMERoLqp4BuGjhHLYGK+Kf5XVkQvqBSmAy/nGWN3qDgEA==}
+ engines: {node: '>=14'}
+ peerDependencies:
+ typescript: '>=4.9.5'
+ peerDependenciesMeta:
+ typescript:
+ optional: true
+
+ cross-spawn@7.0.6:
+ resolution: {integrity: sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==}
+ engines: {node: '>= 8'}
+
+ cssesc@3.0.0:
+ resolution: {integrity: sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg==}
+ engines: {node: '>=4'}
+ hasBin: true
+
+ csstype@3.2.3:
+ resolution: {integrity: sha512-z1HGKcYy2xA8AGQfwrn0PAy+PB7X/GSj3UVJW9qKyn43xWa+gl5nXmU4qqLMRzWVLFC8KusUX8T/0kCiOYpAIQ==}
+
+ damerau-levenshtein@1.0.8:
+ resolution: {integrity: sha512-sdQSFB7+llfUcQHUQO3+B8ERRj0Oa4w9POWMI/puGtuf7gFywGmkaLCElnudfTiKZV+NvHqL0ifzdrI8Ro7ESA==}
+
+ data-uri-to-buffer@4.0.1:
+ resolution: {integrity: sha512-0R9ikRb668HB7QDxT1vkpuUBtqc53YyAwMwGeUFKRojY/NWKvdZ+9UYtRfGmhqNbRkTSVpMbmyhXipFFv2cb/A==}
+ engines: {node: '>= 12'}
+
+ data-view-buffer@1.0.2:
+ resolution: {integrity: sha512-EmKO5V3OLXh1rtK2wgXRansaK1/mtVdTUEiEI0W8RkvgT05kfxaH29PliLnpLP73yYO6142Q72QNa8Wx/A5CqQ==}
+ engines: {node: '>= 0.4'}
+
+ data-view-byte-length@1.0.2:
+ resolution: {integrity: sha512-tuhGbE6CfTM9+5ANGf+oQb72Ky/0+s3xKUpHvShfiz2RxMFgFPjsXuRLBVMtvMs15awe45SRb83D6wH4ew6wlQ==}
+ engines: {node: '>= 0.4'}
+
+ data-view-byte-offset@1.0.1:
+ resolution: {integrity: sha512-BS8PfmtDGnrgYdOonGZQdLZslWIeCGFP9tpan0hi1Co2Zr2NKADsvGYA8XxuG/4UWgJ6Cjtv+YJnB6MM69QGlQ==}
+ engines: {node: '>= 0.4'}
+
+ date-fns@4.1.0:
+ resolution: {integrity: sha512-Ukq0owbQXxa/U3EGtsdVBkR1w7KOQ5gIBqdH2hkvknzZPYvBxb/aa6E8L7tmjFtkwZBu3UXBbjIgPo/Ez4xaNg==}
+
+ debug@3.2.7:
+ resolution: {integrity: sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==}
+ peerDependencies:
+ supports-color: '*'
+ peerDependenciesMeta:
+ supports-color:
+ optional: true
+
+ debug@4.4.3:
+ resolution: {integrity: sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==}
+ engines: {node: '>=6.0'}
+ peerDependencies:
+ supports-color: '*'
+ peerDependenciesMeta:
+ supports-color:
+ optional: true
+
+ decode-named-character-reference@1.3.0:
+ resolution: {integrity: sha512-GtpQYB283KrPp6nRw50q3U9/VfOutZOe103qlN7BPP6Ad27xYnOIWv4lPzo8HCAL+mMZofJ9KEy30fq6MfaK6Q==}
+
+ deep-is@0.1.4:
+ resolution: {integrity: sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==}
+
+ defaults@1.0.4:
+ resolution: {integrity: sha512-eFuaLoy/Rxalv2kr+lqMlUnrDWV+3j4pljOIJgLIhI058IQfWJ7vXhyEIHu+HtC738klGALYxOKDO0bQP3tg8A==}
+
+ define-data-property@1.1.4:
+ resolution: {integrity: sha512-rBMvIzlpA8v6E+SJZoo++HAYqsLrkg7MSfIinMPFhmkorw7X+dOXVJQs+QT69zGkzMyfDnIMN2Wid1+NbL3T+A==}
+ engines: {node: '>= 0.4'}
+
+ define-properties@1.2.1:
+ resolution: {integrity: sha512-8QmQKqEASLd5nx0U1B1okLElbUuuttJ/AnYmRXbbbGDWh6uS208EjD4Xqq/I9wK7u0v6O08XhTWnt5XtEbR6Dg==}
+ engines: {node: '>= 0.4'}
+
+ dequal@2.0.3:
+ resolution: {integrity: sha512-0je+qPKHEMohvfRTCEo3CrPG6cAzAYgmzKyxRiYSSDkS6eGJdyVJm7WaYA5ECaAD9wLB2T4EEeymA5aFVcYXCA==}
+ engines: {node: '>=6'}
+
+ detect-node-es@1.1.0:
+ resolution: {integrity: sha512-ypdmJU/TbBby2Dxibuv7ZLW3Bs1QEmM7nHjEANfohJLvE0XVujisn1qPJcZxg+qDucsr+bP6fLD1rPS3AhJ7EQ==}
+
+ devlop@1.1.0:
+ resolution: {integrity: sha512-RWmIqhcFf1lRYBvNmr7qTNuyCt/7/ns2jbpp1+PalgE/rDQcBT0fioSMUpJ93irlUhC5hrg4cYqe6U+0ImW0rA==}
+
+ didyoumean@1.2.2:
+ resolution: {integrity: sha512-gxtyfqMg7GKyhQmb056K7M3xszy/myH8w+B4RT+QXBQsvAOdc3XymqDDPHx1BgPgsdAA5SIifona89YtRATDzw==}
+
+ diff-match-patch@1.0.5:
+ resolution: {integrity: sha512-IayShXAgj/QMXgB0IWmKx+rOPuGMhqm5w6jvFxmVenXKIzRqTAAsbBPT3kWQeGANj3jGgvcvv4yK6SxqYmikgw==}
+
+ diff@5.2.2:
+ resolution: {integrity: sha512-vtcDfH3TOjP8UekytvnHH1o1P4FcUdt4eQ1Y+Abap1tk/OB2MWQvcwS2ClCd1zuIhc3JKOx6p3kod8Vfys3E+A==}
+ engines: {node: '>=0.3.1'}
+
+ dir-glob@3.0.1:
+ resolution: {integrity: sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==}
+ engines: {node: '>=8'}
+
+ dlv@1.1.3:
+ resolution: {integrity: sha512-+HlytyjlPKnIG8XuRG8WvmBP8xs8P71y+SKKS6ZXWoEgLuePxtDoUEiH7WkdePWrQ5JBpE6aoVqfZfJUQkjXwA==}
+
+ doctrine@2.1.0:
+ resolution: {integrity: sha512-35mSku4ZXK0vfCuHEDAwt55dg2jNajHZ1odvF+8SSr82EsZY4QmXfuWso8oEd8zRhVObSN18aM0CjSdoBX7zIw==}
+ engines: {node: '>=0.10.0'}
+
+ doctrine@3.0.0:
+ resolution: {integrity: sha512-yS+Q5i3hBf7GBkd4KG8a7eBNNWNGLTaEwwYWUijIYM7zrlYDM0BFXHjjPWlWZ1Rg7UaddZeIDmi9jF3HmqiQ2w==}
+ engines: {node: '>=6.0.0'}
+
+ dunder-proto@1.0.1:
+ resolution: {integrity: sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==}
+ engines: {node: '>= 0.4'}
+
+ eastasianwidth@0.2.0:
+ resolution: {integrity: sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA==}
+
+ electron-to-chromium@1.5.331:
+ resolution: {integrity: sha512-IbxXrsTlD3hRodkLnbxAPP4OuJYdWCeM3IOdT+CpcMoIwIoDfCmRpEtSPfwBXxVkg9xmBeY7Lz2Eo2TDn/HC3Q==}
+
+ emoji-regex@8.0.0:
+ resolution: {integrity: sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==}
+
+ emoji-regex@9.2.2:
+ resolution: {integrity: sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg==}
+
+ entities@6.0.1:
+ resolution: {integrity: sha512-aN97NXWF6AWBTahfVOIrB/NShkzi5H7F9r1s9mD3cDj4Ko5f2qhhVoYMibXF7GlLveb/D2ioWay8lxI97Ven3g==}
+ engines: {node: '>=0.12'}
+
+ error-ex@1.3.4:
+ resolution: {integrity: sha512-sqQamAnR14VgCr1A618A3sGrygcpK+HEbenA/HiEAkkUwcZIIB/tgWqHFxWgOyDh4nB4JCRimh79dR5Ywc9MDQ==}
+
+ es-abstract@1.24.1:
+ resolution: {integrity: sha512-zHXBLhP+QehSSbsS9Pt23Gg964240DPd6QCf8WpkqEXxQ7fhdZzYsocOr5u7apWonsS5EjZDmTF+/slGMyasvw==}
+ engines: {node: '>= 0.4'}
+
+ es-define-property@1.0.1:
+ resolution: {integrity: sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==}
+ engines: {node: '>= 0.4'}
+
+ es-errors@1.3.0:
+ resolution: {integrity: sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==}
+ engines: {node: '>= 0.4'}
+
+ es-iterator-helpers@1.3.1:
+ resolution: {integrity: sha512-zWwRvqWiuBPr0muUG/78cW3aHROFCNIQ3zpmYDpwdbnt2m+xlNyRWpHBpa2lJjSBit7BQ+RXA1iwbSmu5yJ/EQ==}
+ engines: {node: '>= 0.4'}
+
+ es-object-atoms@1.1.1:
+ resolution: {integrity: sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==}
+ engines: {node: '>= 0.4'}
+
+ es-set-tostringtag@2.1.0:
+ resolution: {integrity: sha512-j6vWzfrGVfyXxge+O0x5sh6cvxAog0a/4Rdd2K36zCMV5eJ+/+tOAngRO8cODMNWbVRdVlmGZQL2YS3yR8bIUA==}
+ engines: {node: '>= 0.4'}
+
+ es-shim-unscopables@1.1.0:
+ resolution: {integrity: sha512-d9T8ucsEhh8Bi1woXCf+TIKDIROLG5WCkxg8geBCbvk22kzwC5G2OnXVMO6FUsvQlgUUXQ2itephWDLqDzbeCw==}
+ engines: {node: '>= 0.4'}
+
+ es-to-primitive@1.3.0:
+ resolution: {integrity: sha512-w+5mJ3GuFL+NjVtJlvydShqE1eN3h3PbI7/5LAsYJP/2qtuMXjfL2LpHSRqo4b4eSF5K/DH1JXKUAHSB2UW50g==}
+ engines: {node: '>= 0.4'}
+
+ escalade@3.2.0:
+ resolution: {integrity: sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==}
+ engines: {node: '>=6'}
+
+ escape-string-regexp@4.0.0:
+ resolution: {integrity: sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==}
+ engines: {node: '>=10'}
+
+ escape-string-regexp@5.0.0:
+ resolution: {integrity: sha512-/veY75JbMK4j1yjvuUxuVsiS/hr/4iHs9FTT6cgTexxdE0Ly/glccBAkloH/DofkjRbZU3bnoj38mOmhkZ0lHw==}
+ engines: {node: '>=12'}
+
+ eslint-config-next@14.1.0:
+ resolution: {integrity: sha512-SBX2ed7DoRFXC6CQSLc/SbLY9Ut6HxNB2wPTcoIWjUMd7aF7O/SIE7111L8FdZ9TXsNV4pulUDnfthpyPtbFUg==}
+ peerDependencies:
+ eslint: ^7.23.0 || ^8.0.0
+ typescript: '>=3.3.1'
+ peerDependenciesMeta:
+ typescript:
+ optional: true
+
+ eslint-import-resolver-node@0.3.10:
+ resolution: {integrity: sha512-tRrKqFyCaKict5hOd244sL6EQFNycnMQnBe+j8uqGNXYzsImGbGUU4ibtoaBmv5FLwJwcFJNeg1GeVjQfbMrDQ==}
+
+ eslint-import-resolver-typescript@3.10.1:
+ resolution: {integrity: sha512-A1rHYb06zjMGAxdLSkN2fXPBwuSaQ0iO5M/hdyS0Ajj1VBaRp0sPD3dn1FhME3c/JluGFbwSxyCfqdSbtQLAHQ==}
+ engines: {node: ^14.18.0 || >=16.0.0}
+ peerDependencies:
+ eslint: '*'
+ eslint-plugin-import: '*'
+ eslint-plugin-import-x: '*'
+ peerDependenciesMeta:
+ eslint-plugin-import:
+ optional: true
+ eslint-plugin-import-x:
+ optional: true
+
+ eslint-module-utils@2.12.1:
+ resolution: {integrity: sha512-L8jSWTze7K2mTg0vos/RuLRS5soomksDPoJLXIslC7c8Wmut3bx7CPpJijDcBZtxQ5lrbUdM+s0OlNbz0DCDNw==}
+ engines: {node: '>=4'}
+ peerDependencies:
+ '@typescript-eslint/parser': '*'
+ eslint: '*'
+ eslint-import-resolver-node: '*'
+ eslint-import-resolver-typescript: '*'
+ eslint-import-resolver-webpack: '*'
+ peerDependenciesMeta:
+ '@typescript-eslint/parser':
+ optional: true
+ eslint:
+ optional: true
+ eslint-import-resolver-node:
+ optional: true
+ eslint-import-resolver-typescript:
+ optional: true
+ eslint-import-resolver-webpack:
+ optional: true
+
+ eslint-plugin-import@2.32.0:
+ resolution: {integrity: sha512-whOE1HFo/qJDyX4SnXzP4N6zOWn79WhnCUY/iDR0mPfQZO8wcYE4JClzI2oZrhBnnMUCBCHZhO6VQyoBU95mZA==}
+ engines: {node: '>=4'}
+ peerDependencies:
+ '@typescript-eslint/parser': '*'
+ eslint: ^2 || ^3 || ^4 || ^5 || ^6 || ^7.2.0 || ^8 || ^9
+ peerDependenciesMeta:
+ '@typescript-eslint/parser':
+ optional: true
+
+ eslint-plugin-jsx-a11y@6.10.2:
+ resolution: {integrity: sha512-scB3nz4WmG75pV8+3eRUQOHZlNSUhFNq37xnpgRkCCELU3XMvXAxLk1eqWWyE22Ki4Q01Fnsw9BA3cJHDPgn2Q==}
+ engines: {node: '>=4.0'}
+ peerDependencies:
+ eslint: ^3 || ^4 || ^5 || ^6 || ^7 || ^8 || ^9
+
+ eslint-plugin-react-hooks@5.0.0-canary-7118f5dd7-20230705:
+ resolution: {integrity: sha512-AZYbMo/NW9chdL7vk6HQzQhT+PvTAEVqWk9ziruUoW2kAOcN5qNyelv70e0F1VNQAbvutOC9oc+xfWycI9FxDw==}
+ engines: {node: '>=10'}
+ peerDependencies:
+ eslint: ^3.0.0 || ^4.0.0 || ^5.0.0 || ^6.0.0 || ^7.0.0 || ^8.0.0-0
+
+ eslint-plugin-react@7.37.5:
+ resolution: {integrity: sha512-Qteup0SqU15kdocexFNAJMvCJEfa2xUKNV4CC1xsVMrIIqEy3SQ/rqyxCWNzfrd3/ldy6HMlD2e0JDVpDg2qIA==}
+ engines: {node: '>=4'}
+ peerDependencies:
+ eslint: ^3 || ^4 || ^5 || ^6 || ^7 || ^8 || ^9.7
+
+ eslint-scope@7.2.2:
+ resolution: {integrity: sha512-dOt21O7lTMhDM+X9mB4GX+DZrZtCUJPL/wlcTqxyrx5IvO0IYtILdtrQGQp+8n5S0gwSVmOf9NQrjMOgfQZlIg==}
+ engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0}
+
+ eslint-visitor-keys@3.4.3:
+ resolution: {integrity: sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag==}
+ engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0}
+
+ eslint@8.57.1:
+ resolution: {integrity: sha512-ypowyDxpVSYpkXr9WPv2PAZCtNip1Mv5KTW0SCurXv/9iOpcrH9PaqUElksqEB6pChqHGDRCFTyrZlGhnLNGiA==}
+ engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0}
+ deprecated: This version is no longer supported. Please see https://eslint.org/version-support for other options.
+ hasBin: true
+
+ espree@9.6.1:
+ resolution: {integrity: sha512-oruZaFkjorTpF32kDSI5/75ViwGeZginGGy2NoOSg3Q9bnwlnmDm4HLnkl0RE3n+njDXR037aY1+x58Z/zFdwQ==}
+ engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0}
+
+ esprima@4.0.1:
+ resolution: {integrity: sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==}
+ engines: {node: '>=4'}
+ hasBin: true
+
+ esquery@1.7.0:
+ resolution: {integrity: sha512-Ap6G0WQwcU/LHsvLwON1fAQX9Zp0A2Y6Y/cJBl9r/JbW90Zyg4/zbG6zzKa2OTALELarYHmKu0GhpM5EO+7T0g==}
+ engines: {node: '>=0.10'}
+
+ esrecurse@4.3.0:
+ resolution: {integrity: sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==}
+ engines: {node: '>=4.0'}
+
+ estraverse@5.3.0:
+ resolution: {integrity: sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==}
+ engines: {node: '>=4.0'}
+
+ estree-util-is-identifier-name@3.0.0:
+ resolution: {integrity: sha512-hFtqIDZTIUZ9BXLb8y4pYGyk6+wekIivNVTcmvk8NoOh+VeRn5y6cEHzbURrWbfp1fIqdVipilzj+lfaadNZmg==}
+
+ esutils@2.0.3:
+ resolution: {integrity: sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==}
+ engines: {node: '>=0.10.0'}
+
+ execa@7.2.0:
+ resolution: {integrity: sha512-UduyVP7TLB5IcAQl+OzLyLcS/l32W/GLg+AhHJ+ow40FOk2U3SAllPwR44v4vmdFwIWqpdwxxpQbF1n5ta9seA==}
+ engines: {node: ^14.18.0 || ^16.14.0 || >=18.0.0}
+
+ extend@3.0.2:
+ resolution: {integrity: sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==}
+
+ fast-deep-equal@3.1.3:
+ resolution: {integrity: sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==}
+
+ fast-glob@3.3.3:
+ resolution: {integrity: sha512-7MptL8U0cqcFdzIzwOTHoilX9x5BrNqye7Z/LuC7kCMRio1EMSyqRK3BEAUD7sXRq4iT4AzTVuZdhgQ2TCvYLg==}
+ engines: {node: '>=8.6.0'}
+
+ fast-json-stable-stringify@2.1.0:
+ resolution: {integrity: sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==}
+
+ fast-levenshtein@2.0.6:
+ resolution: {integrity: sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw==}
+
+ fastq@1.20.1:
+ resolution: {integrity: sha512-GGToxJ/w1x32s/D2EKND7kTil4n8OVk/9mycTc4VDza13lOvpUZTGX3mFSCtV9ksdGBVzvsyAVLM6mHFThxXxw==}
+
+ fault@1.0.4:
+ resolution: {integrity: sha512-CJ0HCB5tL5fYTEA7ToAq5+kTwd++Borf1/bifxd9iT70QcXr4MRrO3Llf8Ifs70q+SJcGHFtnIE/Nw6giCtECA==}
+
+ fdir@6.5.0:
+ resolution: {integrity: sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg==}
+ engines: {node: '>=12.0.0'}
+ peerDependencies:
+ picomatch: ^3 || ^4
+ peerDependenciesMeta:
+ picomatch:
+ optional: true
+
+ fetch-blob@3.2.0:
+ resolution: {integrity: sha512-7yAQpD2UMJzLi1Dqv7qFYnPbaPx7ZfFK6PiIxQ4PfkGPyNyl2Ugx+a/umUonmKqjhM4DnfbMvdX6otXq83soQQ==}
+ engines: {node: ^12.20 || >= 14.13}
+
+ file-entry-cache@6.0.1:
+ resolution: {integrity: sha512-7Gps/XWymbLk2QLYK4NzpMOrYjMhdIxXuIvy2QBsLE6ljuodKvdkWs/cpyJJ3CVIVpH0Oi1Hvg1ovbMzLdFBBg==}
+ engines: {node: ^10.12.0 || >=12.0.0}
+
+ file-selector@2.1.2:
+ resolution: {integrity: sha512-QgXo+mXTe8ljeqUFaX3QVHc5osSItJ/Km+xpocx0aSqWGMSCf6qYs/VnzZgS864Pjn5iceMRFigeAV7AfTlaig==}
+ engines: {node: '>= 12'}
+
+ fill-range@7.1.1:
+ resolution: {integrity: sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==}
+ engines: {node: '>=8'}
+
+ find-up@5.0.0:
+ resolution: {integrity: sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==}
+ engines: {node: '>=10'}
+
+ flat-cache@3.2.0:
+ resolution: {integrity: sha512-CYcENa+FtcUKLmhhqyctpclsq7QF38pKjZHsGNiSQF5r4FtoKDWabFDl3hzaEQMvT1LHEysw5twgLvpYYb4vbw==}
+ engines: {node: ^10.12.0 || >=12.0.0}
+
+ flatted@3.4.2:
+ resolution: {integrity: sha512-PjDse7RzhcPkIJwy5t7KPWQSZ9cAbzQXcafsetQoD7sOJRQlGikNbx7yZp2OotDnJyrDcbyRq3Ttb18iYOqkxA==}
+
+ for-each@0.3.5:
+ resolution: {integrity: sha512-dKx12eRCVIzqCxFGplyFKJMPvLEWgmNtUrpTiJIR5u97zEhRG8ySrtboPHZXx7daLxQVrl643cTzbab2tkQjxg==}
+ engines: {node: '>= 0.4'}
+
+ foreground-child@3.3.1:
+ resolution: {integrity: sha512-gIXjKqtFuWEgzFRJA9WCQeSJLZDjgJUOMCMzxtvFq/37KojM1BFGufqsCy0r4qSQmYLsZYMeyRqzIWOMup03sw==}
+ engines: {node: '>=14'}
+
+ format@0.2.2:
+ resolution: {integrity: sha512-wzsgA6WOq+09wrU1tsJ09udeR/YZRaeArL9e1wPbFg3GG2yDnC2ldKpxs4xunpFF9DgqCqOIra3bc1HWrJ37Ww==}
+ engines: {node: '>=0.4.x'}
+
+ formdata-polyfill@4.0.10:
+ resolution: {integrity: sha512-buewHzMvYL29jdeQTVILecSaZKnt/RJWjoZCF5OW60Z67/GmSLBkOFM7qh1PI3zFNtJbaZL5eQu1vLfazOwj4g==}
+ engines: {node: '>=12.20.0'}
+
+ fraction.js@5.3.4:
+ resolution: {integrity: sha512-1X1NTtiJphryn/uLQz3whtY6jK3fTqoE3ohKs0tT+Ujr1W59oopxmoEh7Lu5p6vBaPbgoM0bzveAW4Qi5RyWDQ==}
+
+ fs-extra@11.3.4:
+ resolution: {integrity: sha512-CTXd6rk/M3/ULNQj8FBqBWHYBVYybQ3VPBw0xGKFe3tuH7ytT6ACnvzpIQ3UZtB8yvUKC2cXn1a+x+5EVQLovA==}
+ engines: {node: '>=14.14'}
+
+ fs.realpath@1.0.0:
+ resolution: {integrity: sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==}
+
+ fsevents@2.3.3:
+ resolution: {integrity: sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==}
+ engines: {node: ^8.16.0 || ^10.6.0 || >=11.0.0}
+ os: [darwin]
+
+ function-bind@1.1.2:
+ resolution: {integrity: sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==}
+
+ function.prototype.name@1.1.8:
+ resolution: {integrity: sha512-e5iwyodOHhbMr/yNrc7fDYG4qlbIvI5gajyzPnb5TCwyhjApznQh1BMFou9b30SevY43gCJKXycoCBjMbsuW0Q==}
+ engines: {node: '>= 0.4'}
+
+ functions-have-names@1.2.3:
+ resolution: {integrity: sha512-xckBUXyTIqT97tq2x2AMb+g163b5JFysYk0x4qxNFwbfQkmNZoiRHb6sPzI9/QV33WeuvVYBUIiD4NzNIyqaRQ==}
+
+ generator-function@2.0.1:
+ resolution: {integrity: sha512-SFdFmIJi+ybC0vjlHN0ZGVGHc3lgE0DxPAT0djjVg+kjOnSqclqmj0KQ7ykTOLP6YxoqOvuAODGdcHJn+43q3g==}
+ engines: {node: '>= 0.4'}
+
+ gensync@1.0.0-beta.2:
+ resolution: {integrity: sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==}
+ engines: {node: '>=6.9.0'}
+
+ get-intrinsic@1.3.0:
+ resolution: {integrity: sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==}
+ engines: {node: '>= 0.4'}
+
+ get-nonce@1.0.1:
+ resolution: {integrity: sha512-FJhYRoDaiatfEkUK8HKlicmu/3SGFD51q3itKDGoSTysQJBnfOcxU5GxnhE1E6soB76MbT0MBtnKJuXyAx+96Q==}
+ engines: {node: '>=6'}
+
+ get-proto@1.0.1:
+ resolution: {integrity: sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==}
+ engines: {node: '>= 0.4'}
+
+ get-stream@6.0.1:
+ resolution: {integrity: sha512-ts6Wi+2j3jQjqi70w5AlN8DFnkSwC+MqmxEzdEALB2qXZYV3X/b1CTfgPLGJNMeAWxdPfU8FO1ms3NUfaHCPYg==}
+ engines: {node: '>=10'}
+
+ get-symbol-description@1.1.0:
+ resolution: {integrity: sha512-w9UMqWwJxHNOvoNzSJ2oPF5wvYcvP7jUvYzhp67yEhTi17ZDBBC1z9pTdGuzjD+EFIqLSYRweZjqfiPzQ06Ebg==}
+ engines: {node: '>= 0.4'}
+
+ get-tsconfig@4.13.7:
+ resolution: {integrity: sha512-7tN6rFgBlMgpBML5j8typ92BKFi2sFQvIdpAqLA2beia5avZDrMs0FLZiM5etShWq5irVyGcGMEA1jcDaK7A/Q==}
+
+ glob-parent@5.1.2:
+ resolution: {integrity: sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==}
+ engines: {node: '>= 6'}
+
+ glob-parent@6.0.2:
+ resolution: {integrity: sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==}
+ engines: {node: '>=10.13.0'}
+
+ glob@10.3.10:
+ resolution: {integrity: sha512-fa46+tv1Ak0UPK1TOy/pZrIybNNt4HCv7SDzwyfiOZkvZLEbjsZkJBPtDHVshZjbecAoAGSC20MjLDG/qr679g==}
+ engines: {node: '>=16 || 14 >=14.17'}
+ deprecated: Old versions of glob are not supported, and contain widely publicized security vulnerabilities, which have been fixed in the current version. Please update. Support for old versions may be purchased (at exorbitant rates) by contacting i@izs.me
+ hasBin: true
+
+ glob@7.2.3:
+ resolution: {integrity: sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==}
+ deprecated: Old versions of glob are not supported, and contain widely publicized security vulnerabilities, which have been fixed in the current version. Please update. Support for old versions may be purchased (at exorbitant rates) by contacting i@izs.me
+
+ globals@13.24.0:
+ resolution: {integrity: sha512-AhO5QUcj8llrbG09iWhPU2B204J1xnPeL8kQmVorSsy+Sjj1sk8gIyh6cUocGmH4L0UuhAJy+hJMRA4mgA4mFQ==}
+ engines: {node: '>=8'}
+
+ globalthis@1.0.4:
+ resolution: {integrity: sha512-DpLKbNU4WylpxJykQujfCcwYWiV/Jhm50Goo0wrVILAv5jOr9d+H+UR3PhSCD2rCCEIg0uc+G+muBTwD54JhDQ==}
+ engines: {node: '>= 0.4'}
+
+ globby@11.1.0:
+ resolution: {integrity: sha512-jhIXaOzy1sb8IyocaruWSn1TjmnBVs8Ayhcy83rmxNJ8q2uWKCAj3CnJY+KpGSXCueAPc0i05kVvVKtP1t9S3g==}
+ engines: {node: '>=10'}
+
+ gopd@1.2.0:
+ resolution: {integrity: sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==}
+ engines: {node: '>= 0.4'}
+
+ graceful-fs@4.2.11:
+ resolution: {integrity: sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==}
+
+ graphemer@1.4.0:
+ resolution: {integrity: sha512-EtKwoO6kxCL9WO5xipiHTZlSzBm7WLT627TqC/uVRd0HKmq8NXyebnNYxDoBi7wt8eTWrUrKXCOVaFq9x1kgag==}
+
+ has-bigints@1.1.0:
+ resolution: {integrity: sha512-R3pbpkcIqv2Pm3dUwgjclDRVmWpTJW2DcMzcIhEXEx1oh/CEMObMm3KLmRJOdvhM7o4uQBnwr8pzRK2sJWIqfg==}
+ engines: {node: '>= 0.4'}
+
+ has-flag@4.0.0:
+ resolution: {integrity: sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==}
+ engines: {node: '>=8'}
+
+ has-property-descriptors@1.0.2:
+ resolution: {integrity: sha512-55JNKuIW+vq4Ke1BjOTjM2YctQIvCT7GFzHwmfZPGo5wnrgkid0YQtnAleFSqumZm4az3n2BS+erby5ipJdgrg==}
+
+ has-proto@1.2.0:
+ resolution: {integrity: sha512-KIL7eQPfHQRC8+XluaIw7BHUwwqL19bQn4hzNgdr+1wXoU0KKj6rufu47lhY7KbJR2C6T6+PfyN0Ea7wkSS+qQ==}
+ engines: {node: '>= 0.4'}
+
+ has-symbols@1.1.0:
+ resolution: {integrity: sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==}
+ engines: {node: '>= 0.4'}
+
+ has-tostringtag@1.0.2:
+ resolution: {integrity: sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw==}
+ engines: {node: '>= 0.4'}
+
+ hasown@2.0.2:
+ resolution: {integrity: sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==}
+ engines: {node: '>= 0.4'}
+
+ hast-util-from-parse5@8.0.3:
+ resolution: {integrity: sha512-3kxEVkEKt0zvcZ3hCRYI8rqrgwtlIOFMWkbclACvjlDw8Li9S2hk/d51OI0nr/gIpdMHNepwgOKqZ/sy0Clpyg==}
+
+ hast-util-is-element@3.0.0:
+ resolution: {integrity: sha512-Val9mnv2IWpLbNPqc/pUem+a7Ipj2aHacCwgNfTiK0vJKl0LF+4Ba4+v1oPHFpf3bLYmreq0/l3Gud9S5OH42g==}
+
+ hast-util-parse-selector@2.2.5:
+ resolution: {integrity: sha512-7j6mrk/qqkSehsM92wQjdIgWM2/BW61u/53G6xmC8i1OmEdKLHbk419QKQUjz6LglWsfqoiHmyMRkP1BGjecNQ==}
+
+ hast-util-parse-selector@4.0.0:
+ resolution: {integrity: sha512-wkQCkSYoOGCRKERFWcxMVMOcYE2K1AaNLU8DXS9arxnLOUEWbOXKXiJUNzEpqZ3JOKpnha3jkFrumEjVliDe7A==}
+
+ hast-util-raw@9.1.0:
+ resolution: {integrity: sha512-Y8/SBAHkZGoNkpzqqfCldijcuUKh7/su31kEBp67cFY09Wy0mTRgtsLYsiIxMJxlu0f6AA5SUTbDR8K0rxnbUw==}
+
+ hast-util-to-jsx-runtime@2.3.6:
+ resolution: {integrity: sha512-zl6s8LwNyo1P9uw+XJGvZtdFF1GdAkOg8ujOw+4Pyb76874fLps4ueHXDhXWdk6YHQ6OgUtinliG7RsYvCbbBg==}
+
+ hast-util-to-parse5@8.0.1:
+ resolution: {integrity: sha512-MlWT6Pjt4CG9lFCjiz4BH7l9wmrMkfkJYCxFwKQic8+RTZgWPuWxwAfjJElsXkex7DJjfSJsQIt931ilUgmwdA==}
+
+ hast-util-to-text@4.0.2:
+ resolution: {integrity: sha512-KK6y/BN8lbaq654j7JgBydev7wuNMcID54lkRav1P0CaE1e47P72AWWPiGKXTJU271ooYzcvTAn/Zt0REnvc7A==}
+
+ hast-util-whitespace@3.0.0:
+ resolution: {integrity: sha512-88JUN06ipLwsnv+dVn+OIYOvAuvBMy/Qoi6O7mQHxdPXpjy+Cd6xRkWwux7DKO+4sYILtLBRIKgsdpS2gQc7qw==}
+
+ hastscript@6.0.0:
+ resolution: {integrity: sha512-nDM6bvd7lIqDUiYEiu5Sl/+6ReP0BMk/2f4U/Rooccxkj0P5nm+acM5PrGJ/t5I8qPGiqZSE6hVAwZEdZIvP4w==}
+
+ hastscript@9.0.1:
+ resolution: {integrity: sha512-g7df9rMFX/SPi34tyGCyUBREQoKkapwdY/T04Qn9TDWfHhAYt4/I0gMVirzK5wEzeUqIjEB+LXC/ypb7Aqno5w==}
+
+ highlight.js@10.7.3:
+ resolution: {integrity: sha512-tzcUFauisWKNHaRkN4Wjl/ZA07gENAjFl3J/c480dprkGTg5EQstgaNFqBfUqCq54kZRIEcreTsAgF/m2quD7A==}
+
+ highlight.js@11.11.1:
+ resolution: {integrity: sha512-Xwwo44whKBVCYoliBQwaPvtd/2tYFkRQtXDWj1nackaV2JPXx3L0+Jvd8/qCJ2p+ML0/XVkJ2q+Mr+UVdpJK5w==}
+ engines: {node: '>=12.0.0'}
+
+ highlightjs-vue@1.0.0:
+ resolution: {integrity: sha512-PDEfEF102G23vHmPhLyPboFCD+BkMGu+GuJe2d9/eH4FsCwvgBpnc9n0pGE+ffKdph38s6foEZiEjdgHdzp+IA==}
+
+ html-url-attributes@3.0.1:
+ resolution: {integrity: sha512-ol6UPyBWqsrO6EJySPz2O7ZSr856WDrEzM5zMqp+FJJLGMW35cLYmmZnl0vztAZxRUoNZJFTCohfjuIJ8I4QBQ==}
+
+ html-void-elements@3.0.0:
+ resolution: {integrity: sha512-bEqo66MRXsUGxWHV5IP0PUiAWwoEjba4VCzg0LjFJBpchPaTfyfCKTG6bc5F8ucKec3q5y6qOdGyYTSBEvhCrg==}
+
+ https-proxy-agent@6.2.1:
+ resolution: {integrity: sha512-ONsE3+yfZF2caH5+bJlcddtWqNI3Gvs5A38+ngvljxaBiRXRswym2c7yf8UAeFpRFKjFNHIFEHqR/OLAWJzyiA==}
+ engines: {node: '>= 14'}
+
+ human-signals@4.3.1:
+ resolution: {integrity: sha512-nZXjEF2nbo7lIw3mgYjItAfgQXog3OjJogSbKa2CQIIvSGWcKgeJnQlNXip6NglNzYH45nSRiEVimMvYL8DDqQ==}
+ engines: {node: '>=14.18.0'}
+
+ ieee754@1.2.1:
+ resolution: {integrity: sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA==}
+
+ ignore@5.3.2:
+ resolution: {integrity: sha512-hsBTNUqQTDwkWtcdYI2i06Y/nUBEsNEDJKjWdigLvegy8kDuJAS8uRlpkkcQpyEXL0Z/pjDy5HBmMjRCJ2gq+g==}
+ engines: {node: '>= 4'}
+
+ import-fresh@3.3.1:
+ resolution: {integrity: sha512-TR3KfrTZTYLPB6jUjfx6MF9WcWrHL9su5TObK4ZkYgBdWKPOFoSoQIdEuTuR82pmtxH2spWG9h6etwfr1pLBqQ==}
+ engines: {node: '>=6'}
+
+ imurmurhash@0.1.4:
+ resolution: {integrity: sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==}
+ engines: {node: '>=0.8.19'}
+
+ inflight@1.0.6:
+ resolution: {integrity: sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==}
+ deprecated: This module is not supported, and leaks memory. Do not use it. Check out lru-cache if you want a good and tested way to coalesce async requests by a key value, which is much more comprehensive and powerful.
+
+ inherits@2.0.4:
+ resolution: {integrity: sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==}
+
+ inline-style-parser@0.2.7:
+ resolution: {integrity: sha512-Nb2ctOyNR8DqQoR0OwRG95uNWIC0C1lCgf5Naz5H6Ji72KZ8OcFZLz2P5sNgwlyoJ8Yif11oMuYs5pBQa86csA==}
+
+ internal-slot@1.1.0:
+ resolution: {integrity: sha512-4gd7VpWNQNB4UKKCFFVcp1AVv+FMOgs9NKzjHKusc8jTMhd5eL1NqQqOpE0KzMds804/yHlglp3uxgluOqAPLw==}
+ engines: {node: '>= 0.4'}
+
+ is-alphabetical@1.0.4:
+ resolution: {integrity: sha512-DwzsA04LQ10FHTZuL0/grVDk4rFoVH1pjAToYwBrHSxcrBIGQuXrQMtD5U1b0U2XVgKZCTLLP8u2Qxqhy3l2Vg==}
+
+ is-alphabetical@2.0.1:
+ resolution: {integrity: sha512-FWyyY60MeTNyeSRpkM2Iry0G9hpr7/9kD40mD/cGQEuilcZYS4okz8SN2Q6rLCJ8gbCt6fN+rC+6tMGS99LaxQ==}
+
+ is-alphanumerical@1.0.4:
+ resolution: {integrity: sha512-UzoZUr+XfVz3t3v4KyGEniVL9BDRoQtY7tOyrRybkVNjDFWyo1yhXNGrrBTQxp3ib9BLAWs7k2YKBQsFRkZG9A==}
+
+ is-alphanumerical@2.0.1:
+ resolution: {integrity: sha512-hmbYhX/9MUMF5uh7tOXyK/n0ZvWpad5caBA17GsC6vyuCqaWliRG5K1qS9inmUhEMaOBIW7/whAnSwveW/LtZw==}
+
+ is-array-buffer@3.0.5:
+ resolution: {integrity: sha512-DDfANUiiG2wC1qawP66qlTugJeL5HyzMpfr8lLK+jMQirGzNod0B12cFB/9q838Ru27sBwfw78/rdoU7RERz6A==}
+ engines: {node: '>= 0.4'}
+
+ is-arrayish@0.2.1:
+ resolution: {integrity: sha512-zz06S8t0ozoDXMG+ube26zeCTNXcKIPJZJi8hBrF4idCLms4CG9QtK7qBl1boi5ODzFpjswb5JPmHCbMpjaYzg==}
+
+ is-async-function@2.1.1:
+ resolution: {integrity: sha512-9dgM/cZBnNvjzaMYHVoxxfPj2QXt22Ev7SuuPrs+xav0ukGB0S6d4ydZdEiM48kLx5kDV+QBPrpVnFyefL8kkQ==}
+ engines: {node: '>= 0.4'}
+
+ is-bigint@1.1.0:
+ resolution: {integrity: sha512-n4ZT37wG78iz03xPRKJrHTdZbe3IicyucEtdRsV5yglwc3GyUfbAfpSeD0FJ41NbUNSt5wbhqfp1fS+BgnvDFQ==}
+ engines: {node: '>= 0.4'}
+
+ is-binary-path@2.1.0:
+ resolution: {integrity: sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==}
+ engines: {node: '>=8'}
+
+ is-boolean-object@1.2.2:
+ resolution: {integrity: sha512-wa56o2/ElJMYqjCjGkXri7it5FbebW5usLw/nPmCMs5DeZ7eziSYZhSmPRn0txqeW4LnAmQQU7FgqLpsEFKM4A==}
+ engines: {node: '>= 0.4'}
+
+ is-bun-module@2.0.0:
+ resolution: {integrity: sha512-gNCGbnnnnFAUGKeZ9PdbyeGYJqewpmc2aKHUEMO5nQPWU9lOmv7jcmQIv+qHD8fXW6W7qfuCwX4rY9LNRjXrkQ==}
+
+ is-callable@1.2.7:
+ resolution: {integrity: sha512-1BC0BVFhS/p0qtw6enp8e+8OD0UrK0oFLztSjNzhcKA3WDuJxxAPXzPuPtKkjEY9UUoEWlX/8fgKeu2S8i9JTA==}
+ engines: {node: '>= 0.4'}
+
+ is-core-module@2.16.1:
+ resolution: {integrity: sha512-UfoeMA6fIJ8wTYFEUjelnaGI67v6+N7qXJEvQuIGa99l4xsCruSYOVSQ0uPANn4dAzm8lkYPaKLrrijLq7x23w==}
+ engines: {node: '>= 0.4'}
+
+ is-data-view@1.0.2:
+ resolution: {integrity: sha512-RKtWF8pGmS87i2D6gqQu/l7EYRlVdfzemCJN/P3UOs//x1QE7mfhvzHIApBTRf7axvT6DMGwSwBXYCT0nfB9xw==}
+ engines: {node: '>= 0.4'}
+
+ is-date-object@1.1.0:
+ resolution: {integrity: sha512-PwwhEakHVKTdRNVOw+/Gyh0+MzlCl4R6qKvkhuvLtPMggI1WAHt9sOwZxQLSGpUaDnrdyDsomoRgNnCfKNSXXg==}
+ engines: {node: '>= 0.4'}
+
+ is-decimal@1.0.4:
+ resolution: {integrity: sha512-RGdriMmQQvZ2aqaQq3awNA6dCGtKpiDFcOzrTWrDAT2MiWrKQVPmxLGHl7Y2nNu6led0kEyoX0enY0qXYsv9zw==}
+
+ is-decimal@2.0.1:
+ resolution: {integrity: sha512-AAB9hiomQs5DXWcRB1rqsxGUstbRroFOPPVAomNk/3XHR5JyEZChOyTWe2oayKnsSsr/kcGqF+z6yuH6HHpN0A==}
+
+ is-extglob@2.1.1:
+ resolution: {integrity: sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==}
+ engines: {node: '>=0.10.0'}
+
+ is-finalizationregistry@1.1.1:
+ resolution: {integrity: sha512-1pC6N8qWJbWoPtEjgcL2xyhQOP491EQjeUo3qTKcmV8YSDDJrOepfG8pcC7h/QgnQHYSv0mJ3Z/ZWxmatVrysg==}
+ engines: {node: '>= 0.4'}
+
+ is-fullwidth-code-point@3.0.0:
+ resolution: {integrity: sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==}
+ engines: {node: '>=8'}
+
+ is-generator-function@1.1.2:
+ resolution: {integrity: sha512-upqt1SkGkODW9tsGNG5mtXTXtECizwtS2kA161M+gJPc1xdb/Ax629af6YrTwcOeQHbewrPNlE5Dx7kzvXTizA==}
+ engines: {node: '>= 0.4'}
+
+ is-glob@4.0.3:
+ resolution: {integrity: sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==}
+ engines: {node: '>=0.10.0'}
+
+ is-hexadecimal@1.0.4:
+ resolution: {integrity: sha512-gyPJuv83bHMpocVYoqof5VDiZveEoGoFL8m3BXNb2VW8Xs+rz9kqO8LOQ5DH6EsuvilT1ApazU0pyl+ytbPtlw==}
+
+ is-hexadecimal@2.0.1:
+ resolution: {integrity: sha512-DgZQp241c8oO6cA1SbTEWiXeoxV42vlcJxgH+B3hi1AiqqKruZR3ZGF8In3fj4+/y/7rHvlOZLZtgJ/4ttYGZg==}
+
+ is-interactive@2.0.0:
+ resolution: {integrity: sha512-qP1vozQRI+BMOPcjFzrjXuQvdak2pHNUMZoeG2eRbiSqyvbEf/wQtEOTOX1guk6E3t36RkaqiSt8A/6YElNxLQ==}
+ engines: {node: '>=12'}
+
+ is-map@2.0.3:
+ resolution: {integrity: sha512-1Qed0/Hr2m+YqxnM09CjA2d/i6YZNfF6R2oRAOj36eUdS6qIV/huPJNSEpKbupewFs+ZsJlxsjjPbc0/afW6Lw==}
+ engines: {node: '>= 0.4'}
+
+ is-negative-zero@2.0.3:
+ resolution: {integrity: sha512-5KoIu2Ngpyek75jXodFvnafB6DJgr3u8uuK0LEZJjrU19DrMD3EVERaR8sjz8CCGgpZvxPl9SuE1GMVPFHx1mw==}
+ engines: {node: '>= 0.4'}
+
+ is-number-object@1.1.1:
+ resolution: {integrity: sha512-lZhclumE1G6VYD8VHe35wFaIif+CTy5SJIi5+3y4psDgWu4wPDoBhF8NxUOinEc7pHgiTsT6MaBb92rKhhD+Xw==}
+ engines: {node: '>= 0.4'}
+
+ is-number@7.0.0:
+ resolution: {integrity: sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==}
+ engines: {node: '>=0.12.0'}
+
+ is-path-inside@3.0.3:
+ resolution: {integrity: sha512-Fd4gABb+ycGAmKou8eMftCupSir5lRxqf4aD/vd0cD2qc4HL07OjCeuHMr8Ro4CoMaeCKDB0/ECBOVWjTwUvPQ==}
+ engines: {node: '>=8'}
+
+ is-plain-obj@4.1.0:
+ resolution: {integrity: sha512-+Pgi+vMuUNkJyExiMBt5IlFoMyKnr5zhJ4Uspz58WOhBF5QoIZkFyNHIbBAtHwzVAgk5RtndVNsDRN61/mmDqg==}
+ engines: {node: '>=12'}
+
+ is-regex@1.2.1:
+ resolution: {integrity: sha512-MjYsKHO5O7mCsmRGxWcLWheFqN9DJ/2TmngvjKXihe6efViPqc274+Fx/4fYj/r03+ESvBdTXK0V6tA3rgez1g==}
+ engines: {node: '>= 0.4'}
+
+ is-set@2.0.3:
+ resolution: {integrity: sha512-iPAjerrse27/ygGLxw+EBR9agv9Y6uLeYVJMu+QNCoouJ1/1ri0mGrcWpfCqFZuzzx3WjtwxG098X+n4OuRkPg==}
+ engines: {node: '>= 0.4'}
+
+ is-shared-array-buffer@1.0.4:
+ resolution: {integrity: sha512-ISWac8drv4ZGfwKl5slpHG9OwPNty4jOWPRIhBpxOoD+hqITiwuipOQ2bNthAzwA3B4fIjO4Nln74N0S9byq8A==}
+ engines: {node: '>= 0.4'}
+
+ is-stream@3.0.0:
+ resolution: {integrity: sha512-LnQR4bZ9IADDRSkvpqMGvt/tEJWclzklNgSw48V5EAaAeDd6qGvN8ei6k5p0tvxSR171VmGyHuTiAOfxAbr8kA==}
+ engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0}
+
+ is-string@1.1.1:
+ resolution: {integrity: sha512-BtEeSsoaQjlSPBemMQIrY1MY0uM6vnS1g5fmufYOtnxLGUZM2178PKbhsk7Ffv58IX+ZtcvoGwccYsh0PglkAA==}
+ engines: {node: '>= 0.4'}
+
+ is-symbol@1.1.1:
+ resolution: {integrity: sha512-9gGx6GTtCQM73BgmHQXfDmLtfjjTUDSyoxTCbp5WtoixAhfgsDirWIcVQ/IHpvI5Vgd5i/J5F7B9cN/WlVbC/w==}
+ engines: {node: '>= 0.4'}
+
+ is-typed-array@1.1.15:
+ resolution: {integrity: sha512-p3EcsicXjit7SaskXHs1hA91QxgTw46Fv6EFKKGS5DRFLD8yKnohjF3hxoju94b/OcMZoQukzpPpBE9uLVKzgQ==}
+ engines: {node: '>= 0.4'}
+
+ is-unicode-supported@1.3.0:
+ resolution: {integrity: sha512-43r2mRvz+8JRIKnWJ+3j8JtjRKZ6GmjzfaE/qiBJnikNnYv/6bagRJ1kUhNk8R5EX/GkobD+r+sfxCPJsiKBLQ==}
+ engines: {node: '>=12'}
+
+ is-weakmap@2.0.2:
+ resolution: {integrity: sha512-K5pXYOm9wqY1RgjpL3YTkF39tni1XajUIkawTLUo9EZEVUFga5gSQJF8nNS7ZwJQ02y+1YCNYcMh+HIf1ZqE+w==}
+ engines: {node: '>= 0.4'}
+
+ is-weakref@1.1.1:
+ resolution: {integrity: sha512-6i9mGWSlqzNMEqpCp93KwRS1uUOodk2OJ6b+sq7ZPDSy2WuI5NFIxp/254TytR8ftefexkWn5xNiHUNpPOfSew==}
+ engines: {node: '>= 0.4'}
+
+ is-weakset@2.0.4:
+ resolution: {integrity: sha512-mfcwb6IzQyOKTs84CQMrOwW4gQcaTOAWJ0zzJCl2WSPDrWk/OzDaImWFH3djXhb24g4eudZfLRozAvPGw4d9hQ==}
+ engines: {node: '>= 0.4'}
+
+ isarray@2.0.5:
+ resolution: {integrity: sha512-xHjhDr3cNBK0BzdUJSPXZntQUx/mwMS5Rw4A7lPJ90XGAO6ISP/ePDNuo0vhqOZU+UD5JoodwCAAoZQd3FeAKw==}
+
+ isexe@2.0.0:
+ resolution: {integrity: sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==}
+
+ iterator.prototype@1.1.5:
+ resolution: {integrity: sha512-H0dkQoCa3b2VEeKQBOxFph+JAbcrQdE7KC0UkqwpLmv2EC4P41QXP+rqo9wYodACiG5/WM5s9oDApTU8utwj9g==}
+ engines: {node: '>= 0.4'}
+
+ jackspeak@2.3.6:
+ resolution: {integrity: sha512-N3yCS/NegsOBokc8GAdM8UcmfsKiSS8cipheD/nivzr700H+nsMOxJjQnvwOcRYVuFkdH0wGUvW2WbXGmrZGbQ==}
+ engines: {node: '>=14'}
+
+ jiti@1.21.7:
+ resolution: {integrity: sha512-/imKNG4EbWNrVjoNC/1H5/9GFy+tqjGBHCaSsN+P2RnPqjsLmv6UD3Ej+Kj8nBWaRAwyk7kK5ZUc+OEatnTR3A==}
+ hasBin: true
+
+ js-tokens@4.0.0:
+ resolution: {integrity: sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==}
+
+ js-yaml@4.1.1:
+ resolution: {integrity: sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA==}
+ hasBin: true
+
+ jsesc@3.1.0:
+ resolution: {integrity: sha512-/sM3dO2FOzXjKQhJuo0Q173wf2KOo8t4I8vHy6lF9poUp7bKT0/NHE8fPX23PwfhnykfqnC2xRxOnVw5XuGIaA==}
+ engines: {node: '>=6'}
+ hasBin: true
+
+ json-buffer@3.0.1:
+ resolution: {integrity: sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ==}
+
+ json-parse-even-better-errors@2.3.1:
+ resolution: {integrity: sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w==}
+
+ json-schema-traverse@0.4.1:
+ resolution: {integrity: sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==}
+
+ json-schema@0.4.0:
+ resolution: {integrity: sha512-es94M3nTIfsEPisRafak+HDLfHXnKBhV3vU5eqPcS3flIWqcxJWgXHXiey3YrpaNsanY5ei1VoYEbOzijuq9BA==}
+
+ json-stable-stringify-without-jsonify@1.0.1:
+ resolution: {integrity: sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw==}
+
+ json5@1.0.2:
+ resolution: {integrity: sha512-g1MWMLBiz8FKi1e4w0UyVL3w+iJceWAFBAaBnnGKOpNa5f8TLktkbre1+s6oICydWAm+HRUGTmI+//xv2hvXYA==}
+ hasBin: true
+
+ json5@2.2.3:
+ resolution: {integrity: sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==}
+ engines: {node: '>=6'}
+ hasBin: true
+
+ jsondiffpatch@0.6.0:
+ resolution: {integrity: sha512-3QItJOXp2AP1uv7waBkao5nCvhEv+QmJAd38Ybq7wNI74Q+BBmnLn4EDKz6yI9xGAIQoUF87qHt+kc1IVxB4zQ==}
+ engines: {node: ^18.0.0 || >=20.0.0}
+ hasBin: true
+
+ jsonfile@6.2.0:
+ resolution: {integrity: sha512-FGuPw30AdOIUTRMC2OMRtQV+jkVj2cfPqSeWXv1NEAJ1qZ5zb1X6z1mFhbfOB/iy3ssJCD+3KuZ8r8C3uVFlAg==}
+
+ jsx-ast-utils@3.3.5:
+ resolution: {integrity: sha512-ZZow9HBI5O6EPgSJLUb8n2NKgmVWTwCvHGwFuJlMjvLFqlGG6pjirPhtdsseaLZjSibD8eegzmYpUZwoIlj2cQ==}
+ engines: {node: '>=4.0'}
+
+ keyv@4.5.4:
+ resolution: {integrity: sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw==}
+
+ kleur@3.0.3:
+ resolution: {integrity: sha512-eTIzlVOSUR+JxdDFepEYcBMtZ9Qqdef+rnzWdRZuMbOywu5tO2w2N7rqjoANZ5k9vywhL6Br1VRjUIgTQx4E8w==}
+ engines: {node: '>=6'}
+
+ language-subtag-registry@0.3.23:
+ resolution: {integrity: sha512-0K65Lea881pHotoGEa5gDlMxt3pctLi2RplBb7Ezh4rRdLEOtgi7n4EwK9lamnUCkKBqaeKRVebTq6BAxSkpXQ==}
+
+ language-tags@1.0.9:
+ resolution: {integrity: sha512-MbjN408fEndfiQXbFQ1vnd+1NoLDsnQW41410oQBXiyXDMYH5z505juWa4KUE1LqxRC7DgOgZDbKLxHIwm27hA==}
+ engines: {node: '>=0.10'}
+
+ levn@0.4.1:
+ resolution: {integrity: sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ==}
+ engines: {node: '>= 0.8.0'}
+
+ lilconfig@3.1.3:
+ resolution: {integrity: sha512-/vlFKAoH5Cgt3Ie+JLhRbwOsCQePABiU3tJ1egGvyQ+33R/vcwM2Zl2QR/LzjsBeItPt3oSVXapn+m4nQDvpzw==}
+ engines: {node: '>=14'}
+
+ lines-and-columns@1.2.4:
+ resolution: {integrity: sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==}
+
+ locate-path@6.0.0:
+ resolution: {integrity: sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==}
+ engines: {node: '>=10'}
+
+ lodash._reinterpolate@3.0.0:
+ resolution: {integrity: sha512-xYHt68QRoYGjeeM/XOE1uJtvXQAgvszfBhjV4yvsQH0u2i9I6cI6c6/eG4Hh3UAOVn0y/xAXwmTzEay49Q//HA==}
+
+ lodash.merge@4.6.2:
+ resolution: {integrity: sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==}
+
+ lodash.template@4.18.1:
+ resolution: {integrity: sha512-5urZrLnV/VD6zHK5KsVtZgt7H19v51mIzoS0aBNH8yp3I8tbswrEjOABOPY8m8uB7NuibubLrMX+Y0PXsU9X+w==}
+ deprecated: This package is deprecated. Use https://socket.dev/npm/package/eta instead.
+
+ lodash.templatesettings@4.2.0:
+ resolution: {integrity: sha512-stgLz+i3Aa9mZgnjr/O+v9ruKZsPsndy7qPZOchbqk2cnTU1ZaldKK+v7m54WoKIyxiuMZTKT2H81F8BeAc3ZQ==}
+
+ log-symbols@5.1.0:
+ resolution: {integrity: sha512-l0x2DvrW294C9uDCoQe1VSU4gf529FkSZ6leBl4TiqZH/e+0R7hSfHQBNut2mNygDgHwvYHfFLn6Oxb3VWj2rA==}
+ engines: {node: '>=12'}
+
+ longest-streak@3.1.0:
+ resolution: {integrity: sha512-9Ri+o0JYgehTaVBBDoMqIl8GXtbWg711O3srftcHhZ0dqnETqLaoIK0x17fUw9rFSlK/0NlsKe0Ahhyl5pXE2g==}
+
+ loose-envify@1.4.0:
+ resolution: {integrity: sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==}
+ hasBin: true
+
+ lowlight@1.20.0:
+ resolution: {integrity: sha512-8Ktj+prEb1RoCPkEOrPMYUN/nCggB7qAWe3a7OpMjWQkh3l2RD5wKRQ+o8Q8YuI9RG/xs95waaI/E6ym/7NsTw==}
+
+ lowlight@3.3.0:
+ resolution: {integrity: sha512-0JNhgFoPvP6U6lE/UdVsSq99tn6DhjjpAj5MxG49ewd2mOBVtwWYIT8ClyABhq198aXXODMU6Ox8DrGy/CpTZQ==}
+
+ lru-cache@10.4.3:
+ resolution: {integrity: sha512-JNAzZcXrCt42VGLuYz0zfAzDfAvJWW6AfYlDBQyDV5DClI2m5sAmK+OIO7s59XfsRsWHp02jAJrRadPRGTt6SQ==}
+
+ lru-cache@5.1.1:
+ resolution: {integrity: sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==}
+
+ lucide-react@0.323.0:
+ resolution: {integrity: sha512-rTXZFILl2Y4d1SG9p1Mdcf17AcPvPvpc/egFIzUrp7IUy60MUQo3Oi1mu8LGYXUVwuRZYsSMt3csHRW5mAovJg==}
+ peerDependencies:
+ react: ^16.5.1 || ^17.0.0 || ^18.0.0
+
+ markdown-table@3.0.4:
+ resolution: {integrity: sha512-wiYz4+JrLyb/DqW2hkFJxP7Vd7JuTDm77fvbM8VfEQdmSMqcImWeeRbHwZjBjIFki/VaMK2BhFi7oUUZeM5bqw==}
+
+ math-intrinsics@1.1.0:
+ resolution: {integrity: sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==}
+ engines: {node: '>= 0.4'}
+
+ mdast-util-find-and-replace@3.0.2:
+ resolution: {integrity: sha512-Tmd1Vg/m3Xz43afeNxDIhWRtFZgM2VLyaf4vSTYwudTyeuTneoL3qtWMA5jeLyz/O1vDJmmV4QuScFCA2tBPwg==}
+
+ mdast-util-from-markdown@2.0.3:
+ resolution: {integrity: sha512-W4mAWTvSlKvf8L6J+VN9yLSqQ9AOAAvHuoDAmPkz4dHf553m5gVj2ejadHJhoJmcmxEnOv6Pa8XJhpxE93kb8Q==}
+
+ mdast-util-gfm-autolink-literal@2.0.1:
+ resolution: {integrity: sha512-5HVP2MKaP6L+G6YaxPNjuL0BPrq9orG3TsrZ9YXbA3vDw/ACI4MEsnoDpn6ZNm7GnZgtAcONJyPhOP8tNJQavQ==}
+
+ mdast-util-gfm-footnote@2.1.0:
+ resolution: {integrity: sha512-sqpDWlsHn7Ac9GNZQMeUzPQSMzR6Wv0WKRNvQRg0KqHh02fpTz69Qc1QSseNX29bhz1ROIyNyxExfawVKTm1GQ==}
+
+ mdast-util-gfm-strikethrough@2.0.0:
+ resolution: {integrity: sha512-mKKb915TF+OC5ptj5bJ7WFRPdYtuHv0yTRxK2tJvi+BDqbkiG7h7u/9SI89nRAYcmap2xHQL9D+QG/6wSrTtXg==}
+
+ mdast-util-gfm-table@2.0.0:
+ resolution: {integrity: sha512-78UEvebzz/rJIxLvE7ZtDd/vIQ0RHv+3Mh5DR96p7cS7HsBhYIICDBCu8csTNWNO6tBWfqXPWekRuj2FNOGOZg==}
+
+ mdast-util-gfm-task-list-item@2.0.0:
+ resolution: {integrity: sha512-IrtvNvjxC1o06taBAVJznEnkiHxLFTzgonUdy8hzFVeDun0uTjxxrRGVaNFqkU1wJR3RBPEfsxmU6jDWPofrTQ==}
+
+ mdast-util-gfm@3.1.0:
+ resolution: {integrity: sha512-0ulfdQOM3ysHhCJ1p06l0b0VKlhU0wuQs3thxZQagjcjPrlFRqY215uZGHHJan9GEAXd9MbfPjFJz+qMkVR6zQ==}
+
+ mdast-util-mdx-expression@2.0.1:
+ resolution: {integrity: sha512-J6f+9hUp+ldTZqKRSg7Vw5V6MqjATc+3E4gf3CFNcuZNWD8XdyI6zQ8GqH7f8169MM6P7hMBRDVGnn7oHB9kXQ==}
+
+ mdast-util-mdx-jsx@3.2.0:
+ resolution: {integrity: sha512-lj/z8v0r6ZtsN/cGNNtemmmfoLAFZnjMbNyLzBafjzikOM+glrjNHPlf6lQDOTccj9n5b0PPihEBbhneMyGs1Q==}
+
+ mdast-util-mdxjs-esm@2.0.1:
+ resolution: {integrity: sha512-EcmOpxsZ96CvlP03NghtH1EsLtr0n9Tm4lPUJUBccV9RwUOneqSycg19n5HGzCf+10LozMRSObtVr3ee1WoHtg==}
+
+ mdast-util-phrasing@4.1.0:
+ resolution: {integrity: sha512-TqICwyvJJpBwvGAMZjj4J2n0X8QWp21b9l0o7eXyVJ25YNWYbJDVIyD1bZXE6WtV6RmKJVYmQAKWa0zWOABz2w==}
+
+ mdast-util-to-hast@13.2.1:
+ resolution: {integrity: sha512-cctsq2wp5vTsLIcaymblUriiTcZd0CwWtCbLvrOzYCDZoWyMNV8sZ7krj09FSnsiJi3WVsHLM4k6Dq/yaPyCXA==}
+
+ mdast-util-to-markdown@2.1.2:
+ resolution: {integrity: sha512-xj68wMTvGXVOKonmog6LwyJKrYXZPvlwabaryTjLh9LuvovB/KAH+kvi8Gjj+7rJjsFi23nkUxRQv1KqSroMqA==}
+
+ mdast-util-to-string@4.0.0:
+ resolution: {integrity: sha512-0H44vDimn51F0YwvxSJSm0eCDOJTRlmN0R1yBh4HLj9wiV1Dn0QoXGbvFAWj2hSItVTlCmBF1hqKlIyUBVFLPg==}
+
+ merge-stream@2.0.0:
+ resolution: {integrity: sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w==}
+
+ merge2@1.4.1:
+ resolution: {integrity: sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==}
+ engines: {node: '>= 8'}
+
+ micromark-core-commonmark@2.0.3:
+ resolution: {integrity: sha512-RDBrHEMSxVFLg6xvnXmb1Ayr2WzLAWjeSATAoxwKYJV94TeNavgoIdA0a9ytzDSVzBy2YKFK+emCPOEibLeCrg==}
+
+ micromark-extension-gfm-autolink-literal@2.1.0:
+ resolution: {integrity: sha512-oOg7knzhicgQ3t4QCjCWgTmfNhvQbDDnJeVu9v81r7NltNCVmhPy1fJRX27pISafdjL+SVc4d3l48Gb6pbRypw==}
+
+ micromark-extension-gfm-footnote@2.1.0:
+ resolution: {integrity: sha512-/yPhxI1ntnDNsiHtzLKYnE3vf9JZ6cAisqVDauhp4CEHxlb4uoOTxOCJ+9s51bIB8U1N1FJ1RXOKTIlD5B/gqw==}
+
+ micromark-extension-gfm-strikethrough@2.1.0:
+ resolution: {integrity: sha512-ADVjpOOkjz1hhkZLlBiYA9cR2Anf8F4HqZUO6e5eDcPQd0Txw5fxLzzxnEkSkfnD0wziSGiv7sYhk/ktvbf1uw==}
+
+ micromark-extension-gfm-table@2.1.1:
+ resolution: {integrity: sha512-t2OU/dXXioARrC6yWfJ4hqB7rct14e8f7m0cbI5hUmDyyIlwv5vEtooptH8INkbLzOatzKuVbQmAYcbWoyz6Dg==}
+
+ micromark-extension-gfm-tagfilter@2.0.0:
+ resolution: {integrity: sha512-xHlTOmuCSotIA8TW1mDIM6X2O1SiX5P9IuDtqGonFhEK0qgRI4yeC6vMxEV2dgyr2TiD+2PQ10o+cOhdVAcwfg==}
+
+ micromark-extension-gfm-task-list-item@2.1.0:
+ resolution: {integrity: sha512-qIBZhqxqI6fjLDYFTBIa4eivDMnP+OZqsNwmQ3xNLE4Cxwc+zfQEfbs6tzAo2Hjq+bh6q5F+Z8/cksrLFYWQQw==}
+
+ micromark-extension-gfm@3.0.0:
+ resolution: {integrity: sha512-vsKArQsicm7t0z2GugkCKtZehqUm31oeGBV/KVSorWSy8ZlNAv7ytjFhvaryUiCUJYqs+NoE6AFhpQvBTM6Q4w==}
+
+ micromark-factory-destination@2.0.1:
+ resolution: {integrity: sha512-Xe6rDdJlkmbFRExpTOmRj9N3MaWmbAgdpSrBQvCFqhezUn4AHqJHbaEnfbVYYiexVSs//tqOdY/DxhjdCiJnIA==}
+
+ micromark-factory-label@2.0.1:
+ resolution: {integrity: sha512-VFMekyQExqIW7xIChcXn4ok29YE3rnuyveW3wZQWWqF4Nv9Wk5rgJ99KzPvHjkmPXF93FXIbBp6YdW3t71/7Vg==}
+
+ micromark-factory-space@2.0.1:
+ resolution: {integrity: sha512-zRkxjtBxxLd2Sc0d+fbnEunsTj46SWXgXciZmHq0kDYGnck/ZSGj9/wULTV95uoeYiK5hRXP2mJ98Uo4cq/LQg==}
+
+ micromark-factory-title@2.0.1:
+ resolution: {integrity: sha512-5bZ+3CjhAd9eChYTHsjy6TGxpOFSKgKKJPJxr293jTbfry2KDoWkhBb6TcPVB4NmzaPhMs1Frm9AZH7OD4Cjzw==}
+
+ micromark-factory-whitespace@2.0.1:
+ resolution: {integrity: sha512-Ob0nuZ3PKt/n0hORHyvoD9uZhr+Za8sFoP+OnMcnWK5lngSzALgQYKMr9RJVOWLqQYuyn6ulqGWSXdwf6F80lQ==}
+
+ micromark-util-character@2.1.1:
+ resolution: {integrity: sha512-wv8tdUTJ3thSFFFJKtpYKOYiGP2+v96Hvk4Tu8KpCAsTMs6yi+nVmGh1syvSCsaxz45J6Jbw+9DD6g97+NV67Q==}
+
+ micromark-util-chunked@2.0.1:
+ resolution: {integrity: sha512-QUNFEOPELfmvv+4xiNg2sRYeS/P84pTW0TCgP5zc9FpXetHY0ab7SxKyAQCNCc1eK0459uoLI1y5oO5Vc1dbhA==}
+
+ micromark-util-classify-character@2.0.1:
+ resolution: {integrity: sha512-K0kHzM6afW/MbeWYWLjoHQv1sgg2Q9EccHEDzSkxiP/EaagNzCm7T/WMKZ3rjMbvIpvBiZgwR3dKMygtA4mG1Q==}
+
+ micromark-util-combine-extensions@2.0.1:
+ resolution: {integrity: sha512-OnAnH8Ujmy59JcyZw8JSbK9cGpdVY44NKgSM7E9Eh7DiLS2E9RNQf0dONaGDzEG9yjEl5hcqeIsj4hfRkLH/Bg==}
+
+ micromark-util-decode-numeric-character-reference@2.0.2:
+ resolution: {integrity: sha512-ccUbYk6CwVdkmCQMyr64dXz42EfHGkPQlBj5p7YVGzq8I7CtjXZJrubAYezf7Rp+bjPseiROqe7G6foFd+lEuw==}
+
+ micromark-util-decode-string@2.0.1:
+ resolution: {integrity: sha512-nDV/77Fj6eH1ynwscYTOsbK7rR//Uj0bZXBwJZRfaLEJ1iGBR6kIfNmlNqaqJf649EP0F3NWNdeJi03elllNUQ==}
+
+ micromark-util-encode@2.0.1:
+ resolution: {integrity: sha512-c3cVx2y4KqUnwopcO9b/SCdo2O67LwJJ/UyqGfbigahfegL9myoEFoDYZgkT7f36T0bLrM9hZTAaAyH+PCAXjw==}
+
+ micromark-util-html-tag-name@2.0.1:
+ resolution: {integrity: sha512-2cNEiYDhCWKI+Gs9T0Tiysk136SnR13hhO8yW6BGNyhOC4qYFnwF1nKfD3HFAIXA5c45RrIG1ub11GiXeYd1xA==}
+
+ micromark-util-normalize-identifier@2.0.1:
+ resolution: {integrity: sha512-sxPqmo70LyARJs0w2UclACPUUEqltCkJ6PhKdMIDuJ3gSf/Q+/GIe3WKl0Ijb/GyH9lOpUkRAO2wp0GVkLvS9Q==}
+
+ micromark-util-resolve-all@2.0.1:
+ resolution: {integrity: sha512-VdQyxFWFT2/FGJgwQnJYbe1jjQoNTS4RjglmSjTUlpUMa95Htx9NHeYW4rGDJzbjvCsl9eLjMQwGeElsqmzcHg==}
+
+ micromark-util-sanitize-uri@2.0.1:
+ resolution: {integrity: sha512-9N9IomZ/YuGGZZmQec1MbgxtlgougxTodVwDzzEouPKo3qFWvymFHWcnDi2vzV1ff6kas9ucW+o3yzJK9YB1AQ==}
+
+ micromark-util-subtokenize@2.1.0:
+ resolution: {integrity: sha512-XQLu552iSctvnEcgXw6+Sx75GflAPNED1qx7eBJ+wydBb2KCbRZe+NwvIEEMM83uml1+2WSXpBAcp9IUCgCYWA==}
+
+ micromark-util-symbol@2.0.1:
+ resolution: {integrity: sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==}
+
+ micromark-util-types@2.0.2:
+ resolution: {integrity: sha512-Yw0ECSpJoViF1qTU4DC6NwtC4aWGt1EkzaQB8KPPyCRR8z9TWeV0HbEFGTO+ZY1wB22zmxnJqhPyTpOVCpeHTA==}
+
+ micromark@4.0.2:
+ resolution: {integrity: sha512-zpe98Q6kvavpCr1NPVSCMebCKfD7CA2NqZ+rykeNhONIJBpc1tFKt9hucLGwha3jNTNI8lHpctWJWoimVF4PfA==}
+
+ micromatch@4.0.8:
+ resolution: {integrity: sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==}
+ engines: {node: '>=8.6'}
+
+ mimic-fn@2.1.0:
+ resolution: {integrity: sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==}
+ engines: {node: '>=6'}
+
+ mimic-fn@4.0.0:
+ resolution: {integrity: sha512-vqiC06CuhBTUdZH+RYl8sFrL096vA45Ok5ISO6sE/Mr1jRbGH4Csnhi8f3wKVl7x8mO4Au7Ir9D3Oyv1VYMFJw==}
+ engines: {node: '>=12'}
+
+ minimatch@3.1.5:
+ resolution: {integrity: sha512-VgjWUsnnT6n+NUk6eZq77zeFdpW2LWDzP6zFGrCbHXiYNul5Dzqk2HHQ5uFH2DNW5Xbp8+jVzaeNt94ssEEl4w==}
+
+ minimatch@7.4.9:
+ resolution: {integrity: sha512-Brg/fp/iAVDOQoHxkuN5bEYhyQlZhxddI78yWsCbeEwTHXQjlNLtiJDUsp1GIptVqMI7/gkJMz4vVAc01mpoBw==}
+ engines: {node: '>=10'}
+
+ minimatch@9.0.3:
+ resolution: {integrity: sha512-RHiac9mvaRw0x3AYRgDC1CxAP7HTcNrrECeA8YYJeWnpo+2Q5CegtZjaotWTWxDG3UeGA1coE05iH1mPjT/2mg==}
+ engines: {node: '>=16 || 14 >=14.17'}
+
+ minimatch@9.0.9:
+ resolution: {integrity: sha512-OBwBN9AL4dqmETlpS2zasx+vTeWclWzkblfZk7KTA5j3jeOONz/tRCnZomUyvNg83wL5Zv9Ss6HMJXAgL8R2Yg==}
+ engines: {node: '>=16 || 14 >=14.17'}
+
+ minimist@1.2.8:
+ resolution: {integrity: sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==}
+
+ minipass@7.1.3:
+ resolution: {integrity: sha512-tEBHqDnIoM/1rXME1zgka9g6Q2lcoCkxHLuc7ODJ5BxbP5d4c2Z5cGgtXAku59200Cx7diuHTOYfSBD8n6mm8A==}
+ engines: {node: '>=16 || 14 >=14.17'}
+
+ mkdirp@2.1.6:
+ resolution: {integrity: sha512-+hEnITedc8LAtIP9u3HJDFIdcLV2vXP33sqLLIzkv1Db1zO/1OxbvYf0Y1OC/S/Qo5dxHXepofhmxL02PsKe+A==}
+ engines: {node: '>=10'}
+ hasBin: true
+
+ ms@2.1.3:
+ resolution: {integrity: sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==}
+
+ mz@2.7.0:
+ resolution: {integrity: sha512-z81GNO7nnYMEhrGh9LeymoE4+Yr0Wn5McHIZMK5cfQCl+NDX08sCZgUc9/6MHni9IWuFLm1Z3HTCXu2z9fN62Q==}
+
+ nanoid@3.3.11:
+ resolution: {integrity: sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w==}
+ engines: {node: ^10 || ^12 || ^13.7 || ^14 || >=15.0.1}
+ hasBin: true
+
+ napi-postinstall@0.3.4:
+ resolution: {integrity: sha512-PHI5f1O0EP5xJ9gQmFGMS6IZcrVvTjpXjz7Na41gTE7eE2hK11lg04CECCYEEjdc17EV4DO+fkGEtt7TpTaTiQ==}
+ engines: {node: ^12.20.0 || ^14.18.0 || >=16.0.0}
+ hasBin: true
+
+ natural-compare@1.4.0:
+ resolution: {integrity: sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==}
+
+ next-themes@0.2.1:
+ resolution: {integrity: sha512-B+AKNfYNIzh0vqQQKqQItTS8evEouKD7H5Hj3kmuPERwddR2TxvDSFZuTj6T7Jfn1oyeUyJMydPl1Bkxkh0W7A==}
+ peerDependencies:
+ next: '*'
+ react: '*'
+ react-dom: '*'
+
+ next@14.2.32:
+ resolution: {integrity: sha512-fg5g0GZ7/nFc09X8wLe6pNSU8cLWbLRG3TZzPJ1BJvi2s9m7eF991se67wliM9kR5yLHRkyGKU49MMx58s3LJg==}
+ engines: {node: '>=18.17.0'}
+ deprecated: This version has a security vulnerability. Please upgrade to a patched version. See https://nextjs.org/blog/security-update-2025-12-11 for more details.
+ hasBin: true
+ peerDependencies:
+ '@opentelemetry/api': ^1.1.0
+ '@playwright/test': ^1.41.2
+ react: ^18.2.0
+ react-dom: ^18.2.0
+ sass: ^1.3.0
+ peerDependenciesMeta:
+ '@opentelemetry/api':
+ optional: true
+ '@playwright/test':
+ optional: true
+ sass:
+ optional: true
+
+ node-domexception@1.0.0:
+ resolution: {integrity: sha512-/jKZoMpw0F8GRwl4/eLROPA3cfcXtLApP0QzLmUT/HuPCZWyB7IY9ZrMeKw2O/nFIqPQB3PVM9aYm0F312AXDQ==}
+ engines: {node: '>=10.5.0'}
+ deprecated: Use your platform's native DOMException instead
+
+ node-exports-info@1.6.0:
+ resolution: {integrity: sha512-pyFS63ptit/P5WqUkt+UUfe+4oevH+bFeIiPPdfb0pFeYEu/1ELnJu5l+5EcTKYL5M7zaAa7S8ddywgXypqKCw==}
+ engines: {node: '>= 0.4'}
+
+ node-fetch@3.3.2:
+ resolution: {integrity: sha512-dRB78srN/l6gqWulah9SrxeYnxeddIG30+GOqK/9OlLVyLg3HPnr6SqOWTWOXKRwC2eGYCkZ59NNuSgvSrpgOA==}
+ engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0}
+
+ node-releases@2.0.37:
+ resolution: {integrity: sha512-1h5gKZCF+pO/o3Iqt5Jp7wc9rH3eJJ0+nh/CIoiRwjRxde/hAHyLPXYN4V3CqKAbiZPSeJFSWHmJsbkicta0Eg==}
+
+ normalize-path@3.0.0:
+ resolution: {integrity: sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==}
+ engines: {node: '>=0.10.0'}
+
+ npm-run-path@5.3.0:
+ resolution: {integrity: sha512-ppwTtiJZq0O/ai0z7yfudtBpWIoxM8yE6nHi1X47eFR2EWORqfbu6CnPlNsjeN683eT0qG6H/Pyf9fCcvjnnnQ==}
+ engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0}
+
+ object-assign@4.1.1:
+ resolution: {integrity: sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==}
+ engines: {node: '>=0.10.0'}
+
+ object-hash@3.0.0:
+ resolution: {integrity: sha512-RSn9F68PjH9HqtltsSnqYC1XXoWe9Bju5+213R98cNGttag9q9yAOTzdbsqvIa7aNm5WffBZFpWYr2aWrklWAw==}
+ engines: {node: '>= 6'}
+
+ object-inspect@1.13.4:
+ resolution: {integrity: sha512-W67iLl4J2EXEGTbfeHCffrjDfitvLANg0UlX3wFUUSTx92KXRFegMHUVgSqE+wvhAbi4WqjGg9czysTV2Epbew==}
+ engines: {node: '>= 0.4'}
+
+ object-keys@1.1.1:
+ resolution: {integrity: sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA==}
+ engines: {node: '>= 0.4'}
+
+ object.assign@4.1.7:
+ resolution: {integrity: sha512-nK28WOo+QIjBkDduTINE4JkF/UJJKyf2EJxvJKfblDpyg0Q+pkOHNTL0Qwy6NP6FhE/EnzV73BxxqcJaXY9anw==}
+ engines: {node: '>= 0.4'}
+
+ object.entries@1.1.9:
+ resolution: {integrity: sha512-8u/hfXFRBD1O0hPUjioLhoWFHRmt6tKA4/vZPyckBr18l1KE9uHrFaFaUi8MDRTpi4uak2goyPTSNJLXX2k2Hw==}
+ engines: {node: '>= 0.4'}
+
+ object.fromentries@2.0.8:
+ resolution: {integrity: sha512-k6E21FzySsSK5a21KRADBd/NGneRegFO5pLHfdQLpRDETUNJueLXs3WCzyQ3tFRDYgbq3KHGXfTbi2bs8WQ6rQ==}
+ engines: {node: '>= 0.4'}
+
+ object.groupby@1.0.3:
+ resolution: {integrity: sha512-+Lhy3TQTuzXI5hevh8sBGqbmurHbbIjAi0Z4S63nthVLmLxfbj4T54a4CfZrXIrt9iP4mVAPYMo/v99taj3wjQ==}
+ engines: {node: '>= 0.4'}
+
+ object.values@1.2.1:
+ resolution: {integrity: sha512-gXah6aZrcUxjWg2zR2MwouP2eHlCBzdV4pygudehaKXSGW4v2AsRQUK+lwwXhii6KFZcunEnmSUoYp5CXibxtA==}
+ engines: {node: '>= 0.4'}
+
+ once@1.4.0:
+ resolution: {integrity: sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==}
+
+ onetime@5.1.2:
+ resolution: {integrity: sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==}
+ engines: {node: '>=6'}
+
+ onetime@6.0.0:
+ resolution: {integrity: sha512-1FlR+gjXK7X+AsAHso35MnyN5KqGwJRi/31ft6x0M194ht7S+rWAvd7PHss9xSKMzE0asv1pyIHaJYq+BbacAQ==}
+ engines: {node: '>=12'}
+
+ optionator@0.9.4:
+ resolution: {integrity: sha512-6IpQ7mKUxRcZNLIObR0hz7lxsapSSIYNZJwXPGeF0mTVqGKFIXj1DQcMoT22S3ROcLyY/rz0PWaWZ9ayWmad9g==}
+ engines: {node: '>= 0.8.0'}
+
+ ora@6.3.1:
+ resolution: {integrity: sha512-ERAyNnZOfqM+Ao3RAvIXkYh5joP220yf59gVe2X/cI6SiCxIdi4c9HZKZD8R6q/RDXEje1THBju6iExiSsgJaQ==}
+ engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0}
+
+ own-keys@1.0.1:
+ resolution: {integrity: sha512-qFOyK5PjiWZd+QQIh+1jhdb9LpxTF0qs7Pm8o5QHYZ0M3vKqSqzsZaEB6oWlxZ+q2sJBMI/Ktgd2N5ZwQoRHfg==}
+ engines: {node: '>= 0.4'}
+
+ p-limit@3.1.0:
+ resolution: {integrity: sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==}
+ engines: {node: '>=10'}
+
+ p-locate@5.0.0:
+ resolution: {integrity: sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==}
+ engines: {node: '>=10'}
+
+ parent-module@1.0.1:
+ resolution: {integrity: sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==}
+ engines: {node: '>=6'}
+
+ parse-entities@2.0.0:
+ resolution: {integrity: sha512-kkywGpCcRYhqQIchaWqZ875wzpS/bMKhz5HnN3p7wveJTkTtyAB/AlnS0f8DFSqYW1T82t6yEAkEcB+A1I3MbQ==}
+
+ parse-entities@4.0.2:
+ resolution: {integrity: sha512-GG2AQYWoLgL877gQIKeRPGO1xF9+eG1ujIb5soS5gPvLQ1y2o8FL90w2QWNdf9I361Mpp7726c+lj3U0qK1uGw==}
+
+ parse-json@5.2.0:
+ resolution: {integrity: sha512-ayCKvm/phCGxOkYRSCM82iDwct8/EonSEgCSxWxD7ve6jHggsFl4fZVQBPRNgQoKiuV/odhFrGzQXZwbifC8Rg==}
+ engines: {node: '>=8'}
+
+ parse5@7.3.0:
+ resolution: {integrity: sha512-IInvU7fabl34qmi9gY8XOVxhYyMyuH2xUNpb2q8/Y+7552KlejkRvqvD19nMoUW/uQGGbqNpA6Tufu5FL5BZgw==}
+
+ path-browserify@1.0.1:
+ resolution: {integrity: sha512-b7uo2UCUOYZcnF/3ID0lulOJi/bafxa1xPe7ZPsammBSpjSWQkjNxlt635YGS2MiR9GjvuXCtz2emr3jbsz98g==}
+
+ path-exists@4.0.0:
+ resolution: {integrity: sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==}
+ engines: {node: '>=8'}
+
+ path-is-absolute@1.0.1:
+ resolution: {integrity: sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==}
+ engines: {node: '>=0.10.0'}
+
+ path-key@3.1.1:
+ resolution: {integrity: sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==}
+ engines: {node: '>=8'}
+
+ path-key@4.0.0:
+ resolution: {integrity: sha512-haREypq7xkM7ErfgIyA0z+Bj4AGKlMSdlQE2jvJo6huWD1EdkKYV+G/T4nq0YEF2vgTT8kqMFKo1uHn950r4SQ==}
+ engines: {node: '>=12'}
+
+ path-parse@1.0.7:
+ resolution: {integrity: sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==}
+
+ path-scurry@1.11.1:
+ resolution: {integrity: sha512-Xa4Nw17FS9ApQFJ9umLiJS4orGjm7ZzwUrwamcGQuHSzDyth9boKDaycYdDcZDuqYATXw4HFXgaqWTctW/v1HA==}
+ engines: {node: '>=16 || 14 >=14.18'}
+
+ path-type@4.0.0:
+ resolution: {integrity: sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==}
+ engines: {node: '>=8'}
+
+ picocolors@1.1.1:
+ resolution: {integrity: sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==}
+
+ picomatch@2.3.2:
+ resolution: {integrity: sha512-V7+vQEJ06Z+c5tSye8S+nHUfI51xoXIXjHQ99cQtKUkQqqO1kO/KCJUfZXuB47h/YBlDhah2H3hdUGXn8ie0oA==}
+ engines: {node: '>=8.6'}
+
+ picomatch@4.0.4:
+ resolution: {integrity: sha512-QP88BAKvMam/3NxH6vj2o21R6MjxZUAd6nlwAS/pnGvN9IVLocLHxGYIzFhg6fUQ+5th6P4dv4eW9jX3DSIj7A==}
+ engines: {node: '>=12'}
+
+ pify@2.3.0:
+ resolution: {integrity: sha512-udgsAY+fTnvv7kI7aaxbqwWNb0AHiB0qBO89PZKPkoTmGOgdbrHDKD+0B2X4uTfJ/FT1R09r9gTsjUjNJotuog==}
+ engines: {node: '>=0.10.0'}
+
+ pirates@4.0.7:
+ resolution: {integrity: sha512-TfySrs/5nm8fQJDcBDuUng3VOUKsd7S+zqvbOTiGXHfxX4wK31ard+hoNuvkicM/2YFzlpDgABOevKSsB4G/FA==}
+ engines: {node: '>= 6'}
+
+ possible-typed-array-names@1.1.0:
+ resolution: {integrity: sha512-/+5VFTchJDoVj3bhoqi6UeymcD00DAwb1nJwamzPvHEszJ4FpF6SNNbUbOS8yI56qHzdV8eK0qEfOSiodkTdxg==}
+ engines: {node: '>= 0.4'}
+
+ postcss-import@15.1.0:
+ resolution: {integrity: sha512-hpr+J05B2FVYUAXHeK1YyI267J/dDDhMU6B6civm8hSY1jYJnBXxzKDKDswzJmtLHryrjhnDjqqp/49t8FALew==}
+ engines: {node: '>=14.0.0'}
+ peerDependencies:
+ postcss: ^8.0.0
+
+ postcss-js@4.1.0:
+ resolution: {integrity: sha512-oIAOTqgIo7q2EOwbhb8UalYePMvYoIeRY2YKntdpFQXNosSu3vLrniGgmH9OKs/qAkfoj5oB3le/7mINW1LCfw==}
+ engines: {node: ^12 || ^14 || >= 16}
+ peerDependencies:
+ postcss: ^8.4.21
+
+ postcss-load-config@6.0.1:
+ resolution: {integrity: sha512-oPtTM4oerL+UXmx+93ytZVN82RrlY/wPUV8IeDxFrzIjXOLF1pN+EmKPLbubvKHT2HC20xXsCAH2Z+CKV6Oz/g==}
+ engines: {node: '>= 18'}
+ peerDependencies:
+ jiti: '>=1.21.0'
+ postcss: '>=8.0.9'
+ tsx: ^4.8.1
+ yaml: ^2.4.2
+ peerDependenciesMeta:
+ jiti:
+ optional: true
+ postcss:
+ optional: true
+ tsx:
+ optional: true
+ yaml:
+ optional: true
+
+ postcss-nested@6.2.0:
+ resolution: {integrity: sha512-HQbt28KulC5AJzG+cZtj9kvKB93CFCdLvog1WFLf1D+xmMvPGlBstkpTEZfK5+AN9hfJocyBFCNiqyS48bpgzQ==}
+ engines: {node: '>=12.0'}
+ peerDependencies:
+ postcss: ^8.2.14
+
+ postcss-selector-parser@6.1.2:
+ resolution: {integrity: sha512-Q8qQfPiZ+THO/3ZrOrO0cJJKfpYCagtMUkXbnEfmgUjwXg6z/WBeOyS9APBBPCTSiDV+s4SwQGu8yFsiMRIudg==}
+ engines: {node: '>=4'}
+
+ postcss-value-parser@4.2.0:
+ resolution: {integrity: sha512-1NNCs6uurfkVbeXG4S8JFT9t19m45ICnif8zWLd5oPSZ50QnwMfK+H3jv408d4jw/7Bttv5axS5IiHoLaVNHeQ==}
+
+ postcss@8.4.31:
+ resolution: {integrity: sha512-PS08Iboia9mts/2ygV3eLpY5ghnUcfLV/EXTOW1E2qYxJKGGBUtNjN76FYHnMs36RmARn41bC0AZmn+rR0OVpQ==}
+ engines: {node: ^10 || ^12 || >=14}
+
+ postcss@8.5.8:
+ resolution: {integrity: sha512-OW/rX8O/jXnm82Ey1k44pObPtdblfiuWnrd8X7GJ7emImCOstunGbXUpp7HdBrFQX6rJzn3sPT397Wp5aCwCHg==}
+ engines: {node: ^10 || ^12 || >=14}
+
+ prelude-ls@1.2.1:
+ resolution: {integrity: sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g==}
+ engines: {node: '>= 0.8.0'}
+
+ prismjs@1.27.0:
+ resolution: {integrity: sha512-t13BGPUlFDR7wRB5kQDG4jjl7XeuH6jbJGt11JHPL96qwsEHNX2+68tFXqc1/k+/jALsbSWJKUOT/hcYAZ5LkA==}
+ engines: {node: '>=6'}
+
+ prismjs@1.30.0:
+ resolution: {integrity: sha512-DEvV2ZF2r2/63V+tK8hQvrR2ZGn10srHbXviTlcv7Kpzw8jWiNTqbVgjO3IY8RxrrOUF8VPMQQFysYYYv0YZxw==}
+ engines: {node: '>=6'}
+
+ prompts@2.4.2:
+ resolution: {integrity: sha512-NxNv/kLguCA7p3jE8oL2aEBsrJWgAakBpgmgK6lpPWV+WuOmY6r2/zbAVnP+T8bQlA0nzHXSJSJW0Hq7ylaD2Q==}
+ engines: {node: '>= 6'}
+
+ prop-types@15.8.1:
+ resolution: {integrity: sha512-oj87CgZICdulUohogVAR7AjlC0327U4el4L6eAvOqCeudMDVU0NThNaV+b9Df4dXgSP1gXMTnPdhfe/2qDH5cg==}
+
+ property-information@5.6.0:
+ resolution: {integrity: sha512-YUHSPk+A30YPv+0Qf8i9Mbfe/C0hdPXk1s1jPVToV8pk8BQtpw10ct89Eo7OWkutrwqvT0eicAxlOg3dOAu8JA==}
+
+ property-information@7.1.0:
+ resolution: {integrity: sha512-TwEZ+X+yCJmYfL7TPUOcvBZ4QfoT5YenQiJuX//0th53DE6w0xxLEtfK3iyryQFddXuvkIk51EEgrJQ0WJkOmQ==}
+
+ punycode@2.3.1:
+ resolution: {integrity: sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==}
+ engines: {node: '>=6'}
+
+ queue-microtask@1.2.3:
+ resolution: {integrity: sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==}
+
+ react-dom@18.3.1:
+ resolution: {integrity: sha512-5m4nQKp+rZRb09LNH59GM4BxTh9251/ylbKIbpe7TpGxfJ+9kv6BLkLBXIjjspbgbnIBNqlI23tRnTWT0snUIw==}
+ peerDependencies:
+ react: ^18.3.1
+
+ react-dropzone@14.4.1:
+ resolution: {integrity: sha512-QDuV76v3uKbHiH34SpwifZ+gOLi1+RdsCO1kl5vxMT4wW8R82+sthjvBw4th3NHF/XX6FBsqDYZVNN+pnhaw0g==}
+ engines: {node: '>= 10.13'}
+ peerDependencies:
+ react: '>= 16.8 || 18.0.0'
+
+ react-file-icon@1.6.0:
+ resolution: {integrity: sha512-Ba4Qa2ya/kvhcCd4LJja77sV7JD7u1ZXcI1DUz+TII3nGmglG6QY+NZeHizThokgct3qI0glwb9eV8NqRGs5lw==}
+ peerDependencies:
+ react: ^19.0.0 || ^18.0.0 || ^17.0.0 || ^16.2.0
+ react-dom: ^19.0.0 || ^18.0.0 || ^17.0.0 || ^16.2.0
+
+ react-is@16.13.1:
+ resolution: {integrity: sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ==}
+
+ react-markdown@9.1.0:
+ resolution: {integrity: sha512-xaijuJB0kzGiUdG7nc2MOMDUDBWPyGAjZtUrow9XxUeua8IqeP+VlIfAZ3bphpcLTnSZXz6z9jcVC/TCwbfgdw==}
+ peerDependencies:
+ '@types/react': '>=18'
+ react: '>=18'
+
+ react-remove-scroll-bar@2.3.8:
+ resolution: {integrity: sha512-9r+yi9+mgU33AKcj6IbT9oRCO78WriSj6t/cF8DWBZJ9aOGPOTEDvdUDz1FwKim7QXWwmHqtdHnRJfhAxEG46Q==}
+ engines: {node: '>=10'}
+ peerDependencies:
+ '@types/react': '*'
+ react: ^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0
+ peerDependenciesMeta:
+ '@types/react':
+ optional: true
+
+ react-remove-scroll@2.7.2:
+ resolution: {integrity: sha512-Iqb9NjCCTt6Hf+vOdNIZGdTiH1QSqr27H/Ek9sv/a97gfueI/5h1s3yRi1nngzMUaOOToin5dI1dXKdXiF+u0Q==}
+ engines: {node: '>=10'}
+ peerDependencies:
+ '@types/react': '*'
+ react: ^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 || ^19.0.0-rc
+ peerDependenciesMeta:
+ '@types/react':
+ optional: true
+
+ react-style-singleton@2.2.3:
+ resolution: {integrity: sha512-b6jSvxvVnyptAiLjbkWLE/lOnR4lfTtDAl+eUC7RZy+QQWc6wRzIV2CE6xBuMmDxc2qIihtDCZD5NPOFl7fRBQ==}
+ engines: {node: '>=10'}
+ peerDependencies:
+ '@types/react': '*'
+ react: ^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 || ^19.0.0-rc
+ peerDependenciesMeta:
+ '@types/react':
+ optional: true
+
+ react-syntax-highlighter@15.6.6:
+ resolution: {integrity: sha512-DgXrc+AZF47+HvAPEmn7Ua/1p10jNoVZVI/LoPiYdtY+OM+/nG5yefLHKJwdKqY1adMuHFbeyBaG9j64ML7vTw==}
+ peerDependencies:
+ react: '>= 0.14.0'
+
+ react@18.3.1:
+ resolution: {integrity: sha512-wS+hAgJShR0KhEvPJArfuPVN1+Hz1t0Y6n5jLrGQbkb4urgPE/0Rve+1kMB1v/oWgHgm4WIcV+i7F2pTVj+2iQ==}
+ engines: {node: '>=0.10.0'}
+
+ read-cache@1.0.0:
+ resolution: {integrity: sha512-Owdv/Ft7IjOgm/i0xvNDZ1LrRANRfew4b2prF3OWMQLxLfu3bS8FVhCsrSCMK4lR56Y9ya+AThoTpDCTxCmpRA==}
+
+ readable-stream@3.6.2:
+ resolution: {integrity: sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA==}
+ engines: {node: '>= 6'}
+
+ readdirp@3.6.0:
+ resolution: {integrity: sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==}
+ engines: {node: '>=8.10.0'}
+
+ recast@0.23.11:
+ resolution: {integrity: sha512-YTUo+Flmw4ZXiWfQKGcwwc11KnoRAYgzAE2E7mXKCjSviTKShtxBsN6YUUBB2gtaBzKzeKunxhUwNHQuRryhWA==}
+ engines: {node: '>= 4'}
+
+ reflect.getprototypeof@1.0.10:
+ resolution: {integrity: sha512-00o4I+DVrefhv+nX0ulyi3biSHCPDe+yLv5o/p6d/UVlirijB8E16FtfwSAi4g3tcqrQ4lRAqQSoFEZJehYEcw==}
+ engines: {node: '>= 0.4'}
+
+ refractor@3.6.0:
+ resolution: {integrity: sha512-MY9W41IOWxxk31o+YvFCNyNzdkc9M20NoZK5vq6jkv4I/uh2zkWcfudj0Q1fovjUQJrNewS9NMzeTtqPf+n5EA==}
+
+ regexp.prototype.flags@1.5.4:
+ resolution: {integrity: sha512-dYqgNSZbDwkaJ2ceRd9ojCGjBq+mOm9LmtXnAnEGyHhN/5R7iDW2TRw3h+o/jCFxus3P2LfWIIiwowAjANm7IA==}
+ engines: {node: '>= 0.4'}
+
+ rehype-highlight@7.0.2:
+ resolution: {integrity: sha512-k158pK7wdC2qL3M5NcZROZ2tR/l7zOzjxXd5VGdcfIyoijjQqpHd3JKtYSBDpDZ38UI2WJWuFAtkMDxmx5kstA==}
+
+ rehype-raw@7.0.0:
+ resolution: {integrity: sha512-/aE8hCfKlQeA8LmyeyQvQF3eBiLRGNlfBJEvWH7ivp9sBqs7TNqBL5X3v157rM4IFETqDnIOO+z5M/biZbo9Ww==}
+
+ remark-gfm@4.0.1:
+ resolution: {integrity: sha512-1quofZ2RQ9EWdeN34S79+KExV1764+wCUGop5CPL1WGdD0ocPpu91lzPGbwWMECpEpd42kJGQwzRfyov9j4yNg==}
+
+ remark-parse@11.0.0:
+ resolution: {integrity: sha512-FCxlKLNGknS5ba/1lmpYijMUzX2esxW5xQqjWxw2eHFfS2MSdaHVINFmhjo+qN1WhZhNimq0dZATN9pH0IDrpA==}
+
+ remark-rehype@11.1.2:
+ resolution: {integrity: sha512-Dh7l57ianaEoIpzbp0PC9UKAdCSVklD8E5Rpw7ETfbTl3FqcOOgq5q2LVDhgGCkaBv7p24JXikPdvhhmHvKMsw==}
+
+ remark-stringify@11.0.0:
+ resolution: {integrity: sha512-1OSmLd3awB/t8qdoEOMazZkNsfVTeY4fTsgzcQFdXNq8ToTN4ZGwrMnlda4K6smTFKD+GRV6O48i6Z4iKgPPpw==}
+
+ resolve-from@4.0.0:
+ resolution: {integrity: sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==}
+ engines: {node: '>=4'}
+
+ resolve-pkg-maps@1.0.0:
+ resolution: {integrity: sha512-seS2Tj26TBVOC2NIc2rOe2y2ZO7efxITtLZcGSOnHHNOQ7CkiUBfw0Iw2ck6xkIhPwLhKNLS8BO+hEpngQlqzw==}
+
+ resolve@1.22.11:
+ resolution: {integrity: sha512-RfqAvLnMl313r7c9oclB1HhUEAezcpLjz95wFH4LVuhk9JF/r22qmVP9AMmOU4vMX7Q8pN8jwNg/CSpdFnMjTQ==}
+ engines: {node: '>= 0.4'}
+ hasBin: true
+
+ resolve@2.0.0-next.6:
+ resolution: {integrity: sha512-3JmVl5hMGtJ3kMmB3zi3DL25KfkCEyy3Tw7Gmw7z5w8M9WlwoPFnIvwChzu1+cF3iaK3sp18hhPz8ANeimdJfA==}
+ engines: {node: '>= 0.4'}
+ hasBin: true
+
+ restore-cursor@4.0.0:
+ resolution: {integrity: sha512-I9fPXU9geO9bHOt9pHHOhOkYerIMsmVaWB0rA2AI9ERh/+x/i7MV5HKBNrg+ljO5eoPVgCcnFuRjJ9uH6I/3eg==}
+ engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0}
+
+ reusify@1.1.0:
+ resolution: {integrity: sha512-g6QUff04oZpHs0eG5p83rFLhHeV00ug/Yf9nZM6fLeUrPguBTkTQOdpAWWspMh55TZfVQDPaN3NQJfbVRAxdIw==}
+ engines: {iojs: '>=1.0.0', node: '>=0.10.0'}
+
+ rimraf@3.0.2:
+ resolution: {integrity: sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==}
+ deprecated: Rimraf versions prior to v4 are no longer supported
+ hasBin: true
+
+ run-parallel@1.2.0:
+ resolution: {integrity: sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==}
+
+ safe-array-concat@1.1.3:
+ resolution: {integrity: sha512-AURm5f0jYEOydBj7VQlVvDrjeFgthDdEF5H1dP+6mNpoXOMo1quQqJ4wvJDyRZ9+pO3kGWoOdmV08cSv2aJV6Q==}
+ engines: {node: '>=0.4'}
+
+ safe-buffer@5.2.1:
+ resolution: {integrity: sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==}
+
+ safe-push-apply@1.0.0:
+ resolution: {integrity: sha512-iKE9w/Z7xCzUMIZqdBsp6pEQvwuEebH4vdpjcDWnyzaI6yl6O9FHvVpmGelvEHNsoY6wGblkxR6Zty/h00WiSA==}
+ engines: {node: '>= 0.4'}
+
+ safe-regex-test@1.1.0:
+ resolution: {integrity: sha512-x/+Cz4YrimQxQccJf5mKEbIa1NzeCRNI5Ecl/ekmlYaampdNLPalVyIcCZNNH3MvmqBugV5TMYZXv0ljslUlaw==}
+ engines: {node: '>= 0.4'}
+
+ scheduler@0.23.2:
+ resolution: {integrity: sha512-UOShsPwz7NrMUqhR6t0hWjFduvOzbtv7toDH1/hIrfRNIDBnnBWd0CwJTGvTpngVlmwGCdP9/Zl/tVrDqcuYzQ==}
+
+ secure-json-parse@2.7.0:
+ resolution: {integrity: sha512-6aU+Rwsezw7VR8/nyvKTx8QpWH9FrcYiXXlqC4z5d5XQBDRqtbfsRjnwGyqbi3gddNtWHuEk9OANUotL26qKUw==}
+
+ semver@6.3.1:
+ resolution: {integrity: sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==}
+ hasBin: true
+
+ semver@7.7.4:
+ resolution: {integrity: sha512-vFKC2IEtQnVhpT78h1Yp8wzwrf8CM+MzKMHGJZfBtzhZNycRFnXsHk6E5TxIkkMsgNS7mdX3AGB7x2QM2di4lA==}
+ engines: {node: '>=10'}
+ hasBin: true
+
+ set-function-length@1.2.2:
+ resolution: {integrity: sha512-pgRc4hJ4/sNjWCSS9AmnS40x3bNMDTknHgL5UaMBTMyJnU90EgWh1Rz+MC9eFu4BuN/UwZjKQuY/1v3rM7HMfg==}
+ engines: {node: '>= 0.4'}
+
+ set-function-name@2.0.2:
+ resolution: {integrity: sha512-7PGFlmtwsEADb0WYyvCMa1t+yke6daIG4Wirafur5kcf+MhUnPms1UeR0CKQdTZD81yESwMHbtn+TR+dMviakQ==}
+ engines: {node: '>= 0.4'}
+
+ set-proto@1.0.0:
+ resolution: {integrity: sha512-RJRdvCo6IAnPdsvP/7m6bsQqNnn1FCBX5ZNtFL98MmFF/4xAIJTIg1YbHW5DC2W5SKZanrC6i4HsJqlajw/dZw==}
+ engines: {node: '>= 0.4'}
+
+ shadcn-ui@0.8.0:
+ resolution: {integrity: sha512-avqRgjJ6PIQQXdfvoCAWQpyLTLk6oHhtU5DQKmLeYcgu1ZIsgMqA9MKWAkr0HpEdCAenCCZvFbvJ2C2m5ZXRiA==}
+ hasBin: true
+
+ shebang-command@2.0.0:
+ resolution: {integrity: sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==}
+ engines: {node: '>=8'}
+
+ shebang-regex@3.0.0:
+ resolution: {integrity: sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==}
+ engines: {node: '>=8'}
+
+ side-channel-list@1.0.0:
+ resolution: {integrity: sha512-FCLHtRD/gnpCiCHEiJLOwdmFP+wzCmDEkc9y7NsYxeF4u7Btsn1ZuwgwJGxImImHicJArLP4R0yX4c2KCrMrTA==}
+ engines: {node: '>= 0.4'}
+
+ side-channel-map@1.0.1:
+ resolution: {integrity: sha512-VCjCNfgMsby3tTdo02nbjtM/ewra6jPHmpThenkTYh8pG9ucZ/1P8So4u4FGBek/BjpOVsDCMoLA/iuBKIFXRA==}
+ engines: {node: '>= 0.4'}
+
+ side-channel-weakmap@1.0.2:
+ resolution: {integrity: sha512-WPS/HvHQTYnHisLo9McqBHOJk2FkHO/tlpvldyrnem4aeQp4hai3gythswg6p01oSoTl58rcpiFAjF2br2Ak2A==}
+ engines: {node: '>= 0.4'}
+
+ side-channel@1.1.0:
+ resolution: {integrity: sha512-ZX99e6tRweoUXqR+VBrslhda51Nh5MTQwou5tnUDgbtyM0dBgmhEDtWGP/xbKn6hqfPRHujUNwz5fy/wbbhnpw==}
+ engines: {node: '>= 0.4'}
+
+ signal-exit@3.0.7:
+ resolution: {integrity: sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==}
+
+ signal-exit@4.1.0:
+ resolution: {integrity: sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw==}
+ engines: {node: '>=14'}
+
+ sisteransi@1.0.5:
+ resolution: {integrity: sha512-bLGGlR1QxBcynn2d5YmDX4MGjlZvy2MRBDRNHLJ8VI6l6+9FUiyTFNJ0IveOSP0bcXgVDPRcfGqA0pjaqUpfVg==}
+
+ slash@3.0.0:
+ resolution: {integrity: sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==}
+ engines: {node: '>=8'}
+
+ source-map-js@1.2.1:
+ resolution: {integrity: sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==}
+ engines: {node: '>=0.10.0'}
+
+ source-map@0.6.1:
+ resolution: {integrity: sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==}
+ engines: {node: '>=0.10.0'}
+
+ space-separated-tokens@1.1.5:
+ resolution: {integrity: sha512-q/JSVd1Lptzhf5bkYm4ob4iWPjx0KiRe3sRFBNrVqbJkFaBm5vbbowy1mymoPNLRa52+oadOhJ+K49wsSeSjTA==}
+
+ space-separated-tokens@2.0.2:
+ resolution: {integrity: sha512-PEGlAwrG8yXGXRjW32fGbg66JAlOAwbObuqVoJpv/mRgoWDQfgH1wDPvtzWyUSNAXBGSk8h755YDbbcEy3SH2Q==}
+
+ stable-hash@0.0.5:
+ resolution: {integrity: sha512-+L3ccpzibovGXFK+Ap/f8LOS0ahMrHTf3xu7mMLSpEGU0EO9ucaysSylKo9eRDFNhWve/y275iPmIZ4z39a9iA==}
+
+ stdin-discarder@0.1.0:
+ resolution: {integrity: sha512-xhV7w8S+bUwlPTb4bAOUQhv8/cSS5offJuX8GQGq32ONF0ZtDWKfkdomM3HMRA+LhX6um/FZ0COqlwsjD53LeQ==}
+ engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0}
+
+ stop-iteration-iterator@1.1.0:
+ resolution: {integrity: sha512-eLoXW/DHyl62zxY4SCaIgnRhuMr6ri4juEYARS8E6sCEqzKpOiE521Ucofdx+KnDZl5xmvGYaaKCk5FEOxJCoQ==}
+ engines: {node: '>= 0.4'}
+
+ streamsearch@1.1.0:
+ resolution: {integrity: sha512-Mcc5wHehp9aXz1ax6bZUyY5afg9u2rv5cqQI3mRrYkGC8rW2hM02jWuwjtL++LS5qinSyhj2QfLyNsuc+VsExg==}
+ engines: {node: '>=10.0.0'}
+
+ string-width@4.2.3:
+ resolution: {integrity: sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==}
+ engines: {node: '>=8'}
+
+ string-width@5.1.2:
+ resolution: {integrity: sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA==}
+ engines: {node: '>=12'}
+
+ string.prototype.includes@2.0.1:
+ resolution: {integrity: sha512-o7+c9bW6zpAdJHTtujeePODAhkuicdAryFsfVKwA+wGw89wJ4GTY484WTucM9hLtDEOpOvI+aHnzqnC5lHp4Rg==}
+ engines: {node: '>= 0.4'}
+
+ string.prototype.matchall@4.0.12:
+ resolution: {integrity: sha512-6CC9uyBL+/48dYizRf7H7VAYCMCNTBeM78x/VTUe9bFEaxBepPJDa1Ow99LqI/1yF7kuy7Q3cQsYMrcjGUcskA==}
+ engines: {node: '>= 0.4'}
+
+ string.prototype.repeat@1.0.0:
+ resolution: {integrity: sha512-0u/TldDbKD8bFCQ/4f5+mNRrXwZ8hg2w7ZR8wa16e8z9XpePWl3eGEcUD0OXpEH/VJH/2G3gjUtR3ZOiBe2S/w==}
+
+ string.prototype.trim@1.2.10:
+ resolution: {integrity: sha512-Rs66F0P/1kedk5lyYyH9uBzuiI/kNRmwJAR9quK6VOtIpZ2G+hMZd+HQbbv25MgCA6gEffoMZYxlTod4WcdrKA==}
+ engines: {node: '>= 0.4'}
+
+ string.prototype.trimend@1.0.9:
+ resolution: {integrity: sha512-G7Ok5C6E/j4SGfyLCloXTrngQIQU3PWtXGst3yM7Bea9FRURf1S42ZHlZZtsNque2FN2PoUhfZXYLNWwEr4dLQ==}
+ engines: {node: '>= 0.4'}
+
+ string.prototype.trimstart@1.0.8:
+ resolution: {integrity: sha512-UXSH262CSZY1tfu3G3Secr6uGLCFVPMhIqHjlgCUtCCcgihYc/xKs9djMTMUOb2j1mVSeU8EU6NWc/iQKU6Gfg==}
+ engines: {node: '>= 0.4'}
+
+ string_decoder@1.3.0:
+ resolution: {integrity: sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==}
+
+ stringify-entities@4.0.4:
+ resolution: {integrity: sha512-IwfBptatlO+QCJUo19AqvrPNqlVMpW9YEL2LIVY+Rpv2qsjCGxaDLNRgeGsQWJhfItebuJhsGSLjaBbNSQ+ieg==}
+
+ strip-ansi@6.0.1:
+ resolution: {integrity: sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==}
+ engines: {node: '>=8'}
+
+ strip-ansi@7.2.0:
+ resolution: {integrity: sha512-yDPMNjp4WyfYBkHnjIRLfca1i6KMyGCtsVgoKe/z1+6vukgaENdgGBZt+ZmKPc4gavvEZ5OgHfHdrazhgNyG7w==}
+ engines: {node: '>=12'}
+
+ strip-bom@3.0.0:
+ resolution: {integrity: sha512-vavAMRXOgBVNF6nyEEmL3DBK19iRpDcoIwW+swQ+CbGiu7lju6t+JklA1MHweoWtadgt4ISVUsXLyDq34ddcwA==}
+ engines: {node: '>=4'}
+
+ strip-final-newline@3.0.0:
+ resolution: {integrity: sha512-dOESqjYr96iWYylGObzd39EuNTa5VJxyvVAEm5Jnh7KGo75V43Hk1odPQkNDyXNmUR6k+gEiDVXnjB8HJ3crXw==}
+ engines: {node: '>=12'}
+
+ strip-json-comments@3.1.1:
+ resolution: {integrity: sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==}
+ engines: {node: '>=8'}
+
+ style-to-js@1.1.21:
+ resolution: {integrity: sha512-RjQetxJrrUJLQPHbLku6U/ocGtzyjbJMP9lCNK7Ag0CNh690nSH8woqWH9u16nMjYBAok+i7JO1NP2pOy8IsPQ==}
+
+ style-to-object@1.0.14:
+ resolution: {integrity: sha512-LIN7rULI0jBscWQYaSswptyderlarFkjQ+t79nzty8tcIAceVomEVlLzH5VP4Cmsv6MtKhs7qaAiwlcp+Mgaxw==}
+
+ styled-jsx@5.1.1:
+ resolution: {integrity: sha512-pW7uC1l4mBZ8ugbiZrcIsiIvVx1UmTfw7UkC3Um2tmfUq9Bhk8IiyEIPl6F8agHgjzku6j0xQEZbfA5uSgSaCw==}
+ engines: {node: '>= 12.0.0'}
+ peerDependencies:
+ '@babel/core': '*'
+ babel-plugin-macros: '*'
+ react: '>= 16.8.0 || 17.x.x || ^18.0.0-0'
+ peerDependenciesMeta:
+ '@babel/core':
+ optional: true
+ babel-plugin-macros:
+ optional: true
+
+ sucrase@3.35.1:
+ resolution: {integrity: sha512-DhuTmvZWux4H1UOnWMB3sk0sbaCVOoQZjv8u1rDoTV0HTdGem9hkAZtl4JZy8P2z4Bg0nT+YMeOFyVr4zcG5Tw==}
+ engines: {node: '>=16 || 14 >=14.17'}
+ hasBin: true
+
+ supports-color@7.2.0:
+ resolution: {integrity: sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==}
+ engines: {node: '>=8'}
+
+ supports-preserve-symlinks-flag@1.0.0:
+ resolution: {integrity: sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==}
+ engines: {node: '>= 0.4'}
+
+ swr@2.4.1:
+ resolution: {integrity: sha512-2CC6CiKQtEwaEeNiqWTAw9PGykW8SR5zZX8MZk6TeAvEAnVS7Visz8WzphqgtQ8v2xz/4Q5K+j+SeMaKXeeQIA==}
+ peerDependencies:
+ react: ^16.11.0 || ^17.0.0 || ^18.0.0 || ^19.0.0
+
+ tailwind-merge@2.6.1:
+ resolution: {integrity: sha512-Oo6tHdpZsGpkKG88HJ8RR1rg/RdnEkQEfMoEk2x1XRI3F1AxeU+ijRXpiVUF4UbLfcxxRGw6TbUINKYdWVsQTQ==}
+
+ tailwindcss-animate@1.0.7:
+ resolution: {integrity: sha512-bl6mpH3T7I3UFxuvDEXLxy/VuFxBk5bbzplh7tXI68mwMokNYd1t9qPBHlnyTwfa4JGC4zP516I1hYYtQ/vspA==}
+ peerDependencies:
+ tailwindcss: '>=3.0.0 || insiders'
+
+ tailwindcss@3.4.19:
+ resolution: {integrity: sha512-3ofp+LL8E+pK/JuPLPggVAIaEuhvIz4qNcf3nA1Xn2o/7fb7s/TYpHhwGDv1ZU3PkBluUVaF8PyCHcm48cKLWQ==}
+ engines: {node: '>=14.0.0'}
+ hasBin: true
+
+ text-table@0.2.0:
+ resolution: {integrity: sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw==}
+
+ thenify-all@1.6.0:
+ resolution: {integrity: sha512-RNxQH/qI8/t3thXJDwcstUO4zeqo64+Uy/+sNVRBx4Xn2OX+OZ9oP+iJnNFqplFra2ZUVeKCSa2oVWi3T4uVmA==}
+ engines: {node: '>=0.8'}
+
+ thenify@3.3.1:
+ resolution: {integrity: sha512-RVZSIV5IG10Hk3enotrhvz0T9em6cyHBLkH/YAZuKqd8hRkKhSfCGIcP2KUY0EPxndzANBmNllzWPwak+bheSw==}
+
+ throttleit@2.1.0:
+ resolution: {integrity: sha512-nt6AMGKW1p/70DF/hGBdJB57B8Tspmbp5gfJ8ilhLnt7kkr2ye7hzD6NVG8GGErk2HWF34igrL2CXmNIkzKqKw==}
+ engines: {node: '>=18'}
+
+ tiny-invariant@1.3.3:
+ resolution: {integrity: sha512-+FbBPE1o9QAYvviau/qC5SE3caw21q3xkvWKBtja5vgqOWIHHJ3ioaq1VPfn/Szqctz2bU/oYeKd9/z5BL+PVg==}
+
+ tinyglobby@0.2.15:
+ resolution: {integrity: sha512-j2Zq4NyQYG5XMST4cbs02Ak8iJUdxRM0XI5QyxXuZOzKOINmWurp3smXu3y5wDcJrptwpSjgXHzIQxR0omXljQ==}
+ engines: {node: '>=12.0.0'}
+
+ to-regex-range@5.0.1:
+ resolution: {integrity: sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==}
+ engines: {node: '>=8.0'}
+
+ trim-lines@3.0.1:
+ resolution: {integrity: sha512-kRj8B+YHZCc9kQYdWfJB2/oUl9rA99qbowYYBtr4ui4mZyAQ2JpvVBd/6U2YloATfqBhBTSMhTpgBHtU0Mf3Rg==}
+
+ trough@2.2.0:
+ resolution: {integrity: sha512-tmMpK00BjZiUyVyvrBK7knerNgmgvcV/KLVyuma/SC+TQN167GrMRciANTz09+k3zW8L8t60jWO1GpfkZdjTaw==}
+
+ ts-api-utils@1.4.3:
+ resolution: {integrity: sha512-i3eMG77UTMD0hZhgRS562pv83RC6ukSAC2GMNWc+9dieh/+jDM5u5YG+NHX6VNDRHQcHwmsTHctP9LhbC3WxVw==}
+ engines: {node: '>=16'}
+ peerDependencies:
+ typescript: '>=4.2.0'
+
+ ts-interface-checker@0.1.13:
+ resolution: {integrity: sha512-Y/arvbn+rrz3JCKl9C4kVNfTfSm2/mEp5FSz5EsZSANGPSlQrpRI5M4PKF+mJnE52jOO90PnPSc3Ur3bTQw0gA==}
+
+ ts-morph@18.0.0:
+ resolution: {integrity: sha512-Kg5u0mk19PIIe4islUI/HWRvm9bC1lHejK4S0oh1zaZ77TMZAEmQC0sHQYiu2RgCQFZKXz1fMVi/7nOOeirznA==}
+
+ tsconfig-paths@3.15.0:
+ resolution: {integrity: sha512-2Ac2RgzDe/cn48GvOe3M+o82pEFewD3UPbyoUHHdKasHwJKjds4fLXWf/Ux5kATBKN20oaFGu+jbElp1pos0mg==}
+
+ tsconfig-paths@4.2.0:
+ resolution: {integrity: sha512-NoZ4roiN7LnbKn9QqE1amc9DJfzvZXxF4xDavcOWt1BPkdx+m+0gJuPM+S0vCe7zTJMYUP0R8pO2XMr+Y8oLIg==}
+ engines: {node: '>=6'}
+
+ tslib@2.8.1:
+ resolution: {integrity: sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==}
+
+ type-check@0.4.0:
+ resolution: {integrity: sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew==}
+ engines: {node: '>= 0.8.0'}
+
+ type-fest@0.20.2:
+ resolution: {integrity: sha512-Ne+eE4r0/iWnpAxD852z3A+N0Bt5RN//NjJwRd2VFHEmrywxf5vsZlh4R6lixl6B+wz/8d+maTSAkN1FIkI3LQ==}
+ engines: {node: '>=10'}
+
+ typed-array-buffer@1.0.3:
+ resolution: {integrity: sha512-nAYYwfY3qnzX30IkA6AQZjVbtK6duGontcQm1WSG1MD94YLqK0515GNApXkoxKOWMusVssAHWLh9SeaoefYFGw==}
+ engines: {node: '>= 0.4'}
+
+ typed-array-byte-length@1.0.3:
+ resolution: {integrity: sha512-BaXgOuIxz8n8pIq3e7Atg/7s+DpiYrxn4vdot3w9KbnBhcRQq6o3xemQdIfynqSeXeDrF32x+WvfzmOjPiY9lg==}
+ engines: {node: '>= 0.4'}
+
+ typed-array-byte-offset@1.0.4:
+ resolution: {integrity: sha512-bTlAFB/FBYMcuX81gbL4OcpH5PmlFHqlCCpAl8AlEzMz5k53oNDvN8p1PNOWLEmI2x4orp3raOFB51tv9X+MFQ==}
+ engines: {node: '>= 0.4'}
+
+ typed-array-length@1.0.7:
+ resolution: {integrity: sha512-3KS2b+kL7fsuk/eJZ7EQdnEmQoaho/r6KUef7hxvltNA5DR8NAUM+8wJMbJyZ4G9/7i3v5zPBIMN5aybAh2/Jg==}
+ engines: {node: '>= 0.4'}
+
+ typescript@5.9.3:
+ resolution: {integrity: sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw==}
+ engines: {node: '>=14.17'}
+ hasBin: true
+
+ unbox-primitive@1.1.0:
+ resolution: {integrity: sha512-nWJ91DjeOkej/TA8pXQ3myruKpKEYgqvpw9lz4OPHj/NWFNluYrjbz9j01CJ8yKQd2g4jFoOkINCTW2I5LEEyw==}
+ engines: {node: '>= 0.4'}
+
+ undici-types@6.21.0:
+ resolution: {integrity: sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ==}
+
+ unified@11.0.5:
+ resolution: {integrity: sha512-xKvGhPWw3k84Qjh8bI3ZeJjqnyadK+GEFtazSfZv/rKeTkTjOJho6mFqh2SM96iIcZokxiOpg78GazTSg8+KHA==}
+
+ unist-util-find-after@5.0.0:
+ resolution: {integrity: sha512-amQa0Ep2m6hE2g72AugUItjbuM8X8cGQnFoHk0pGfrFeT9GZhzN5SW8nRsiGKK7Aif4CrACPENkA6P/Lw6fHGQ==}
+
+ unist-util-is@6.0.1:
+ resolution: {integrity: sha512-LsiILbtBETkDz8I9p1dQ0uyRUWuaQzd/cuEeS1hoRSyW5E5XGmTzlwY1OrNzzakGowI9Dr/I8HVaw4hTtnxy8g==}
+
+ unist-util-position@5.0.0:
+ resolution: {integrity: sha512-fucsC7HjXvkB5R3kTCO7kUjRdrS0BJt3M/FPxmHMBOm8JQi2BsHAHFsy27E0EolP8rp0NzXsJ+jNPyDWvOJZPA==}
+
+ unist-util-stringify-position@4.0.0:
+ resolution: {integrity: sha512-0ASV06AAoKCDkS2+xw5RXJywruurpbC4JZSm7nr7MOt1ojAzvyyaO+UxZf18j8FCF6kmzCZKcAgN/yu2gm2XgQ==}
+
+ unist-util-visit-parents@6.0.2:
+ resolution: {integrity: sha512-goh1s1TBrqSqukSc8wrjwWhL0hiJxgA8m4kFxGlQ+8FYQ3C/m11FcTs4YYem7V664AhHVvgoQLk890Ssdsr2IQ==}
+
+ unist-util-visit@5.1.0:
+ resolution: {integrity: sha512-m+vIdyeCOpdr/QeQCu2EzxX/ohgS8KbnPDgFni4dQsfSCtpz8UqDyY5GjRru8PDKuYn7Fq19j1CQ+nJSsGKOzg==}
+
+ universalify@2.0.1:
+ resolution: {integrity: sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw==}
+ engines: {node: '>= 10.0.0'}
+
+ unrs-resolver@1.11.1:
+ resolution: {integrity: sha512-bSjt9pjaEBnNiGgc9rUiHGKv5l4/TGzDmYw3RhnkJGtLhbnnA/5qJj7x3dNDCRx/PJxu774LlH8lCOlB4hEfKg==}
+
+ update-browserslist-db@1.2.3:
+ resolution: {integrity: sha512-Js0m9cx+qOgDxo0eMiFGEueWztz+d4+M3rGlmKPT+T4IS/jP4ylw3Nwpu6cpTTP8R1MAC1kF4VbdLt3ARf209w==}
+ hasBin: true
+ peerDependencies:
+ browserslist: '>= 4.21.0'
+
+ uri-js@4.4.1:
+ resolution: {integrity: sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==}
+
+ use-callback-ref@1.3.3:
+ resolution: {integrity: sha512-jQL3lRnocaFtu3V00JToYz/4QkNWswxijDaCVNZRiRTO3HQDLsdu1ZtmIUvV4yPp+rvWm5j0y0TG/S61cuijTg==}
+ engines: {node: '>=10'}
+ peerDependencies:
+ '@types/react': '*'
+ react: ^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 || ^19.0.0-rc
+ peerDependenciesMeta:
+ '@types/react':
+ optional: true
+
+ use-sidecar@1.1.3:
+ resolution: {integrity: sha512-Fedw0aZvkhynoPYlA5WXrMCAMm+nSWdZt6lzJQ7Ok8S6Q+VsHmHpRWndVRJ8Be0ZbkfPc5LRYH+5XrzXcEeLRQ==}
+ engines: {node: '>=10'}
+ peerDependencies:
+ '@types/react': '*'
+ react: ^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 || ^19.0.0-rc
+ peerDependenciesMeta:
+ '@types/react':
+ optional: true
+
+ use-sync-external-store@1.6.0:
+ resolution: {integrity: sha512-Pp6GSwGP/NrPIrxVFAIkOQeyw8lFenOHijQWkUTrDvrF4ALqylP2C/KCkeS9dpUM3KvYRQhna5vt7IL95+ZQ9w==}
+ peerDependencies:
+ react: ^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0
+
+ util-deprecate@1.0.2:
+ resolution: {integrity: sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==}
+
+ vfile-location@5.0.3:
+ resolution: {integrity: sha512-5yXvWDEgqeiYiBe1lbxYF7UMAIm/IcopxMHrMQDq3nvKcjPKIhZklUKL+AE7J7uApI4kwe2snsK+eI6UTj9EHg==}
+
+ vfile-message@4.0.3:
+ resolution: {integrity: sha512-QTHzsGd1EhbZs4AsQ20JX1rC3cOlt/IWJruk893DfLRr57lcnOeMaWG4K0JrRta4mIJZKth2Au3mM3u03/JWKw==}
+
+ vfile@6.0.3:
+ resolution: {integrity: sha512-KzIbH/9tXat2u30jf+smMwFCsno4wHVdNmzFyL+T/L3UGqqk6JKfVqOFOZEpZSHADH1k40ab6NUIXZq422ov3Q==}
+
+ wcwidth@1.0.1:
+ resolution: {integrity: sha512-XHPEwS0q6TaxcvG85+8EYkbiCux2XtWG2mkc47Ng2A77BQu9+DqIOJldST4HgPkuea7dvKSj5VgX3P1d4rW8Tg==}
+
+ web-namespaces@2.0.1:
+ resolution: {integrity: sha512-bKr1DkiNa2krS7qxNtdrtHAmzuYGFQLiQ13TsorsdT6ULTkPLKuu5+GsFpDlg6JFjUTwX2DyhMPG2be8uPrqsQ==}
+
+ web-streams-polyfill@3.3.3:
+ resolution: {integrity: sha512-d2JWLCivmZYTSIoge9MsgFCZrt571BikcWGYkjC1khllbTeDlGqZ2D8vD8E/lJa8WGWbb7Plm8/XJYV7IJHZZw==}
+ engines: {node: '>= 8'}
+
+ which-boxed-primitive@1.1.1:
+ resolution: {integrity: sha512-TbX3mj8n0odCBFVlY8AxkqcHASw3L60jIuF8jFP78az3C2YhmGvqbHBpAjTRH2/xqYunrJ9g1jSyjCjpoWzIAA==}
+ engines: {node: '>= 0.4'}
+
+ which-builtin-type@1.2.1:
+ resolution: {integrity: sha512-6iBczoX+kDQ7a3+YJBnh3T+KZRxM/iYNPXicqk66/Qfm1b93iu+yOImkg0zHbj5LNOcNv1TEADiZ0xa34B4q6Q==}
+ engines: {node: '>= 0.4'}
+
+ which-collection@1.0.2:
+ resolution: {integrity: sha512-K4jVyjnBdgvc86Y6BkaLZEN933SwYOuBFkdmBu9ZfkcAbdVbpITnDmjvZ/aQjRXQrv5EPkTnD1s39GiiqbngCw==}
+ engines: {node: '>= 0.4'}
+
+ which-typed-array@1.1.20:
+ resolution: {integrity: sha512-LYfpUkmqwl0h9A2HL09Mms427Q1RZWuOHsukfVcKRq9q95iQxdw0ix1JQrqbcDR9PH1QDwf5Qo8OZb5lksZ8Xg==}
+ engines: {node: '>= 0.4'}
+
+ which@2.0.2:
+ resolution: {integrity: sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==}
+ engines: {node: '>= 8'}
+ hasBin: true
+
+ word-wrap@1.2.5:
+ resolution: {integrity: sha512-BN22B5eaMMI9UMtjrGd5g5eCYPpCPDUy0FJXbYsaT5zYxjFOckS53SQDE3pWkVoWpHXVb3BrYcEN4Twa55B5cA==}
+ engines: {node: '>=0.10.0'}
+
+ wrap-ansi@7.0.0:
+ resolution: {integrity: sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==}
+ engines: {node: '>=10'}
+
+ wrap-ansi@8.1.0:
+ resolution: {integrity: sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ==}
+ engines: {node: '>=12'}
+
+ wrappy@1.0.2:
+ resolution: {integrity: sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==}
+
+ xtend@4.0.2:
+ resolution: {integrity: sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ==}
+ engines: {node: '>=0.4'}
+
+ yallist@3.1.1:
+ resolution: {integrity: sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==}
+
+ yocto-queue@0.1.0:
+ resolution: {integrity: sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==}
+ engines: {node: '>=10'}
+
+ zod-to-json-schema@3.25.2:
+ resolution: {integrity: sha512-O/PgfnpT1xKSDeQYSCfRI5Gy3hPf91mKVDuYLUHZJMiDFptvP41MSnWofm8dnCm0256ZNfZIM7DSzuSMAFnjHA==}
+ peerDependencies:
+ zod: ^3.25.28 || ^4
+
+ zod@3.25.76:
+ resolution: {integrity: sha512-gzUt/qt81nXsFGKIFcC3YnfEAx5NkunCfnDlvuBSSFS02bcXu4Lmea0AFIUwbLWxWPx3d9p8S5QoaujKcNQxcQ==}
+
+ zwitch@2.0.4:
+ resolution: {integrity: sha512-bXE4cR/kVZhKZX/RjPEflHaKVhUVl85noU3v6b8apfQEc1x4A+zBxjZ4lN8LqGd6WZ3dl98pY4o717VFmoPp+A==}
+
+snapshots:
+
+ '@ai-sdk/provider-utils@2.2.8(zod@3.25.76)':
+ dependencies:
+ '@ai-sdk/provider': 1.1.3
+ nanoid: 3.3.11
+ secure-json-parse: 2.7.0
+ zod: 3.25.76
+
+ '@ai-sdk/provider@1.1.3':
+ dependencies:
+ json-schema: 0.4.0
+
+ '@ai-sdk/react@1.2.12(react@18.3.1)(zod@3.25.76)':
+ dependencies:
+ '@ai-sdk/provider-utils': 2.2.8(zod@3.25.76)
+ '@ai-sdk/ui-utils': 1.2.11(zod@3.25.76)
+ react: 18.3.1
+ swr: 2.4.1(react@18.3.1)
+ throttleit: 2.1.0
+ optionalDependencies:
+ zod: 3.25.76
+
+ '@ai-sdk/ui-utils@1.2.11(zod@3.25.76)':
+ dependencies:
+ '@ai-sdk/provider': 1.1.3
+ '@ai-sdk/provider-utils': 2.2.8(zod@3.25.76)
+ zod: 3.25.76
+ zod-to-json-schema: 3.25.2(zod@3.25.76)
+
+ '@alloc/quick-lru@5.2.0': {}
+
+ '@antfu/ni@0.21.12': {}
+
+ '@babel/code-frame@7.29.0':
+ dependencies:
+ '@babel/helper-validator-identifier': 7.28.5
+ js-tokens: 4.0.0
+ picocolors: 1.1.1
+
+ '@babel/compat-data@7.29.0': {}
+
+ '@babel/core@7.29.0':
+ dependencies:
+ '@babel/code-frame': 7.29.0
+ '@babel/generator': 7.29.1
+ '@babel/helper-compilation-targets': 7.28.6
+ '@babel/helper-module-transforms': 7.28.6(@babel/core@7.29.0)
+ '@babel/helpers': 7.29.2
+ '@babel/parser': 7.29.2
+ '@babel/template': 7.28.6
+ '@babel/traverse': 7.29.0
+ '@babel/types': 7.29.0
+ '@jridgewell/remapping': 2.3.5
+ convert-source-map: 2.0.0
+ debug: 4.4.3
+ gensync: 1.0.0-beta.2
+ json5: 2.2.3
+ semver: 6.3.1
+ transitivePeerDependencies:
+ - supports-color
+
+ '@babel/generator@7.29.1':
+ dependencies:
+ '@babel/parser': 7.29.2
+ '@babel/types': 7.29.0
+ '@jridgewell/gen-mapping': 0.3.13
+ '@jridgewell/trace-mapping': 0.3.31
+ jsesc: 3.1.0
+
+ '@babel/helper-annotate-as-pure@7.27.3':
+ dependencies:
+ '@babel/types': 7.29.0
+
+ '@babel/helper-compilation-targets@7.28.6':
+ dependencies:
+ '@babel/compat-data': 7.29.0
+ '@babel/helper-validator-option': 7.27.1
+ browserslist: 4.28.2
+ lru-cache: 5.1.1
+ semver: 6.3.1
+
+ '@babel/helper-create-class-features-plugin@7.28.6(@babel/core@7.29.0)':
+ dependencies:
+ '@babel/core': 7.29.0
+ '@babel/helper-annotate-as-pure': 7.27.3
+ '@babel/helper-member-expression-to-functions': 7.28.5
+ '@babel/helper-optimise-call-expression': 7.27.1
+ '@babel/helper-replace-supers': 7.28.6(@babel/core@7.29.0)
+ '@babel/helper-skip-transparent-expression-wrappers': 7.27.1
+ '@babel/traverse': 7.29.0
+ semver: 6.3.1
+ transitivePeerDependencies:
+ - supports-color
+
+ '@babel/helper-globals@7.28.0': {}
+
+ '@babel/helper-member-expression-to-functions@7.28.5':
+ dependencies:
+ '@babel/traverse': 7.29.0
+ '@babel/types': 7.29.0
+ transitivePeerDependencies:
+ - supports-color
+
+ '@babel/helper-module-imports@7.28.6':
+ dependencies:
+ '@babel/traverse': 7.29.0
+ '@babel/types': 7.29.0
+ transitivePeerDependencies:
+ - supports-color
+
+ '@babel/helper-module-transforms@7.28.6(@babel/core@7.29.0)':
+ dependencies:
+ '@babel/core': 7.29.0
+ '@babel/helper-module-imports': 7.28.6
+ '@babel/helper-validator-identifier': 7.28.5
+ '@babel/traverse': 7.29.0
+ transitivePeerDependencies:
+ - supports-color
+
+ '@babel/helper-optimise-call-expression@7.27.1':
+ dependencies:
+ '@babel/types': 7.29.0
+
+ '@babel/helper-plugin-utils@7.28.6': {}
+
+ '@babel/helper-replace-supers@7.28.6(@babel/core@7.29.0)':
+ dependencies:
+ '@babel/core': 7.29.0
+ '@babel/helper-member-expression-to-functions': 7.28.5
+ '@babel/helper-optimise-call-expression': 7.27.1
+ '@babel/traverse': 7.29.0
+ transitivePeerDependencies:
+ - supports-color
+
+ '@babel/helper-skip-transparent-expression-wrappers@7.27.1':
+ dependencies:
+ '@babel/traverse': 7.29.0
+ '@babel/types': 7.29.0
+ transitivePeerDependencies:
+ - supports-color
+
+ '@babel/helper-string-parser@7.27.1': {}
+
+ '@babel/helper-validator-identifier@7.28.5': {}
+
+ '@babel/helper-validator-option@7.27.1': {}
+
+ '@babel/helpers@7.29.2':
+ dependencies:
+ '@babel/template': 7.28.6
+ '@babel/types': 7.29.0
+
+ '@babel/parser@7.29.2':
+ dependencies:
+ '@babel/types': 7.29.0
+
+ '@babel/plugin-syntax-typescript@7.28.6(@babel/core@7.29.0)':
+ dependencies:
+ '@babel/core': 7.29.0
+ '@babel/helper-plugin-utils': 7.28.6
+
+ '@babel/plugin-transform-typescript@7.28.6(@babel/core@7.29.0)':
+ dependencies:
+ '@babel/core': 7.29.0
+ '@babel/helper-annotate-as-pure': 7.27.3
+ '@babel/helper-create-class-features-plugin': 7.28.6(@babel/core@7.29.0)
+ '@babel/helper-plugin-utils': 7.28.6
+ '@babel/helper-skip-transparent-expression-wrappers': 7.27.1
+ '@babel/plugin-syntax-typescript': 7.28.6(@babel/core@7.29.0)
+ transitivePeerDependencies:
+ - supports-color
+
+ '@babel/runtime@7.29.2': {}
+
+ '@babel/template@7.28.6':
+ dependencies:
+ '@babel/code-frame': 7.29.0
+ '@babel/parser': 7.29.2
+ '@babel/types': 7.29.0
+
+ '@babel/traverse@7.29.0':
+ dependencies:
+ '@babel/code-frame': 7.29.0
+ '@babel/generator': 7.29.1
+ '@babel/helper-globals': 7.28.0
+ '@babel/parser': 7.29.2
+ '@babel/template': 7.28.6
+ '@babel/types': 7.29.0
+ debug: 4.4.3
+ transitivePeerDependencies:
+ - supports-color
+
+ '@babel/types@7.29.0':
+ dependencies:
+ '@babel/helper-string-parser': 7.27.1
+ '@babel/helper-validator-identifier': 7.28.5
+
+ '@emnapi/core@1.9.2':
+ dependencies:
+ '@emnapi/wasi-threads': 1.2.1
+ tslib: 2.8.1
+ optional: true
+
+ '@emnapi/runtime@1.9.2':
+ dependencies:
+ tslib: 2.8.1
+ optional: true
+
+ '@emnapi/wasi-threads@1.2.1':
+ dependencies:
+ tslib: 2.8.1
+ optional: true
+
+ '@eslint-community/eslint-utils@4.9.1(eslint@8.57.1)':
+ dependencies:
+ eslint: 8.57.1
+ eslint-visitor-keys: 3.4.3
+
+ '@eslint-community/regexpp@4.12.2': {}
+
+ '@eslint/eslintrc@2.1.4':
+ dependencies:
+ ajv: 6.14.0
+ debug: 4.4.3
+ espree: 9.6.1
+ globals: 13.24.0
+ ignore: 5.3.2
+ import-fresh: 3.3.1
+ js-yaml: 4.1.1
+ minimatch: 3.1.5
+ strip-json-comments: 3.1.1
+ transitivePeerDependencies:
+ - supports-color
+
+ '@eslint/js@8.57.1': {}
+
+ '@floating-ui/core@1.7.5':
+ dependencies:
+ '@floating-ui/utils': 0.2.11
+
+ '@floating-ui/dom@1.7.6':
+ dependencies:
+ '@floating-ui/core': 1.7.5
+ '@floating-ui/utils': 0.2.11
+
+ '@floating-ui/react-dom@2.1.8(react-dom@18.3.1(react@18.3.1))(react@18.3.1)':
+ dependencies:
+ '@floating-ui/dom': 1.7.6
+ react: 18.3.1
+ react-dom: 18.3.1(react@18.3.1)
+
+ '@floating-ui/utils@0.2.11': {}
+
+ '@humanwhocodes/config-array@0.13.0':
+ dependencies:
+ '@humanwhocodes/object-schema': 2.0.3
+ debug: 4.4.3
+ minimatch: 3.1.5
+ transitivePeerDependencies:
+ - supports-color
+
+ '@humanwhocodes/module-importer@1.0.1': {}
+
+ '@humanwhocodes/object-schema@2.0.3': {}
+
+ '@isaacs/cliui@8.0.2':
+ dependencies:
+ string-width: 5.1.2
+ string-width-cjs: string-width@4.2.3
+ strip-ansi: 7.2.0
+ strip-ansi-cjs: strip-ansi@6.0.1
+ wrap-ansi: 8.1.0
+ wrap-ansi-cjs: wrap-ansi@7.0.0
+
+ '@jridgewell/gen-mapping@0.3.13':
+ dependencies:
+ '@jridgewell/sourcemap-codec': 1.5.5
+ '@jridgewell/trace-mapping': 0.3.31
+
+ '@jridgewell/remapping@2.3.5':
+ dependencies:
+ '@jridgewell/gen-mapping': 0.3.13
+ '@jridgewell/trace-mapping': 0.3.31
+
+ '@jridgewell/resolve-uri@3.1.2': {}
+
+ '@jridgewell/sourcemap-codec@1.5.5': {}
+
+ '@jridgewell/trace-mapping@0.3.31':
+ dependencies:
+ '@jridgewell/resolve-uri': 3.1.2
+ '@jridgewell/sourcemap-codec': 1.5.5
+
+ '@napi-rs/wasm-runtime@0.2.12':
+ dependencies:
+ '@emnapi/core': 1.9.2
+ '@emnapi/runtime': 1.9.2
+ '@tybys/wasm-util': 0.10.1
+ optional: true
+
+ '@next/env@14.2.32': {}
+
+ '@next/eslint-plugin-next@14.1.0':
+ dependencies:
+ glob: 10.3.10
+
+ '@next/swc-darwin-arm64@14.2.32':
+ optional: true
+
+ '@next/swc-darwin-x64@14.2.32':
+ optional: true
+
+ '@next/swc-linux-arm64-gnu@14.2.32':
+ optional: true
+
+ '@next/swc-linux-arm64-musl@14.2.32':
+ optional: true
+
+ '@next/swc-linux-x64-gnu@14.2.32':
+ optional: true
+
+ '@next/swc-linux-x64-musl@14.2.32':
+ optional: true
+
+ '@next/swc-win32-arm64-msvc@14.2.32':
+ optional: true
+
+ '@next/swc-win32-ia32-msvc@14.2.32':
+ optional: true
+
+ '@next/swc-win32-x64-msvc@14.2.32':
+ optional: true
+
+ '@nodelib/fs.scandir@2.1.5':
+ dependencies:
+ '@nodelib/fs.stat': 2.0.5
+ run-parallel: 1.2.0
+
+ '@nodelib/fs.stat@2.0.5': {}
+
+ '@nodelib/fs.walk@1.2.8':
+ dependencies:
+ '@nodelib/fs.scandir': 2.1.5
+ fastq: 1.20.1
+
+ '@nolyfill/is-core-module@1.0.39': {}
+
+ '@opentelemetry/api@1.9.0': {}
+
+ '@pkgjs/parseargs@0.11.0':
+ optional: true
+
+ '@radix-ui/number@1.1.1': {}
+
+ '@radix-ui/primitive@1.1.3': {}
+
+ '@radix-ui/react-accordion@1.2.12(@types/react-dom@18.3.7(@types/react@18.3.28))(@types/react@18.3.28)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)':
+ dependencies:
+ '@radix-ui/primitive': 1.1.3
+ '@radix-ui/react-collapsible': 1.1.12(@types/react-dom@18.3.7(@types/react@18.3.28))(@types/react@18.3.28)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
+ '@radix-ui/react-collection': 1.1.7(@types/react-dom@18.3.7(@types/react@18.3.28))(@types/react@18.3.28)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
+ '@radix-ui/react-compose-refs': 1.1.2(@types/react@18.3.28)(react@18.3.1)
+ '@radix-ui/react-context': 1.1.2(@types/react@18.3.28)(react@18.3.1)
+ '@radix-ui/react-direction': 1.1.1(@types/react@18.3.28)(react@18.3.1)
+ '@radix-ui/react-id': 1.1.1(@types/react@18.3.28)(react@18.3.1)
+ '@radix-ui/react-primitive': 2.1.3(@types/react-dom@18.3.7(@types/react@18.3.28))(@types/react@18.3.28)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
+ '@radix-ui/react-use-controllable-state': 1.2.2(@types/react@18.3.28)(react@18.3.1)
+ react: 18.3.1
+ react-dom: 18.3.1(react@18.3.1)
+ optionalDependencies:
+ '@types/react': 18.3.28
+ '@types/react-dom': 18.3.7(@types/react@18.3.28)
+
+ '@radix-ui/react-alert-dialog@1.1.15(@types/react-dom@18.3.7(@types/react@18.3.28))(@types/react@18.3.28)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)':
+ dependencies:
+ '@radix-ui/primitive': 1.1.3
+ '@radix-ui/react-compose-refs': 1.1.2(@types/react@18.3.28)(react@18.3.1)
+ '@radix-ui/react-context': 1.1.2(@types/react@18.3.28)(react@18.3.1)
+ '@radix-ui/react-dialog': 1.1.15(@types/react-dom@18.3.7(@types/react@18.3.28))(@types/react@18.3.28)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
+ '@radix-ui/react-primitive': 2.1.3(@types/react-dom@18.3.7(@types/react@18.3.28))(@types/react@18.3.28)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
+ '@radix-ui/react-slot': 1.2.3(@types/react@18.3.28)(react@18.3.1)
+ react: 18.3.1
+ react-dom: 18.3.1(react@18.3.1)
+ optionalDependencies:
+ '@types/react': 18.3.28
+ '@types/react-dom': 18.3.7(@types/react@18.3.28)
+
+ '@radix-ui/react-arrow@1.1.7(@types/react-dom@18.3.7(@types/react@18.3.28))(@types/react@18.3.28)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)':
+ dependencies:
+ '@radix-ui/react-primitive': 2.1.3(@types/react-dom@18.3.7(@types/react@18.3.28))(@types/react@18.3.28)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
+ react: 18.3.1
+ react-dom: 18.3.1(react@18.3.1)
+ optionalDependencies:
+ '@types/react': 18.3.28
+ '@types/react-dom': 18.3.7(@types/react@18.3.28)
+
+ '@radix-ui/react-collapsible@1.1.12(@types/react-dom@18.3.7(@types/react@18.3.28))(@types/react@18.3.28)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)':
+ dependencies:
+ '@radix-ui/primitive': 1.1.3
+ '@radix-ui/react-compose-refs': 1.1.2(@types/react@18.3.28)(react@18.3.1)
+ '@radix-ui/react-context': 1.1.2(@types/react@18.3.28)(react@18.3.1)
+ '@radix-ui/react-id': 1.1.1(@types/react@18.3.28)(react@18.3.1)
+ '@radix-ui/react-presence': 1.1.5(@types/react-dom@18.3.7(@types/react@18.3.28))(@types/react@18.3.28)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
+ '@radix-ui/react-primitive': 2.1.3(@types/react-dom@18.3.7(@types/react@18.3.28))(@types/react@18.3.28)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
+ '@radix-ui/react-use-controllable-state': 1.2.2(@types/react@18.3.28)(react@18.3.1)
+ '@radix-ui/react-use-layout-effect': 1.1.1(@types/react@18.3.28)(react@18.3.1)
+ react: 18.3.1
+ react-dom: 18.3.1(react@18.3.1)
+ optionalDependencies:
+ '@types/react': 18.3.28
+ '@types/react-dom': 18.3.7(@types/react@18.3.28)
+
+ '@radix-ui/react-collection@1.1.7(@types/react-dom@18.3.7(@types/react@18.3.28))(@types/react@18.3.28)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)':
+ dependencies:
+ '@radix-ui/react-compose-refs': 1.1.2(@types/react@18.3.28)(react@18.3.1)
+ '@radix-ui/react-context': 1.1.2(@types/react@18.3.28)(react@18.3.1)
+ '@radix-ui/react-primitive': 2.1.3(@types/react-dom@18.3.7(@types/react@18.3.28))(@types/react@18.3.28)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
+ '@radix-ui/react-slot': 1.2.3(@types/react@18.3.28)(react@18.3.1)
+ react: 18.3.1
+ react-dom: 18.3.1(react@18.3.1)
+ optionalDependencies:
+ '@types/react': 18.3.28
+ '@types/react-dom': 18.3.7(@types/react@18.3.28)
+
+ '@radix-ui/react-compose-refs@1.1.2(@types/react@18.3.28)(react@18.3.1)':
+ dependencies:
+ react: 18.3.1
+ optionalDependencies:
+ '@types/react': 18.3.28
+
+ '@radix-ui/react-context@1.1.2(@types/react@18.3.28)(react@18.3.1)':
+ dependencies:
+ react: 18.3.1
+ optionalDependencies:
+ '@types/react': 18.3.28
+
+ '@radix-ui/react-context@1.1.3(@types/react@18.3.28)(react@18.3.1)':
+ dependencies:
+ react: 18.3.1
+ optionalDependencies:
+ '@types/react': 18.3.28
+
+ '@radix-ui/react-dialog@1.1.15(@types/react-dom@18.3.7(@types/react@18.3.28))(@types/react@18.3.28)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)':
+ dependencies:
+ '@radix-ui/primitive': 1.1.3
+ '@radix-ui/react-compose-refs': 1.1.2(@types/react@18.3.28)(react@18.3.1)
+ '@radix-ui/react-context': 1.1.2(@types/react@18.3.28)(react@18.3.1)
+ '@radix-ui/react-dismissable-layer': 1.1.11(@types/react-dom@18.3.7(@types/react@18.3.28))(@types/react@18.3.28)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
+ '@radix-ui/react-focus-guards': 1.1.3(@types/react@18.3.28)(react@18.3.1)
+ '@radix-ui/react-focus-scope': 1.1.7(@types/react-dom@18.3.7(@types/react@18.3.28))(@types/react@18.3.28)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
+ '@radix-ui/react-id': 1.1.1(@types/react@18.3.28)(react@18.3.1)
+ '@radix-ui/react-portal': 1.1.9(@types/react-dom@18.3.7(@types/react@18.3.28))(@types/react@18.3.28)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
+ '@radix-ui/react-presence': 1.1.5(@types/react-dom@18.3.7(@types/react@18.3.28))(@types/react@18.3.28)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
+ '@radix-ui/react-primitive': 2.1.3(@types/react-dom@18.3.7(@types/react@18.3.28))(@types/react@18.3.28)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
+ '@radix-ui/react-slot': 1.2.3(@types/react@18.3.28)(react@18.3.1)
+ '@radix-ui/react-use-controllable-state': 1.2.2(@types/react@18.3.28)(react@18.3.1)
+ aria-hidden: 1.2.6
+ react: 18.3.1
+ react-dom: 18.3.1(react@18.3.1)
+ react-remove-scroll: 2.7.2(@types/react@18.3.28)(react@18.3.1)
+ optionalDependencies:
+ '@types/react': 18.3.28
+ '@types/react-dom': 18.3.7(@types/react@18.3.28)
+
+ '@radix-ui/react-direction@1.1.1(@types/react@18.3.28)(react@18.3.1)':
+ dependencies:
+ react: 18.3.1
+ optionalDependencies:
+ '@types/react': 18.3.28
+
+ '@radix-ui/react-dismissable-layer@1.1.11(@types/react-dom@18.3.7(@types/react@18.3.28))(@types/react@18.3.28)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)':
+ dependencies:
+ '@radix-ui/primitive': 1.1.3
+ '@radix-ui/react-compose-refs': 1.1.2(@types/react@18.3.28)(react@18.3.1)
+ '@radix-ui/react-primitive': 2.1.3(@types/react-dom@18.3.7(@types/react@18.3.28))(@types/react@18.3.28)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
+ '@radix-ui/react-use-callback-ref': 1.1.1(@types/react@18.3.28)(react@18.3.1)
+ '@radix-ui/react-use-escape-keydown': 1.1.1(@types/react@18.3.28)(react@18.3.1)
+ react: 18.3.1
+ react-dom: 18.3.1(react@18.3.1)
+ optionalDependencies:
+ '@types/react': 18.3.28
+ '@types/react-dom': 18.3.7(@types/react@18.3.28)
+
+ '@radix-ui/react-dropdown-menu@2.1.16(@types/react-dom@18.3.7(@types/react@18.3.28))(@types/react@18.3.28)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)':
+ dependencies:
+ '@radix-ui/primitive': 1.1.3
+ '@radix-ui/react-compose-refs': 1.1.2(@types/react@18.3.28)(react@18.3.1)
+ '@radix-ui/react-context': 1.1.2(@types/react@18.3.28)(react@18.3.1)
+ '@radix-ui/react-id': 1.1.1(@types/react@18.3.28)(react@18.3.1)
+ '@radix-ui/react-menu': 2.1.16(@types/react-dom@18.3.7(@types/react@18.3.28))(@types/react@18.3.28)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
+ '@radix-ui/react-primitive': 2.1.3(@types/react-dom@18.3.7(@types/react@18.3.28))(@types/react@18.3.28)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
+ '@radix-ui/react-use-controllable-state': 1.2.2(@types/react@18.3.28)(react@18.3.1)
+ react: 18.3.1
+ react-dom: 18.3.1(react@18.3.1)
+ optionalDependencies:
+ '@types/react': 18.3.28
+ '@types/react-dom': 18.3.7(@types/react@18.3.28)
+
+ '@radix-ui/react-focus-guards@1.1.3(@types/react@18.3.28)(react@18.3.1)':
+ dependencies:
+ react: 18.3.1
+ optionalDependencies:
+ '@types/react': 18.3.28
+
+ '@radix-ui/react-focus-scope@1.1.7(@types/react-dom@18.3.7(@types/react@18.3.28))(@types/react@18.3.28)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)':
+ dependencies:
+ '@radix-ui/react-compose-refs': 1.1.2(@types/react@18.3.28)(react@18.3.1)
+ '@radix-ui/react-primitive': 2.1.3(@types/react-dom@18.3.7(@types/react@18.3.28))(@types/react@18.3.28)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
+ '@radix-ui/react-use-callback-ref': 1.1.1(@types/react@18.3.28)(react@18.3.1)
+ react: 18.3.1
+ react-dom: 18.3.1(react@18.3.1)
+ optionalDependencies:
+ '@types/react': 18.3.28
+ '@types/react-dom': 18.3.7(@types/react@18.3.28)
+
+ '@radix-ui/react-id@1.1.1(@types/react@18.3.28)(react@18.3.1)':
+ dependencies:
+ '@radix-ui/react-use-layout-effect': 1.1.1(@types/react@18.3.28)(react@18.3.1)
+ react: 18.3.1
+ optionalDependencies:
+ '@types/react': 18.3.28
+
+ '@radix-ui/react-label@2.1.8(@types/react-dom@18.3.7(@types/react@18.3.28))(@types/react@18.3.28)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)':
+ dependencies:
+ '@radix-ui/react-primitive': 2.1.4(@types/react-dom@18.3.7(@types/react@18.3.28))(@types/react@18.3.28)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
+ react: 18.3.1
+ react-dom: 18.3.1(react@18.3.1)
+ optionalDependencies:
+ '@types/react': 18.3.28
+ '@types/react-dom': 18.3.7(@types/react@18.3.28)
+
+ '@radix-ui/react-menu@2.1.16(@types/react-dom@18.3.7(@types/react@18.3.28))(@types/react@18.3.28)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)':
+ dependencies:
+ '@radix-ui/primitive': 1.1.3
+ '@radix-ui/react-collection': 1.1.7(@types/react-dom@18.3.7(@types/react@18.3.28))(@types/react@18.3.28)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
+ '@radix-ui/react-compose-refs': 1.1.2(@types/react@18.3.28)(react@18.3.1)
+ '@radix-ui/react-context': 1.1.2(@types/react@18.3.28)(react@18.3.1)
+ '@radix-ui/react-direction': 1.1.1(@types/react@18.3.28)(react@18.3.1)
+ '@radix-ui/react-dismissable-layer': 1.1.11(@types/react-dom@18.3.7(@types/react@18.3.28))(@types/react@18.3.28)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
+ '@radix-ui/react-focus-guards': 1.1.3(@types/react@18.3.28)(react@18.3.1)
+ '@radix-ui/react-focus-scope': 1.1.7(@types/react-dom@18.3.7(@types/react@18.3.28))(@types/react@18.3.28)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
+ '@radix-ui/react-id': 1.1.1(@types/react@18.3.28)(react@18.3.1)
+ '@radix-ui/react-popper': 1.2.8(@types/react-dom@18.3.7(@types/react@18.3.28))(@types/react@18.3.28)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
+ '@radix-ui/react-portal': 1.1.9(@types/react-dom@18.3.7(@types/react@18.3.28))(@types/react@18.3.28)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
+ '@radix-ui/react-presence': 1.1.5(@types/react-dom@18.3.7(@types/react@18.3.28))(@types/react@18.3.28)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
+ '@radix-ui/react-primitive': 2.1.3(@types/react-dom@18.3.7(@types/react@18.3.28))(@types/react@18.3.28)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
+ '@radix-ui/react-roving-focus': 1.1.11(@types/react-dom@18.3.7(@types/react@18.3.28))(@types/react@18.3.28)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
+ '@radix-ui/react-slot': 1.2.3(@types/react@18.3.28)(react@18.3.1)
+ '@radix-ui/react-use-callback-ref': 1.1.1(@types/react@18.3.28)(react@18.3.1)
+ aria-hidden: 1.2.6
+ react: 18.3.1
+ react-dom: 18.3.1(react@18.3.1)
+ react-remove-scroll: 2.7.2(@types/react@18.3.28)(react@18.3.1)
+ optionalDependencies:
+ '@types/react': 18.3.28
+ '@types/react-dom': 18.3.7(@types/react@18.3.28)
+
+ '@radix-ui/react-popover@1.1.15(@types/react-dom@18.3.7(@types/react@18.3.28))(@types/react@18.3.28)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)':
+ dependencies:
+ '@radix-ui/primitive': 1.1.3
+ '@radix-ui/react-compose-refs': 1.1.2(@types/react@18.3.28)(react@18.3.1)
+ '@radix-ui/react-context': 1.1.2(@types/react@18.3.28)(react@18.3.1)
+ '@radix-ui/react-dismissable-layer': 1.1.11(@types/react-dom@18.3.7(@types/react@18.3.28))(@types/react@18.3.28)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
+ '@radix-ui/react-focus-guards': 1.1.3(@types/react@18.3.28)(react@18.3.1)
+ '@radix-ui/react-focus-scope': 1.1.7(@types/react-dom@18.3.7(@types/react@18.3.28))(@types/react@18.3.28)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
+ '@radix-ui/react-id': 1.1.1(@types/react@18.3.28)(react@18.3.1)
+ '@radix-ui/react-popper': 1.2.8(@types/react-dom@18.3.7(@types/react@18.3.28))(@types/react@18.3.28)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
+ '@radix-ui/react-portal': 1.1.9(@types/react-dom@18.3.7(@types/react@18.3.28))(@types/react@18.3.28)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
+ '@radix-ui/react-presence': 1.1.5(@types/react-dom@18.3.7(@types/react@18.3.28))(@types/react@18.3.28)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
+ '@radix-ui/react-primitive': 2.1.3(@types/react-dom@18.3.7(@types/react@18.3.28))(@types/react@18.3.28)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
+ '@radix-ui/react-slot': 1.2.3(@types/react@18.3.28)(react@18.3.1)
+ '@radix-ui/react-use-controllable-state': 1.2.2(@types/react@18.3.28)(react@18.3.1)
+ aria-hidden: 1.2.6
+ react: 18.3.1
+ react-dom: 18.3.1(react@18.3.1)
+ react-remove-scroll: 2.7.2(@types/react@18.3.28)(react@18.3.1)
+ optionalDependencies:
+ '@types/react': 18.3.28
+ '@types/react-dom': 18.3.7(@types/react@18.3.28)
+
+ '@radix-ui/react-popper@1.2.8(@types/react-dom@18.3.7(@types/react@18.3.28))(@types/react@18.3.28)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)':
+ dependencies:
+ '@floating-ui/react-dom': 2.1.8(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
+ '@radix-ui/react-arrow': 1.1.7(@types/react-dom@18.3.7(@types/react@18.3.28))(@types/react@18.3.28)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
+ '@radix-ui/react-compose-refs': 1.1.2(@types/react@18.3.28)(react@18.3.1)
+ '@radix-ui/react-context': 1.1.2(@types/react@18.3.28)(react@18.3.1)
+ '@radix-ui/react-primitive': 2.1.3(@types/react-dom@18.3.7(@types/react@18.3.28))(@types/react@18.3.28)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
+ '@radix-ui/react-use-callback-ref': 1.1.1(@types/react@18.3.28)(react@18.3.1)
+ '@radix-ui/react-use-layout-effect': 1.1.1(@types/react@18.3.28)(react@18.3.1)
+ '@radix-ui/react-use-rect': 1.1.1(@types/react@18.3.28)(react@18.3.1)
+ '@radix-ui/react-use-size': 1.1.1(@types/react@18.3.28)(react@18.3.1)
+ '@radix-ui/rect': 1.1.1
+ react: 18.3.1
+ react-dom: 18.3.1(react@18.3.1)
+ optionalDependencies:
+ '@types/react': 18.3.28
+ '@types/react-dom': 18.3.7(@types/react@18.3.28)
+
+ '@radix-ui/react-portal@1.1.9(@types/react-dom@18.3.7(@types/react@18.3.28))(@types/react@18.3.28)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)':
+ dependencies:
+ '@radix-ui/react-primitive': 2.1.3(@types/react-dom@18.3.7(@types/react@18.3.28))(@types/react@18.3.28)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
+ '@radix-ui/react-use-layout-effect': 1.1.1(@types/react@18.3.28)(react@18.3.1)
+ react: 18.3.1
+ react-dom: 18.3.1(react@18.3.1)
+ optionalDependencies:
+ '@types/react': 18.3.28
+ '@types/react-dom': 18.3.7(@types/react@18.3.28)
+
+ '@radix-ui/react-presence@1.1.5(@types/react-dom@18.3.7(@types/react@18.3.28))(@types/react@18.3.28)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)':
+ dependencies:
+ '@radix-ui/react-compose-refs': 1.1.2(@types/react@18.3.28)(react@18.3.1)
+ '@radix-ui/react-use-layout-effect': 1.1.1(@types/react@18.3.28)(react@18.3.1)
+ react: 18.3.1
+ react-dom: 18.3.1(react@18.3.1)
+ optionalDependencies:
+ '@types/react': 18.3.28
+ '@types/react-dom': 18.3.7(@types/react@18.3.28)
+
+ '@radix-ui/react-primitive@2.1.3(@types/react-dom@18.3.7(@types/react@18.3.28))(@types/react@18.3.28)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)':
+ dependencies:
+ '@radix-ui/react-slot': 1.2.3(@types/react@18.3.28)(react@18.3.1)
+ react: 18.3.1
+ react-dom: 18.3.1(react@18.3.1)
+ optionalDependencies:
+ '@types/react': 18.3.28
+ '@types/react-dom': 18.3.7(@types/react@18.3.28)
+
+ '@radix-ui/react-primitive@2.1.4(@types/react-dom@18.3.7(@types/react@18.3.28))(@types/react@18.3.28)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)':
+ dependencies:
+ '@radix-ui/react-slot': 1.2.4(@types/react@18.3.28)(react@18.3.1)
+ react: 18.3.1
+ react-dom: 18.3.1(react@18.3.1)
+ optionalDependencies:
+ '@types/react': 18.3.28
+ '@types/react-dom': 18.3.7(@types/react@18.3.28)
+
+ '@radix-ui/react-progress@1.1.8(@types/react-dom@18.3.7(@types/react@18.3.28))(@types/react@18.3.28)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)':
+ dependencies:
+ '@radix-ui/react-context': 1.1.3(@types/react@18.3.28)(react@18.3.1)
+ '@radix-ui/react-primitive': 2.1.4(@types/react-dom@18.3.7(@types/react@18.3.28))(@types/react@18.3.28)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
+ react: 18.3.1
+ react-dom: 18.3.1(react@18.3.1)
+ optionalDependencies:
+ '@types/react': 18.3.28
+ '@types/react-dom': 18.3.7(@types/react@18.3.28)
+
+ '@radix-ui/react-roving-focus@1.1.11(@types/react-dom@18.3.7(@types/react@18.3.28))(@types/react@18.3.28)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)':
+ dependencies:
+ '@radix-ui/primitive': 1.1.3
+ '@radix-ui/react-collection': 1.1.7(@types/react-dom@18.3.7(@types/react@18.3.28))(@types/react@18.3.28)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
+ '@radix-ui/react-compose-refs': 1.1.2(@types/react@18.3.28)(react@18.3.1)
+ '@radix-ui/react-context': 1.1.2(@types/react@18.3.28)(react@18.3.1)
+ '@radix-ui/react-direction': 1.1.1(@types/react@18.3.28)(react@18.3.1)
+ '@radix-ui/react-id': 1.1.1(@types/react@18.3.28)(react@18.3.1)
+ '@radix-ui/react-primitive': 2.1.3(@types/react-dom@18.3.7(@types/react@18.3.28))(@types/react@18.3.28)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
+ '@radix-ui/react-use-callback-ref': 1.1.1(@types/react@18.3.28)(react@18.3.1)
+ '@radix-ui/react-use-controllable-state': 1.2.2(@types/react@18.3.28)(react@18.3.1)
+ react: 18.3.1
+ react-dom: 18.3.1(react@18.3.1)
+ optionalDependencies:
+ '@types/react': 18.3.28
+ '@types/react-dom': 18.3.7(@types/react@18.3.28)
+
+ '@radix-ui/react-select@2.2.6(@types/react-dom@18.3.7(@types/react@18.3.28))(@types/react@18.3.28)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)':
+ dependencies:
+ '@radix-ui/number': 1.1.1
+ '@radix-ui/primitive': 1.1.3
+ '@radix-ui/react-collection': 1.1.7(@types/react-dom@18.3.7(@types/react@18.3.28))(@types/react@18.3.28)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
+ '@radix-ui/react-compose-refs': 1.1.2(@types/react@18.3.28)(react@18.3.1)
+ '@radix-ui/react-context': 1.1.2(@types/react@18.3.28)(react@18.3.1)
+ '@radix-ui/react-direction': 1.1.1(@types/react@18.3.28)(react@18.3.1)
+ '@radix-ui/react-dismissable-layer': 1.1.11(@types/react-dom@18.3.7(@types/react@18.3.28))(@types/react@18.3.28)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
+ '@radix-ui/react-focus-guards': 1.1.3(@types/react@18.3.28)(react@18.3.1)
+ '@radix-ui/react-focus-scope': 1.1.7(@types/react-dom@18.3.7(@types/react@18.3.28))(@types/react@18.3.28)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
+ '@radix-ui/react-id': 1.1.1(@types/react@18.3.28)(react@18.3.1)
+ '@radix-ui/react-popper': 1.2.8(@types/react-dom@18.3.7(@types/react@18.3.28))(@types/react@18.3.28)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
+ '@radix-ui/react-portal': 1.1.9(@types/react-dom@18.3.7(@types/react@18.3.28))(@types/react@18.3.28)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
+ '@radix-ui/react-primitive': 2.1.3(@types/react-dom@18.3.7(@types/react@18.3.28))(@types/react@18.3.28)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
+ '@radix-ui/react-slot': 1.2.3(@types/react@18.3.28)(react@18.3.1)
+ '@radix-ui/react-use-callback-ref': 1.1.1(@types/react@18.3.28)(react@18.3.1)
+ '@radix-ui/react-use-controllable-state': 1.2.2(@types/react@18.3.28)(react@18.3.1)
+ '@radix-ui/react-use-layout-effect': 1.1.1(@types/react@18.3.28)(react@18.3.1)
+ '@radix-ui/react-use-previous': 1.1.1(@types/react@18.3.28)(react@18.3.1)
+ '@radix-ui/react-visually-hidden': 1.2.3(@types/react-dom@18.3.7(@types/react@18.3.28))(@types/react@18.3.28)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
+ aria-hidden: 1.2.6
+ react: 18.3.1
+ react-dom: 18.3.1(react@18.3.1)
+ react-remove-scroll: 2.7.2(@types/react@18.3.28)(react@18.3.1)
+ optionalDependencies:
+ '@types/react': 18.3.28
+ '@types/react-dom': 18.3.7(@types/react@18.3.28)
+
+ '@radix-ui/react-slot@1.2.3(@types/react@18.3.28)(react@18.3.1)':
+ dependencies:
+ '@radix-ui/react-compose-refs': 1.1.2(@types/react@18.3.28)(react@18.3.1)
+ react: 18.3.1
+ optionalDependencies:
+ '@types/react': 18.3.28
+
+ '@radix-ui/react-slot@1.2.4(@types/react@18.3.28)(react@18.3.1)':
+ dependencies:
+ '@radix-ui/react-compose-refs': 1.1.2(@types/react@18.3.28)(react@18.3.1)
+ react: 18.3.1
+ optionalDependencies:
+ '@types/react': 18.3.28
+
+ '@radix-ui/react-switch@1.2.6(@types/react-dom@18.3.7(@types/react@18.3.28))(@types/react@18.3.28)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)':
+ dependencies:
+ '@radix-ui/primitive': 1.1.3
+ '@radix-ui/react-compose-refs': 1.1.2(@types/react@18.3.28)(react@18.3.1)
+ '@radix-ui/react-context': 1.1.2(@types/react@18.3.28)(react@18.3.1)
+ '@radix-ui/react-primitive': 2.1.3(@types/react-dom@18.3.7(@types/react@18.3.28))(@types/react@18.3.28)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
+ '@radix-ui/react-use-controllable-state': 1.2.2(@types/react@18.3.28)(react@18.3.1)
+ '@radix-ui/react-use-previous': 1.1.1(@types/react@18.3.28)(react@18.3.1)
+ '@radix-ui/react-use-size': 1.1.1(@types/react@18.3.28)(react@18.3.1)
+ react: 18.3.1
+ react-dom: 18.3.1(react@18.3.1)
+ optionalDependencies:
+ '@types/react': 18.3.28
+ '@types/react-dom': 18.3.7(@types/react@18.3.28)
+
+ '@radix-ui/react-tabs@1.1.13(@types/react-dom@18.3.7(@types/react@18.3.28))(@types/react@18.3.28)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)':
+ dependencies:
+ '@radix-ui/primitive': 1.1.3
+ '@radix-ui/react-context': 1.1.2(@types/react@18.3.28)(react@18.3.1)
+ '@radix-ui/react-direction': 1.1.1(@types/react@18.3.28)(react@18.3.1)
+ '@radix-ui/react-id': 1.1.1(@types/react@18.3.28)(react@18.3.1)
+ '@radix-ui/react-presence': 1.1.5(@types/react-dom@18.3.7(@types/react@18.3.28))(@types/react@18.3.28)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
+ '@radix-ui/react-primitive': 2.1.3(@types/react-dom@18.3.7(@types/react@18.3.28))(@types/react@18.3.28)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
+ '@radix-ui/react-roving-focus': 1.1.11(@types/react-dom@18.3.7(@types/react@18.3.28))(@types/react@18.3.28)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
+ '@radix-ui/react-use-controllable-state': 1.2.2(@types/react@18.3.28)(react@18.3.1)
+ react: 18.3.1
+ react-dom: 18.3.1(react@18.3.1)
+ optionalDependencies:
+ '@types/react': 18.3.28
+ '@types/react-dom': 18.3.7(@types/react@18.3.28)
+
+ '@radix-ui/react-toast@1.2.15(@types/react-dom@18.3.7(@types/react@18.3.28))(@types/react@18.3.28)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)':
+ dependencies:
+ '@radix-ui/primitive': 1.1.3
+ '@radix-ui/react-collection': 1.1.7(@types/react-dom@18.3.7(@types/react@18.3.28))(@types/react@18.3.28)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
+ '@radix-ui/react-compose-refs': 1.1.2(@types/react@18.3.28)(react@18.3.1)
+ '@radix-ui/react-context': 1.1.2(@types/react@18.3.28)(react@18.3.1)
+ '@radix-ui/react-dismissable-layer': 1.1.11(@types/react-dom@18.3.7(@types/react@18.3.28))(@types/react@18.3.28)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
+ '@radix-ui/react-portal': 1.1.9(@types/react-dom@18.3.7(@types/react@18.3.28))(@types/react@18.3.28)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
+ '@radix-ui/react-presence': 1.1.5(@types/react-dom@18.3.7(@types/react@18.3.28))(@types/react@18.3.28)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
+ '@radix-ui/react-primitive': 2.1.3(@types/react-dom@18.3.7(@types/react@18.3.28))(@types/react@18.3.28)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
+ '@radix-ui/react-use-callback-ref': 1.1.1(@types/react@18.3.28)(react@18.3.1)
+ '@radix-ui/react-use-controllable-state': 1.2.2(@types/react@18.3.28)(react@18.3.1)
+ '@radix-ui/react-use-layout-effect': 1.1.1(@types/react@18.3.28)(react@18.3.1)
+ '@radix-ui/react-visually-hidden': 1.2.3(@types/react-dom@18.3.7(@types/react@18.3.28))(@types/react@18.3.28)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
+ react: 18.3.1
+ react-dom: 18.3.1(react@18.3.1)
+ optionalDependencies:
+ '@types/react': 18.3.28
+ '@types/react-dom': 18.3.7(@types/react@18.3.28)
+
+ '@radix-ui/react-use-callback-ref@1.1.1(@types/react@18.3.28)(react@18.3.1)':
+ dependencies:
+ react: 18.3.1
+ optionalDependencies:
+ '@types/react': 18.3.28
+
+ '@radix-ui/react-use-controllable-state@1.2.2(@types/react@18.3.28)(react@18.3.1)':
+ dependencies:
+ '@radix-ui/react-use-effect-event': 0.0.2(@types/react@18.3.28)(react@18.3.1)
+ '@radix-ui/react-use-layout-effect': 1.1.1(@types/react@18.3.28)(react@18.3.1)
+ react: 18.3.1
+ optionalDependencies:
+ '@types/react': 18.3.28
+
+ '@radix-ui/react-use-effect-event@0.0.2(@types/react@18.3.28)(react@18.3.1)':
+ dependencies:
+ '@radix-ui/react-use-layout-effect': 1.1.1(@types/react@18.3.28)(react@18.3.1)
+ react: 18.3.1
+ optionalDependencies:
+ '@types/react': 18.3.28
+
+ '@radix-ui/react-use-escape-keydown@1.1.1(@types/react@18.3.28)(react@18.3.1)':
+ dependencies:
+ '@radix-ui/react-use-callback-ref': 1.1.1(@types/react@18.3.28)(react@18.3.1)
+ react: 18.3.1
+ optionalDependencies:
+ '@types/react': 18.3.28
+
+ '@radix-ui/react-use-layout-effect@1.1.1(@types/react@18.3.28)(react@18.3.1)':
+ dependencies:
+ react: 18.3.1
+ optionalDependencies:
+ '@types/react': 18.3.28
+
+ '@radix-ui/react-use-previous@1.1.1(@types/react@18.3.28)(react@18.3.1)':
+ dependencies:
+ react: 18.3.1
+ optionalDependencies:
+ '@types/react': 18.3.28
+
+ '@radix-ui/react-use-rect@1.1.1(@types/react@18.3.28)(react@18.3.1)':
+ dependencies:
+ '@radix-ui/rect': 1.1.1
+ react: 18.3.1
+ optionalDependencies:
+ '@types/react': 18.3.28
+
+ '@radix-ui/react-use-size@1.1.1(@types/react@18.3.28)(react@18.3.1)':
+ dependencies:
+ '@radix-ui/react-use-layout-effect': 1.1.1(@types/react@18.3.28)(react@18.3.1)
+ react: 18.3.1
+ optionalDependencies:
+ '@types/react': 18.3.28
+
+ '@radix-ui/react-visually-hidden@1.2.3(@types/react-dom@18.3.7(@types/react@18.3.28))(@types/react@18.3.28)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)':
+ dependencies:
+ '@radix-ui/react-primitive': 2.1.3(@types/react-dom@18.3.7(@types/react@18.3.28))(@types/react@18.3.28)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
+ react: 18.3.1
+ react-dom: 18.3.1(react@18.3.1)
+ optionalDependencies:
+ '@types/react': 18.3.28
+ '@types/react-dom': 18.3.7(@types/react@18.3.28)
+
+ '@radix-ui/rect@1.1.1': {}
+
+ '@rtsao/scc@1.1.0': {}
+
+ '@rushstack/eslint-patch@1.16.1': {}
+
+ '@swc/counter@0.1.3': {}
+
+ '@swc/helpers@0.5.5':
+ dependencies:
+ '@swc/counter': 0.1.3
+ tslib: 2.8.1
+
+ '@tailwindcss/line-clamp@0.4.4(tailwindcss@3.4.19)':
+ dependencies:
+ tailwindcss: 3.4.19
+
+ '@ts-morph/common@0.19.0':
+ dependencies:
+ fast-glob: 3.3.3
+ minimatch: 7.4.9
+ mkdirp: 2.1.6
+ path-browserify: 1.0.1
+
+ '@tybys/wasm-util@0.10.1':
+ dependencies:
+ tslib: 2.8.1
+ optional: true
+
+ '@types/debug@4.1.13':
+ dependencies:
+ '@types/ms': 2.1.0
+
+ '@types/diff-match-patch@1.0.36': {}
+
+ '@types/estree-jsx@1.0.5':
+ dependencies:
+ '@types/estree': 1.0.8
+
+ '@types/estree@1.0.8': {}
+
+ '@types/hast@2.3.10':
+ dependencies:
+ '@types/unist': 2.0.11
+
+ '@types/hast@3.0.4':
+ dependencies:
+ '@types/unist': 3.0.3
+
+ '@types/json5@0.0.29': {}
+
+ '@types/mdast@4.0.4':
+ dependencies:
+ '@types/unist': 3.0.3
+
+ '@types/ms@2.1.0': {}
+
+ '@types/node@20.19.39':
+ dependencies:
+ undici-types: 6.21.0
+
+ '@types/prop-types@15.7.15': {}
+
+ '@types/react-dom@18.3.7(@types/react@18.3.28)':
+ dependencies:
+ '@types/react': 18.3.28
+
+ '@types/react-file-icon@1.0.5':
+ dependencies:
+ '@types/react': 18.3.28
+
+ '@types/react@18.3.28':
+ dependencies:
+ '@types/prop-types': 15.7.15
+ csstype: 3.2.3
+
+ '@types/unist@2.0.11': {}
+
+ '@types/unist@3.0.3': {}
+
+ '@typescript-eslint/parser@6.21.0(eslint@8.57.1)(typescript@5.9.3)':
+ dependencies:
+ '@typescript-eslint/scope-manager': 6.21.0
+ '@typescript-eslint/types': 6.21.0
+ '@typescript-eslint/typescript-estree': 6.21.0(typescript@5.9.3)
+ '@typescript-eslint/visitor-keys': 6.21.0
+ debug: 4.4.3
+ eslint: 8.57.1
+ optionalDependencies:
+ typescript: 5.9.3
+ transitivePeerDependencies:
+ - supports-color
+
+ '@typescript-eslint/scope-manager@6.21.0':
+ dependencies:
+ '@typescript-eslint/types': 6.21.0
+ '@typescript-eslint/visitor-keys': 6.21.0
+
+ '@typescript-eslint/types@6.21.0': {}
+
+ '@typescript-eslint/typescript-estree@6.21.0(typescript@5.9.3)':
+ dependencies:
+ '@typescript-eslint/types': 6.21.0
+ '@typescript-eslint/visitor-keys': 6.21.0
+ debug: 4.4.3
+ globby: 11.1.0
+ is-glob: 4.0.3
+ minimatch: 9.0.3
+ semver: 7.7.4
+ ts-api-utils: 1.4.3(typescript@5.9.3)
+ optionalDependencies:
+ typescript: 5.9.3
+ transitivePeerDependencies:
+ - supports-color
+
+ '@typescript-eslint/visitor-keys@6.21.0':
+ dependencies:
+ '@typescript-eslint/types': 6.21.0
+ eslint-visitor-keys: 3.4.3
+
+ '@ungap/structured-clone@1.3.0': {}
+
+ '@unrs/resolver-binding-android-arm-eabi@1.11.1':
+ optional: true
+
+ '@unrs/resolver-binding-android-arm64@1.11.1':
+ optional: true
+
+ '@unrs/resolver-binding-darwin-arm64@1.11.1':
+ optional: true
+
+ '@unrs/resolver-binding-darwin-x64@1.11.1':
+ optional: true
+
+ '@unrs/resolver-binding-freebsd-x64@1.11.1':
+ optional: true
+
+ '@unrs/resolver-binding-linux-arm-gnueabihf@1.11.1':
+ optional: true
+
+ '@unrs/resolver-binding-linux-arm-musleabihf@1.11.1':
+ optional: true
+
+ '@unrs/resolver-binding-linux-arm64-gnu@1.11.1':
+ optional: true
+
+ '@unrs/resolver-binding-linux-arm64-musl@1.11.1':
+ optional: true
+
+ '@unrs/resolver-binding-linux-ppc64-gnu@1.11.1':
+ optional: true
+
+ '@unrs/resolver-binding-linux-riscv64-gnu@1.11.1':
+ optional: true
+
+ '@unrs/resolver-binding-linux-riscv64-musl@1.11.1':
+ optional: true
+
+ '@unrs/resolver-binding-linux-s390x-gnu@1.11.1':
+ optional: true
+
+ '@unrs/resolver-binding-linux-x64-gnu@1.11.1':
+ optional: true
+
+ '@unrs/resolver-binding-linux-x64-musl@1.11.1':
+ optional: true
+
+ '@unrs/resolver-binding-wasm32-wasi@1.11.1':
+ dependencies:
+ '@napi-rs/wasm-runtime': 0.2.12
+ optional: true
+
+ '@unrs/resolver-binding-win32-arm64-msvc@1.11.1':
+ optional: true
+
+ '@unrs/resolver-binding-win32-ia32-msvc@1.11.1':
+ optional: true
+
+ '@unrs/resolver-binding-win32-x64-msvc@1.11.1':
+ optional: true
+
+ acorn-jsx@5.3.2(acorn@8.16.0):
+ dependencies:
+ acorn: 8.16.0
+
+ acorn@8.16.0: {}
+
+ agent-base@7.1.4: {}
+
+ ai@4.3.19(react@18.3.1)(zod@3.25.76):
+ dependencies:
+ '@ai-sdk/provider': 1.1.3
+ '@ai-sdk/provider-utils': 2.2.8(zod@3.25.76)
+ '@ai-sdk/react': 1.2.12(react@18.3.1)(zod@3.25.76)
+ '@ai-sdk/ui-utils': 1.2.11(zod@3.25.76)
+ '@opentelemetry/api': 1.9.0
+ jsondiffpatch: 0.6.0
+ zod: 3.25.76
+ optionalDependencies:
+ react: 18.3.1
+
+ ajv@6.14.0:
+ dependencies:
+ fast-deep-equal: 3.1.3
+ fast-json-stable-stringify: 2.1.0
+ json-schema-traverse: 0.4.1
+ uri-js: 4.4.1
+
+ ansi-regex@5.0.1: {}
+
+ ansi-regex@6.2.2: {}
+
+ ansi-styles@4.3.0:
+ dependencies:
+ color-convert: 2.0.1
+
+ ansi-styles@6.2.3: {}
+
+ any-promise@1.3.0: {}
+
+ anymatch@3.1.3:
+ dependencies:
+ normalize-path: 3.0.0
+ picomatch: 2.3.2
+
+ arg@5.0.2: {}
+
+ argparse@2.0.1: {}
+
+ aria-hidden@1.2.6:
+ dependencies:
+ tslib: 2.8.1
+
+ aria-query@5.3.2: {}
+
+ array-buffer-byte-length@1.0.2:
+ dependencies:
+ call-bound: 1.0.4
+ is-array-buffer: 3.0.5
+
+ array-includes@3.1.9:
+ dependencies:
+ call-bind: 1.0.8
+ call-bound: 1.0.4
+ define-properties: 1.2.1
+ es-abstract: 1.24.1
+ es-object-atoms: 1.1.1
+ get-intrinsic: 1.3.0
+ is-string: 1.1.1
+ math-intrinsics: 1.1.0
+
+ array-union@2.1.0: {}
+
+ array.prototype.findlast@1.2.5:
+ dependencies:
+ call-bind: 1.0.8
+ define-properties: 1.2.1
+ es-abstract: 1.24.1
+ es-errors: 1.3.0
+ es-object-atoms: 1.1.1
+ es-shim-unscopables: 1.1.0
+
+ array.prototype.findlastindex@1.2.6:
+ dependencies:
+ call-bind: 1.0.8
+ call-bound: 1.0.4
+ define-properties: 1.2.1
+ es-abstract: 1.24.1
+ es-errors: 1.3.0
+ es-object-atoms: 1.1.1
+ es-shim-unscopables: 1.1.0
+
+ array.prototype.flat@1.3.3:
+ dependencies:
+ call-bind: 1.0.8
+ define-properties: 1.2.1
+ es-abstract: 1.24.1
+ es-shim-unscopables: 1.1.0
+
+ array.prototype.flatmap@1.3.3:
+ dependencies:
+ call-bind: 1.0.8
+ define-properties: 1.2.1
+ es-abstract: 1.24.1
+ es-shim-unscopables: 1.1.0
+
+ array.prototype.tosorted@1.1.4:
+ dependencies:
+ call-bind: 1.0.8
+ define-properties: 1.2.1
+ es-abstract: 1.24.1
+ es-errors: 1.3.0
+ es-shim-unscopables: 1.1.0
+
+ arraybuffer.prototype.slice@1.0.4:
+ dependencies:
+ array-buffer-byte-length: 1.0.2
+ call-bind: 1.0.8
+ define-properties: 1.2.1
+ es-abstract: 1.24.1
+ es-errors: 1.3.0
+ get-intrinsic: 1.3.0
+ is-array-buffer: 3.0.5
+
+ ast-types-flow@0.0.8: {}
+
+ ast-types@0.16.1:
+ dependencies:
+ tslib: 2.8.1
+
+ async-function@1.0.0: {}
+
+ attr-accept@2.2.5: {}
+
+ autoprefixer@10.4.27(postcss@8.5.8):
+ dependencies:
+ browserslist: 4.28.2
+ caniuse-lite: 1.0.30001786
+ fraction.js: 5.3.4
+ picocolors: 1.1.1
+ postcss: 8.5.8
+ postcss-value-parser: 4.2.0
+
+ available-typed-arrays@1.0.7:
+ dependencies:
+ possible-typed-array-names: 1.1.0
+
+ axe-core@4.11.2: {}
+
+ axobject-query@4.1.0: {}
+
+ bail@2.0.2: {}
+
+ balanced-match@1.0.2: {}
+
+ base64-js@1.5.1: {}
+
+ baseline-browser-mapping@2.10.16: {}
+
+ binary-extensions@2.3.0: {}
+
+ bl@5.1.0:
+ dependencies:
+ buffer: 6.0.3
+ inherits: 2.0.4
+ readable-stream: 3.6.2
+
+ brace-expansion@1.1.13:
+ dependencies:
+ balanced-match: 1.0.2
+ concat-map: 0.0.1
+
+ brace-expansion@2.0.3:
+ dependencies:
+ balanced-match: 1.0.2
+
+ braces@3.0.3:
+ dependencies:
+ fill-range: 7.1.1
+
+ browserslist@4.28.2:
+ dependencies:
+ baseline-browser-mapping: 2.10.16
+ caniuse-lite: 1.0.30001786
+ electron-to-chromium: 1.5.331
+ node-releases: 2.0.37
+ update-browserslist-db: 1.2.3(browserslist@4.28.2)
+
+ buffer@6.0.3:
+ dependencies:
+ base64-js: 1.5.1
+ ieee754: 1.2.1
+
+ busboy@1.6.0:
+ dependencies:
+ streamsearch: 1.1.0
+
+ call-bind-apply-helpers@1.0.2:
+ dependencies:
+ es-errors: 1.3.0
+ function-bind: 1.1.2
+
+ call-bind@1.0.8:
+ dependencies:
+ call-bind-apply-helpers: 1.0.2
+ es-define-property: 1.0.1
+ get-intrinsic: 1.3.0
+ set-function-length: 1.2.2
+
+ call-bound@1.0.4:
+ dependencies:
+ call-bind-apply-helpers: 1.0.2
+ get-intrinsic: 1.3.0
+
+ callsites@3.1.0: {}
+
+ camelcase-css@2.0.1: {}
+
+ caniuse-lite@1.0.30001786: {}
+
+ ccount@2.0.1: {}
+
+ chalk@4.1.2:
+ dependencies:
+ ansi-styles: 4.3.0
+ supports-color: 7.2.0
+
+ chalk@5.2.0: {}
+
+ chalk@5.6.2: {}
+
+ character-entities-html4@2.1.0: {}
+
+ character-entities-legacy@1.1.4: {}
+
+ character-entities-legacy@3.0.0: {}
+
+ character-entities@1.2.4: {}
+
+ character-entities@2.0.2: {}
+
+ character-reference-invalid@1.1.4: {}
+
+ character-reference-invalid@2.0.1: {}
+
+ chokidar@3.6.0:
+ dependencies:
+ anymatch: 3.1.3
+ braces: 3.0.3
+ glob-parent: 5.1.2
+ is-binary-path: 2.1.0
+ is-glob: 4.0.3
+ normalize-path: 3.0.0
+ readdirp: 3.6.0
+ optionalDependencies:
+ fsevents: 2.3.3
+
+ class-variance-authority@0.7.1:
+ dependencies:
+ clsx: 2.1.1
+
+ cli-cursor@4.0.0:
+ dependencies:
+ restore-cursor: 4.0.0
+
+ cli-spinners@2.9.2: {}
+
+ client-only@0.0.1: {}
+
+ clone@1.0.4: {}
+
+ clsx@2.1.1: {}
+
+ code-block-writer@12.0.0: {}
+
+ color-convert@2.0.1:
+ dependencies:
+ color-name: 1.1.4
+
+ color-name@1.1.4: {}
+
+ colord@2.9.3: {}
+
+ comma-separated-tokens@1.0.8: {}
+
+ comma-separated-tokens@2.0.3: {}
+
+ commander@10.0.1: {}
+
+ commander@4.1.1: {}
+
+ concat-map@0.0.1: {}
+
+ convert-source-map@2.0.0: {}
+
+ cosmiconfig@8.3.6(typescript@5.9.3):
+ dependencies:
+ import-fresh: 3.3.1
+ js-yaml: 4.1.1
+ parse-json: 5.2.0
+ path-type: 4.0.0
+ optionalDependencies:
+ typescript: 5.9.3
+
+ cross-spawn@7.0.6:
+ dependencies:
+ path-key: 3.1.1
+ shebang-command: 2.0.0
+ which: 2.0.2
+
+ cssesc@3.0.0: {}
+
+ csstype@3.2.3: {}
+
+ damerau-levenshtein@1.0.8: {}
+
+ data-uri-to-buffer@4.0.1: {}
+
+ data-view-buffer@1.0.2:
+ dependencies:
+ call-bound: 1.0.4
+ es-errors: 1.3.0
+ is-data-view: 1.0.2
+
+ data-view-byte-length@1.0.2:
+ dependencies:
+ call-bound: 1.0.4
+ es-errors: 1.3.0
+ is-data-view: 1.0.2
+
+ data-view-byte-offset@1.0.1:
+ dependencies:
+ call-bound: 1.0.4
+ es-errors: 1.3.0
+ is-data-view: 1.0.2
+
+ date-fns@4.1.0: {}
+
+ debug@3.2.7:
+ dependencies:
+ ms: 2.1.3
+
+ debug@4.4.3:
+ dependencies:
+ ms: 2.1.3
+
+ decode-named-character-reference@1.3.0:
+ dependencies:
+ character-entities: 2.0.2
+
+ deep-is@0.1.4: {}
+
+ defaults@1.0.4:
+ dependencies:
+ clone: 1.0.4
+
+ define-data-property@1.1.4:
+ dependencies:
+ es-define-property: 1.0.1
+ es-errors: 1.3.0
+ gopd: 1.2.0
+
+ define-properties@1.2.1:
+ dependencies:
+ define-data-property: 1.1.4
+ has-property-descriptors: 1.0.2
+ object-keys: 1.1.1
+
+ dequal@2.0.3: {}
+
+ detect-node-es@1.1.0: {}
+
+ devlop@1.1.0:
+ dependencies:
+ dequal: 2.0.3
+
+ didyoumean@1.2.2: {}
+
+ diff-match-patch@1.0.5: {}
+
+ diff@5.2.2: {}
+
+ dir-glob@3.0.1:
+ dependencies:
+ path-type: 4.0.0
+
+ dlv@1.1.3: {}
+
+ doctrine@2.1.0:
+ dependencies:
+ esutils: 2.0.3
+
+ doctrine@3.0.0:
+ dependencies:
+ esutils: 2.0.3
+
+ dunder-proto@1.0.1:
+ dependencies:
+ call-bind-apply-helpers: 1.0.2
+ es-errors: 1.3.0
+ gopd: 1.2.0
+
+ eastasianwidth@0.2.0: {}
+
+ electron-to-chromium@1.5.331: {}
+
+ emoji-regex@8.0.0: {}
+
+ emoji-regex@9.2.2: {}
+
+ entities@6.0.1: {}
+
+ error-ex@1.3.4:
+ dependencies:
+ is-arrayish: 0.2.1
+
+ es-abstract@1.24.1:
+ dependencies:
+ array-buffer-byte-length: 1.0.2
+ arraybuffer.prototype.slice: 1.0.4
+ available-typed-arrays: 1.0.7
+ call-bind: 1.0.8
+ call-bound: 1.0.4
+ data-view-buffer: 1.0.2
+ data-view-byte-length: 1.0.2
+ data-view-byte-offset: 1.0.1
+ es-define-property: 1.0.1
+ es-errors: 1.3.0
+ es-object-atoms: 1.1.1
+ es-set-tostringtag: 2.1.0
+ es-to-primitive: 1.3.0
+ function.prototype.name: 1.1.8
+ get-intrinsic: 1.3.0
+ get-proto: 1.0.1
+ get-symbol-description: 1.1.0
+ globalthis: 1.0.4
+ gopd: 1.2.0
+ has-property-descriptors: 1.0.2
+ has-proto: 1.2.0
+ has-symbols: 1.1.0
+ hasown: 2.0.2
+ internal-slot: 1.1.0
+ is-array-buffer: 3.0.5
+ is-callable: 1.2.7
+ is-data-view: 1.0.2
+ is-negative-zero: 2.0.3
+ is-regex: 1.2.1
+ is-set: 2.0.3
+ is-shared-array-buffer: 1.0.4
+ is-string: 1.1.1
+ is-typed-array: 1.1.15
+ is-weakref: 1.1.1
+ math-intrinsics: 1.1.0
+ object-inspect: 1.13.4
+ object-keys: 1.1.1
+ object.assign: 4.1.7
+ own-keys: 1.0.1
+ regexp.prototype.flags: 1.5.4
+ safe-array-concat: 1.1.3
+ safe-push-apply: 1.0.0
+ safe-regex-test: 1.1.0
+ set-proto: 1.0.0
+ stop-iteration-iterator: 1.1.0
+ string.prototype.trim: 1.2.10
+ string.prototype.trimend: 1.0.9
+ string.prototype.trimstart: 1.0.8
+ typed-array-buffer: 1.0.3
+ typed-array-byte-length: 1.0.3
+ typed-array-byte-offset: 1.0.4
+ typed-array-length: 1.0.7
+ unbox-primitive: 1.1.0
+ which-typed-array: 1.1.20
+
+ es-define-property@1.0.1: {}
+
+ es-errors@1.3.0: {}
+
+ es-iterator-helpers@1.3.1:
+ dependencies:
+ call-bind: 1.0.8
+ call-bound: 1.0.4
+ define-properties: 1.2.1
+ es-abstract: 1.24.1
+ es-errors: 1.3.0
+ es-set-tostringtag: 2.1.0
+ function-bind: 1.1.2
+ get-intrinsic: 1.3.0
+ globalthis: 1.0.4
+ gopd: 1.2.0
+ has-property-descriptors: 1.0.2
+ has-proto: 1.2.0
+ has-symbols: 1.1.0
+ internal-slot: 1.1.0
+ iterator.prototype: 1.1.5
+ math-intrinsics: 1.1.0
+ safe-array-concat: 1.1.3
+
+ es-object-atoms@1.1.1:
+ dependencies:
+ es-errors: 1.3.0
+
+ es-set-tostringtag@2.1.0:
+ dependencies:
+ es-errors: 1.3.0
+ get-intrinsic: 1.3.0
+ has-tostringtag: 1.0.2
+ hasown: 2.0.2
+
+ es-shim-unscopables@1.1.0:
+ dependencies:
+ hasown: 2.0.2
+
+ es-to-primitive@1.3.0:
+ dependencies:
+ is-callable: 1.2.7
+ is-date-object: 1.1.0
+ is-symbol: 1.1.1
+
+ escalade@3.2.0: {}
+
+ escape-string-regexp@4.0.0: {}
+
+ escape-string-regexp@5.0.0: {}
+
+ eslint-config-next@14.1.0(eslint@8.57.1)(typescript@5.9.3):
+ dependencies:
+ '@next/eslint-plugin-next': 14.1.0
+ '@rushstack/eslint-patch': 1.16.1
+ '@typescript-eslint/parser': 6.21.0(eslint@8.57.1)(typescript@5.9.3)
+ eslint: 8.57.1
+ eslint-import-resolver-node: 0.3.10
+ eslint-import-resolver-typescript: 3.10.1(eslint-plugin-import@2.32.0(eslint@8.57.1))(eslint@8.57.1)
+ eslint-plugin-import: 2.32.0(@typescript-eslint/parser@6.21.0(eslint@8.57.1)(typescript@5.9.3))(eslint-import-resolver-typescript@3.10.1)(eslint@8.57.1)
+ eslint-plugin-jsx-a11y: 6.10.2(eslint@8.57.1)
+ eslint-plugin-react: 7.37.5(eslint@8.57.1)
+ eslint-plugin-react-hooks: 5.0.0-canary-7118f5dd7-20230705(eslint@8.57.1)
+ optionalDependencies:
+ typescript: 5.9.3
+ transitivePeerDependencies:
+ - eslint-import-resolver-webpack
+ - eslint-plugin-import-x
+ - supports-color
+
+ eslint-import-resolver-node@0.3.10:
+ dependencies:
+ debug: 3.2.7
+ is-core-module: 2.16.1
+ resolve: 2.0.0-next.6
+ transitivePeerDependencies:
+ - supports-color
+
+ eslint-import-resolver-typescript@3.10.1(eslint-plugin-import@2.32.0(eslint@8.57.1))(eslint@8.57.1):
+ dependencies:
+ '@nolyfill/is-core-module': 1.0.39
+ debug: 4.4.3
+ eslint: 8.57.1
+ get-tsconfig: 4.13.7
+ is-bun-module: 2.0.0
+ stable-hash: 0.0.5
+ tinyglobby: 0.2.15
+ unrs-resolver: 1.11.1
+ optionalDependencies:
+ eslint-plugin-import: 2.32.0(@typescript-eslint/parser@6.21.0(eslint@8.57.1)(typescript@5.9.3))(eslint-import-resolver-typescript@3.10.1)(eslint@8.57.1)
+ transitivePeerDependencies:
+ - supports-color
+
+ eslint-module-utils@2.12.1(@typescript-eslint/parser@6.21.0(eslint@8.57.1)(typescript@5.9.3))(eslint-import-resolver-node@0.3.10)(eslint-import-resolver-typescript@3.10.1(eslint-plugin-import@2.32.0(eslint@8.57.1))(eslint@8.57.1))(eslint@8.57.1):
+ dependencies:
+ debug: 3.2.7
+ optionalDependencies:
+ '@typescript-eslint/parser': 6.21.0(eslint@8.57.1)(typescript@5.9.3)
+ eslint: 8.57.1
+ eslint-import-resolver-node: 0.3.10
+ eslint-import-resolver-typescript: 3.10.1(eslint-plugin-import@2.32.0(eslint@8.57.1))(eslint@8.57.1)
+ transitivePeerDependencies:
+ - supports-color
+
+ eslint-plugin-import@2.32.0(@typescript-eslint/parser@6.21.0(eslint@8.57.1)(typescript@5.9.3))(eslint-import-resolver-typescript@3.10.1)(eslint@8.57.1):
+ dependencies:
+ '@rtsao/scc': 1.1.0
+ array-includes: 3.1.9
+ array.prototype.findlastindex: 1.2.6
+ array.prototype.flat: 1.3.3
+ array.prototype.flatmap: 1.3.3
+ debug: 3.2.7
+ doctrine: 2.1.0
+ eslint: 8.57.1
+ eslint-import-resolver-node: 0.3.10
+ eslint-module-utils: 2.12.1(@typescript-eslint/parser@6.21.0(eslint@8.57.1)(typescript@5.9.3))(eslint-import-resolver-node@0.3.10)(eslint-import-resolver-typescript@3.10.1(eslint-plugin-import@2.32.0(eslint@8.57.1))(eslint@8.57.1))(eslint@8.57.1)
+ hasown: 2.0.2
+ is-core-module: 2.16.1
+ is-glob: 4.0.3
+ minimatch: 3.1.5
+ object.fromentries: 2.0.8
+ object.groupby: 1.0.3
+ object.values: 1.2.1
+ semver: 6.3.1
+ string.prototype.trimend: 1.0.9
+ tsconfig-paths: 3.15.0
+ optionalDependencies:
+ '@typescript-eslint/parser': 6.21.0(eslint@8.57.1)(typescript@5.9.3)
+ transitivePeerDependencies:
+ - eslint-import-resolver-typescript
+ - eslint-import-resolver-webpack
+ - supports-color
+
+ eslint-plugin-jsx-a11y@6.10.2(eslint@8.57.1):
+ dependencies:
+ aria-query: 5.3.2
+ array-includes: 3.1.9
+ array.prototype.flatmap: 1.3.3
+ ast-types-flow: 0.0.8
+ axe-core: 4.11.2
+ axobject-query: 4.1.0
+ damerau-levenshtein: 1.0.8
+ emoji-regex: 9.2.2
+ eslint: 8.57.1
+ hasown: 2.0.2
+ jsx-ast-utils: 3.3.5
+ language-tags: 1.0.9
+ minimatch: 3.1.5
+ object.fromentries: 2.0.8
+ safe-regex-test: 1.1.0
+ string.prototype.includes: 2.0.1
+
+ eslint-plugin-react-hooks@5.0.0-canary-7118f5dd7-20230705(eslint@8.57.1):
+ dependencies:
+ eslint: 8.57.1
+
+ eslint-plugin-react@7.37.5(eslint@8.57.1):
+ dependencies:
+ array-includes: 3.1.9
+ array.prototype.findlast: 1.2.5
+ array.prototype.flatmap: 1.3.3
+ array.prototype.tosorted: 1.1.4
+ doctrine: 2.1.0
+ es-iterator-helpers: 1.3.1
+ eslint: 8.57.1
+ estraverse: 5.3.0
+ hasown: 2.0.2
+ jsx-ast-utils: 3.3.5
+ minimatch: 3.1.5
+ object.entries: 1.1.9
+ object.fromentries: 2.0.8
+ object.values: 1.2.1
+ prop-types: 15.8.1
+ resolve: 2.0.0-next.6
+ semver: 6.3.1
+ string.prototype.matchall: 4.0.12
+ string.prototype.repeat: 1.0.0
+
+ eslint-scope@7.2.2:
+ dependencies:
+ esrecurse: 4.3.0
+ estraverse: 5.3.0
+
+ eslint-visitor-keys@3.4.3: {}
+
+ eslint@8.57.1:
+ dependencies:
+ '@eslint-community/eslint-utils': 4.9.1(eslint@8.57.1)
+ '@eslint-community/regexpp': 4.12.2
+ '@eslint/eslintrc': 2.1.4
+ '@eslint/js': 8.57.1
+ '@humanwhocodes/config-array': 0.13.0
+ '@humanwhocodes/module-importer': 1.0.1
+ '@nodelib/fs.walk': 1.2.8
+ '@ungap/structured-clone': 1.3.0
+ ajv: 6.14.0
+ chalk: 4.1.2
+ cross-spawn: 7.0.6
+ debug: 4.4.3
+ doctrine: 3.0.0
+ escape-string-regexp: 4.0.0
+ eslint-scope: 7.2.2
+ eslint-visitor-keys: 3.4.3
+ espree: 9.6.1
+ esquery: 1.7.0
+ esutils: 2.0.3
+ fast-deep-equal: 3.1.3
+ file-entry-cache: 6.0.1
+ find-up: 5.0.0
+ glob-parent: 6.0.2
+ globals: 13.24.0
+ graphemer: 1.4.0
+ ignore: 5.3.2
+ imurmurhash: 0.1.4
+ is-glob: 4.0.3
+ is-path-inside: 3.0.3
+ js-yaml: 4.1.1
+ json-stable-stringify-without-jsonify: 1.0.1
+ levn: 0.4.1
+ lodash.merge: 4.6.2
+ minimatch: 3.1.5
+ natural-compare: 1.4.0
+ optionator: 0.9.4
+ strip-ansi: 6.0.1
+ text-table: 0.2.0
+ transitivePeerDependencies:
+ - supports-color
+
+ espree@9.6.1:
+ dependencies:
+ acorn: 8.16.0
+ acorn-jsx: 5.3.2(acorn@8.16.0)
+ eslint-visitor-keys: 3.4.3
+
+ esprima@4.0.1: {}
+
+ esquery@1.7.0:
+ dependencies:
+ estraverse: 5.3.0
+
+ esrecurse@4.3.0:
+ dependencies:
+ estraverse: 5.3.0
+
+ estraverse@5.3.0: {}
+
+ estree-util-is-identifier-name@3.0.0: {}
+
+ esutils@2.0.3: {}
+
+ execa@7.2.0:
+ dependencies:
+ cross-spawn: 7.0.6
+ get-stream: 6.0.1
+ human-signals: 4.3.1
+ is-stream: 3.0.0
+ merge-stream: 2.0.0
+ npm-run-path: 5.3.0
+ onetime: 6.0.0
+ signal-exit: 3.0.7
+ strip-final-newline: 3.0.0
+
+ extend@3.0.2: {}
+
+ fast-deep-equal@3.1.3: {}
+
+ fast-glob@3.3.3:
+ dependencies:
+ '@nodelib/fs.stat': 2.0.5
+ '@nodelib/fs.walk': 1.2.8
+ glob-parent: 5.1.2
+ merge2: 1.4.1
+ micromatch: 4.0.8
+
+ fast-json-stable-stringify@2.1.0: {}
+
+ fast-levenshtein@2.0.6: {}
+
+ fastq@1.20.1:
+ dependencies:
+ reusify: 1.1.0
+
+ fault@1.0.4:
+ dependencies:
+ format: 0.2.2
+
+ fdir@6.5.0(picomatch@4.0.4):
+ optionalDependencies:
+ picomatch: 4.0.4
+
+ fetch-blob@3.2.0:
+ dependencies:
+ node-domexception: 1.0.0
+ web-streams-polyfill: 3.3.3
+
+ file-entry-cache@6.0.1:
+ dependencies:
+ flat-cache: 3.2.0
+
+ file-selector@2.1.2:
+ dependencies:
+ tslib: 2.8.1
+
+ fill-range@7.1.1:
+ dependencies:
+ to-regex-range: 5.0.1
+
+ find-up@5.0.0:
+ dependencies:
+ locate-path: 6.0.0
+ path-exists: 4.0.0
+
+ flat-cache@3.2.0:
+ dependencies:
+ flatted: 3.4.2
+ keyv: 4.5.4
+ rimraf: 3.0.2
+
+ flatted@3.4.2: {}
+
+ for-each@0.3.5:
+ dependencies:
+ is-callable: 1.2.7
+
+ foreground-child@3.3.1:
+ dependencies:
+ cross-spawn: 7.0.6
+ signal-exit: 4.1.0
+
+ format@0.2.2: {}
+
+ formdata-polyfill@4.0.10:
+ dependencies:
+ fetch-blob: 3.2.0
+
+ fraction.js@5.3.4: {}
+
+ fs-extra@11.3.4:
+ dependencies:
+ graceful-fs: 4.2.11
+ jsonfile: 6.2.0
+ universalify: 2.0.1
+
+ fs.realpath@1.0.0: {}
+
+ fsevents@2.3.3:
+ optional: true
+
+ function-bind@1.1.2: {}
+
+ function.prototype.name@1.1.8:
+ dependencies:
+ call-bind: 1.0.8
+ call-bound: 1.0.4
+ define-properties: 1.2.1
+ functions-have-names: 1.2.3
+ hasown: 2.0.2
+ is-callable: 1.2.7
+
+ functions-have-names@1.2.3: {}
+
+ generator-function@2.0.1: {}
+
+ gensync@1.0.0-beta.2: {}
+
+ get-intrinsic@1.3.0:
+ dependencies:
+ call-bind-apply-helpers: 1.0.2
+ es-define-property: 1.0.1
+ es-errors: 1.3.0
+ es-object-atoms: 1.1.1
+ function-bind: 1.1.2
+ get-proto: 1.0.1
+ gopd: 1.2.0
+ has-symbols: 1.1.0
+ hasown: 2.0.2
+ math-intrinsics: 1.1.0
+
+ get-nonce@1.0.1: {}
+
+ get-proto@1.0.1:
+ dependencies:
+ dunder-proto: 1.0.1
+ es-object-atoms: 1.1.1
+
+ get-stream@6.0.1: {}
+
+ get-symbol-description@1.1.0:
+ dependencies:
+ call-bound: 1.0.4
+ es-errors: 1.3.0
+ get-intrinsic: 1.3.0
+
+ get-tsconfig@4.13.7:
+ dependencies:
+ resolve-pkg-maps: 1.0.0
+
+ glob-parent@5.1.2:
+ dependencies:
+ is-glob: 4.0.3
+
+ glob-parent@6.0.2:
+ dependencies:
+ is-glob: 4.0.3
+
+ glob@10.3.10:
+ dependencies:
+ foreground-child: 3.3.1
+ jackspeak: 2.3.6
+ minimatch: 9.0.9
+ minipass: 7.1.3
+ path-scurry: 1.11.1
+
+ glob@7.2.3:
+ dependencies:
+ fs.realpath: 1.0.0
+ inflight: 1.0.6
+ inherits: 2.0.4
+ minimatch: 3.1.5
+ once: 1.4.0
+ path-is-absolute: 1.0.1
+
+ globals@13.24.0:
+ dependencies:
+ type-fest: 0.20.2
+
+ globalthis@1.0.4:
+ dependencies:
+ define-properties: 1.2.1
+ gopd: 1.2.0
+
+ globby@11.1.0:
+ dependencies:
+ array-union: 2.1.0
+ dir-glob: 3.0.1
+ fast-glob: 3.3.3
+ ignore: 5.3.2
+ merge2: 1.4.1
+ slash: 3.0.0
+
+ gopd@1.2.0: {}
+
+ graceful-fs@4.2.11: {}
+
+ graphemer@1.4.0: {}
+
+ has-bigints@1.1.0: {}
+
+ has-flag@4.0.0: {}
+
+ has-property-descriptors@1.0.2:
+ dependencies:
+ es-define-property: 1.0.1
+
+ has-proto@1.2.0:
+ dependencies:
+ dunder-proto: 1.0.1
+
+ has-symbols@1.1.0: {}
+
+ has-tostringtag@1.0.2:
+ dependencies:
+ has-symbols: 1.1.0
+
+ hasown@2.0.2:
+ dependencies:
+ function-bind: 1.1.2
+
+ hast-util-from-parse5@8.0.3:
+ dependencies:
+ '@types/hast': 3.0.4
+ '@types/unist': 3.0.3
+ devlop: 1.1.0
+ hastscript: 9.0.1
+ property-information: 7.1.0
+ vfile: 6.0.3
+ vfile-location: 5.0.3
+ web-namespaces: 2.0.1
+
+ hast-util-is-element@3.0.0:
+ dependencies:
+ '@types/hast': 3.0.4
+
+ hast-util-parse-selector@2.2.5: {}
+
+ hast-util-parse-selector@4.0.0:
+ dependencies:
+ '@types/hast': 3.0.4
+
+ hast-util-raw@9.1.0:
+ dependencies:
+ '@types/hast': 3.0.4
+ '@types/unist': 3.0.3
+ '@ungap/structured-clone': 1.3.0
+ hast-util-from-parse5: 8.0.3
+ hast-util-to-parse5: 8.0.1
+ html-void-elements: 3.0.0
+ mdast-util-to-hast: 13.2.1
+ parse5: 7.3.0
+ unist-util-position: 5.0.0
+ unist-util-visit: 5.1.0
+ vfile: 6.0.3
+ web-namespaces: 2.0.1
+ zwitch: 2.0.4
+
+ hast-util-to-jsx-runtime@2.3.6:
+ dependencies:
+ '@types/estree': 1.0.8
+ '@types/hast': 3.0.4
+ '@types/unist': 3.0.3
+ comma-separated-tokens: 2.0.3
+ devlop: 1.1.0
+ estree-util-is-identifier-name: 3.0.0
+ hast-util-whitespace: 3.0.0
+ mdast-util-mdx-expression: 2.0.1
+ mdast-util-mdx-jsx: 3.2.0
+ mdast-util-mdxjs-esm: 2.0.1
+ property-information: 7.1.0
+ space-separated-tokens: 2.0.2
+ style-to-js: 1.1.21
+ unist-util-position: 5.0.0
+ vfile-message: 4.0.3
+ transitivePeerDependencies:
+ - supports-color
+
+ hast-util-to-parse5@8.0.1:
+ dependencies:
+ '@types/hast': 3.0.4
+ comma-separated-tokens: 2.0.3
+ devlop: 1.1.0
+ property-information: 7.1.0
+ space-separated-tokens: 2.0.2
+ web-namespaces: 2.0.1
+ zwitch: 2.0.4
+
+ hast-util-to-text@4.0.2:
+ dependencies:
+ '@types/hast': 3.0.4
+ '@types/unist': 3.0.3
+ hast-util-is-element: 3.0.0
+ unist-util-find-after: 5.0.0
+
+ hast-util-whitespace@3.0.0:
+ dependencies:
+ '@types/hast': 3.0.4
+
+ hastscript@6.0.0:
+ dependencies:
+ '@types/hast': 2.3.10
+ comma-separated-tokens: 1.0.8
+ hast-util-parse-selector: 2.2.5
+ property-information: 5.6.0
+ space-separated-tokens: 1.1.5
+
+ hastscript@9.0.1:
+ dependencies:
+ '@types/hast': 3.0.4
+ comma-separated-tokens: 2.0.3
+ hast-util-parse-selector: 4.0.0
+ property-information: 7.1.0
+ space-separated-tokens: 2.0.2
+
+ highlight.js@10.7.3: {}
+
+ highlight.js@11.11.1: {}
+
+ highlightjs-vue@1.0.0: {}
+
+ html-url-attributes@3.0.1: {}
+
+ html-void-elements@3.0.0: {}
+
+ https-proxy-agent@6.2.1:
+ dependencies:
+ agent-base: 7.1.4
+ debug: 4.4.3
+ transitivePeerDependencies:
+ - supports-color
+
+ human-signals@4.3.1: {}
+
+ ieee754@1.2.1: {}
+
+ ignore@5.3.2: {}
+
+ import-fresh@3.3.1:
+ dependencies:
+ parent-module: 1.0.1
+ resolve-from: 4.0.0
+
+ imurmurhash@0.1.4: {}
+
+ inflight@1.0.6:
+ dependencies:
+ once: 1.4.0
+ wrappy: 1.0.2
+
+ inherits@2.0.4: {}
+
+ inline-style-parser@0.2.7: {}
+
+ internal-slot@1.1.0:
+ dependencies:
+ es-errors: 1.3.0
+ hasown: 2.0.2
+ side-channel: 1.1.0
+
+ is-alphabetical@1.0.4: {}
+
+ is-alphabetical@2.0.1: {}
+
+ is-alphanumerical@1.0.4:
+ dependencies:
+ is-alphabetical: 1.0.4
+ is-decimal: 1.0.4
+
+ is-alphanumerical@2.0.1:
+ dependencies:
+ is-alphabetical: 2.0.1
+ is-decimal: 2.0.1
+
+ is-array-buffer@3.0.5:
+ dependencies:
+ call-bind: 1.0.8
+ call-bound: 1.0.4
+ get-intrinsic: 1.3.0
+
+ is-arrayish@0.2.1: {}
+
+ is-async-function@2.1.1:
+ dependencies:
+ async-function: 1.0.0
+ call-bound: 1.0.4
+ get-proto: 1.0.1
+ has-tostringtag: 1.0.2
+ safe-regex-test: 1.1.0
+
+ is-bigint@1.1.0:
+ dependencies:
+ has-bigints: 1.1.0
+
+ is-binary-path@2.1.0:
+ dependencies:
+ binary-extensions: 2.3.0
+
+ is-boolean-object@1.2.2:
+ dependencies:
+ call-bound: 1.0.4
+ has-tostringtag: 1.0.2
+
+ is-bun-module@2.0.0:
+ dependencies:
+ semver: 7.7.4
+
+ is-callable@1.2.7: {}
+
+ is-core-module@2.16.1:
+ dependencies:
+ hasown: 2.0.2
+
+ is-data-view@1.0.2:
+ dependencies:
+ call-bound: 1.0.4
+ get-intrinsic: 1.3.0
+ is-typed-array: 1.1.15
+
+ is-date-object@1.1.0:
+ dependencies:
+ call-bound: 1.0.4
+ has-tostringtag: 1.0.2
+
+ is-decimal@1.0.4: {}
+
+ is-decimal@2.0.1: {}
+
+ is-extglob@2.1.1: {}
+
+ is-finalizationregistry@1.1.1:
+ dependencies:
+ call-bound: 1.0.4
+
+ is-fullwidth-code-point@3.0.0: {}
+
+ is-generator-function@1.1.2:
+ dependencies:
+ call-bound: 1.0.4
+ generator-function: 2.0.1
+ get-proto: 1.0.1
+ has-tostringtag: 1.0.2
+ safe-regex-test: 1.1.0
+
+ is-glob@4.0.3:
+ dependencies:
+ is-extglob: 2.1.1
+
+ is-hexadecimal@1.0.4: {}
+
+ is-hexadecimal@2.0.1: {}
+
+ is-interactive@2.0.0: {}
+
+ is-map@2.0.3: {}
+
+ is-negative-zero@2.0.3: {}
+
+ is-number-object@1.1.1:
+ dependencies:
+ call-bound: 1.0.4
+ has-tostringtag: 1.0.2
+
+ is-number@7.0.0: {}
+
+ is-path-inside@3.0.3: {}
+
+ is-plain-obj@4.1.0: {}
+
+ is-regex@1.2.1:
+ dependencies:
+ call-bound: 1.0.4
+ gopd: 1.2.0
+ has-tostringtag: 1.0.2
+ hasown: 2.0.2
+
+ is-set@2.0.3: {}
+
+ is-shared-array-buffer@1.0.4:
+ dependencies:
+ call-bound: 1.0.4
+
+ is-stream@3.0.0: {}
+
+ is-string@1.1.1:
+ dependencies:
+ call-bound: 1.0.4
+ has-tostringtag: 1.0.2
+
+ is-symbol@1.1.1:
+ dependencies:
+ call-bound: 1.0.4
+ has-symbols: 1.1.0
+ safe-regex-test: 1.1.0
+
+ is-typed-array@1.1.15:
+ dependencies:
+ which-typed-array: 1.1.20
+
+ is-unicode-supported@1.3.0: {}
+
+ is-weakmap@2.0.2: {}
+
+ is-weakref@1.1.1:
+ dependencies:
+ call-bound: 1.0.4
+
+ is-weakset@2.0.4:
+ dependencies:
+ call-bound: 1.0.4
+ get-intrinsic: 1.3.0
+
+ isarray@2.0.5: {}
+
+ isexe@2.0.0: {}
+
+ iterator.prototype@1.1.5:
+ dependencies:
+ define-data-property: 1.1.4
+ es-object-atoms: 1.1.1
+ get-intrinsic: 1.3.0
+ get-proto: 1.0.1
+ has-symbols: 1.1.0
+ set-function-name: 2.0.2
+
+ jackspeak@2.3.6:
+ dependencies:
+ '@isaacs/cliui': 8.0.2
+ optionalDependencies:
+ '@pkgjs/parseargs': 0.11.0
+
+ jiti@1.21.7: {}
+
+ js-tokens@4.0.0: {}
+
+ js-yaml@4.1.1:
+ dependencies:
+ argparse: 2.0.1
+
+ jsesc@3.1.0: {}
+
+ json-buffer@3.0.1: {}
+
+ json-parse-even-better-errors@2.3.1: {}
+
+ json-schema-traverse@0.4.1: {}
+
+ json-schema@0.4.0: {}
+
+ json-stable-stringify-without-jsonify@1.0.1: {}
+
+ json5@1.0.2:
+ dependencies:
+ minimist: 1.2.8
+
+ json5@2.2.3: {}
+
+ jsondiffpatch@0.6.0:
+ dependencies:
+ '@types/diff-match-patch': 1.0.36
+ chalk: 5.6.2
+ diff-match-patch: 1.0.5
+
+ jsonfile@6.2.0:
+ dependencies:
+ universalify: 2.0.1
+ optionalDependencies:
+ graceful-fs: 4.2.11
+
+ jsx-ast-utils@3.3.5:
+ dependencies:
+ array-includes: 3.1.9
+ array.prototype.flat: 1.3.3
+ object.assign: 4.1.7
+ object.values: 1.2.1
+
+ keyv@4.5.4:
+ dependencies:
+ json-buffer: 3.0.1
+
+ kleur@3.0.3: {}
+
+ language-subtag-registry@0.3.23: {}
+
+ language-tags@1.0.9:
+ dependencies:
+ language-subtag-registry: 0.3.23
+
+ levn@0.4.1:
+ dependencies:
+ prelude-ls: 1.2.1
+ type-check: 0.4.0
+
+ lilconfig@3.1.3: {}
+
+ lines-and-columns@1.2.4: {}
+
+ locate-path@6.0.0:
+ dependencies:
+ p-locate: 5.0.0
+
+ lodash._reinterpolate@3.0.0: {}
+
+ lodash.merge@4.6.2: {}
+
+ lodash.template@4.18.1:
+ dependencies:
+ lodash._reinterpolate: 3.0.0
+ lodash.templatesettings: 4.2.0
+
+ lodash.templatesettings@4.2.0:
+ dependencies:
+ lodash._reinterpolate: 3.0.0
+
+ log-symbols@5.1.0:
+ dependencies:
+ chalk: 5.2.0
+ is-unicode-supported: 1.3.0
+
+ longest-streak@3.1.0: {}
+
+ loose-envify@1.4.0:
+ dependencies:
+ js-tokens: 4.0.0
+
+ lowlight@1.20.0:
+ dependencies:
+ fault: 1.0.4
+ highlight.js: 10.7.3
+
+ lowlight@3.3.0:
+ dependencies:
+ '@types/hast': 3.0.4
+ devlop: 1.1.0
+ highlight.js: 11.11.1
+
+ lru-cache@10.4.3: {}
+
+ lru-cache@5.1.1:
+ dependencies:
+ yallist: 3.1.1
+
+ lucide-react@0.323.0(react@18.3.1):
+ dependencies:
+ react: 18.3.1
+
+ markdown-table@3.0.4: {}
+
+ math-intrinsics@1.1.0: {}
+
+ mdast-util-find-and-replace@3.0.2:
+ dependencies:
+ '@types/mdast': 4.0.4
+ escape-string-regexp: 5.0.0
+ unist-util-is: 6.0.1
+ unist-util-visit-parents: 6.0.2
+
+ mdast-util-from-markdown@2.0.3:
+ dependencies:
+ '@types/mdast': 4.0.4
+ '@types/unist': 3.0.3
+ decode-named-character-reference: 1.3.0
+ devlop: 1.1.0
+ mdast-util-to-string: 4.0.0
+ micromark: 4.0.2
+ micromark-util-decode-numeric-character-reference: 2.0.2
+ micromark-util-decode-string: 2.0.1
+ micromark-util-normalize-identifier: 2.0.1
+ micromark-util-symbol: 2.0.1
+ micromark-util-types: 2.0.2
+ unist-util-stringify-position: 4.0.0
+ transitivePeerDependencies:
+ - supports-color
+
+ mdast-util-gfm-autolink-literal@2.0.1:
+ dependencies:
+ '@types/mdast': 4.0.4
+ ccount: 2.0.1
+ devlop: 1.1.0
+ mdast-util-find-and-replace: 3.0.2
+ micromark-util-character: 2.1.1
+
+ mdast-util-gfm-footnote@2.1.0:
+ dependencies:
+ '@types/mdast': 4.0.4
+ devlop: 1.1.0
+ mdast-util-from-markdown: 2.0.3
+ mdast-util-to-markdown: 2.1.2
+ micromark-util-normalize-identifier: 2.0.1
+ transitivePeerDependencies:
+ - supports-color
+
+ mdast-util-gfm-strikethrough@2.0.0:
+ dependencies:
+ '@types/mdast': 4.0.4
+ mdast-util-from-markdown: 2.0.3
+ mdast-util-to-markdown: 2.1.2
+ transitivePeerDependencies:
+ - supports-color
+
+ mdast-util-gfm-table@2.0.0:
+ dependencies:
+ '@types/mdast': 4.0.4
+ devlop: 1.1.0
+ markdown-table: 3.0.4
+ mdast-util-from-markdown: 2.0.3
+ mdast-util-to-markdown: 2.1.2
+ transitivePeerDependencies:
+ - supports-color
+
+ mdast-util-gfm-task-list-item@2.0.0:
+ dependencies:
+ '@types/mdast': 4.0.4
+ devlop: 1.1.0
+ mdast-util-from-markdown: 2.0.3
+ mdast-util-to-markdown: 2.1.2
+ transitivePeerDependencies:
+ - supports-color
+
+ mdast-util-gfm@3.1.0:
+ dependencies:
+ mdast-util-from-markdown: 2.0.3
+ mdast-util-gfm-autolink-literal: 2.0.1
+ mdast-util-gfm-footnote: 2.1.0
+ mdast-util-gfm-strikethrough: 2.0.0
+ mdast-util-gfm-table: 2.0.0
+ mdast-util-gfm-task-list-item: 2.0.0
+ mdast-util-to-markdown: 2.1.2
+ transitivePeerDependencies:
+ - supports-color
+
+ mdast-util-mdx-expression@2.0.1:
+ dependencies:
+ '@types/estree-jsx': 1.0.5
+ '@types/hast': 3.0.4
+ '@types/mdast': 4.0.4
+ devlop: 1.1.0
+ mdast-util-from-markdown: 2.0.3
+ mdast-util-to-markdown: 2.1.2
+ transitivePeerDependencies:
+ - supports-color
+
+ mdast-util-mdx-jsx@3.2.0:
+ dependencies:
+ '@types/estree-jsx': 1.0.5
+ '@types/hast': 3.0.4
+ '@types/mdast': 4.0.4
+ '@types/unist': 3.0.3
+ ccount: 2.0.1
+ devlop: 1.1.0
+ mdast-util-from-markdown: 2.0.3
+ mdast-util-to-markdown: 2.1.2
+ parse-entities: 4.0.2
+ stringify-entities: 4.0.4
+ unist-util-stringify-position: 4.0.0
+ vfile-message: 4.0.3
+ transitivePeerDependencies:
+ - supports-color
+
+ mdast-util-mdxjs-esm@2.0.1:
+ dependencies:
+ '@types/estree-jsx': 1.0.5
+ '@types/hast': 3.0.4
+ '@types/mdast': 4.0.4
+ devlop: 1.1.0
+ mdast-util-from-markdown: 2.0.3
+ mdast-util-to-markdown: 2.1.2
+ transitivePeerDependencies:
+ - supports-color
+
+ mdast-util-phrasing@4.1.0:
+ dependencies:
+ '@types/mdast': 4.0.4
+ unist-util-is: 6.0.1
+
+ mdast-util-to-hast@13.2.1:
+ dependencies:
+ '@types/hast': 3.0.4
+ '@types/mdast': 4.0.4
+ '@ungap/structured-clone': 1.3.0
+ devlop: 1.1.0
+ micromark-util-sanitize-uri: 2.0.1
+ trim-lines: 3.0.1
+ unist-util-position: 5.0.0
+ unist-util-visit: 5.1.0
+ vfile: 6.0.3
+
+ mdast-util-to-markdown@2.1.2:
+ dependencies:
+ '@types/mdast': 4.0.4
+ '@types/unist': 3.0.3
+ longest-streak: 3.1.0
+ mdast-util-phrasing: 4.1.0
+ mdast-util-to-string: 4.0.0
+ micromark-util-classify-character: 2.0.1
+ micromark-util-decode-string: 2.0.1
+ unist-util-visit: 5.1.0
+ zwitch: 2.0.4
+
+ mdast-util-to-string@4.0.0:
+ dependencies:
+ '@types/mdast': 4.0.4
+
+ merge-stream@2.0.0: {}
+
+ merge2@1.4.1: {}
+
+ micromark-core-commonmark@2.0.3:
+ dependencies:
+ decode-named-character-reference: 1.3.0
+ devlop: 1.1.0
+ micromark-factory-destination: 2.0.1
+ micromark-factory-label: 2.0.1
+ micromark-factory-space: 2.0.1
+ micromark-factory-title: 2.0.1
+ micromark-factory-whitespace: 2.0.1
+ micromark-util-character: 2.1.1
+ micromark-util-chunked: 2.0.1
+ micromark-util-classify-character: 2.0.1
+ micromark-util-html-tag-name: 2.0.1
+ micromark-util-normalize-identifier: 2.0.1
+ micromark-util-resolve-all: 2.0.1
+ micromark-util-subtokenize: 2.1.0
+ micromark-util-symbol: 2.0.1
+ micromark-util-types: 2.0.2
+
+ micromark-extension-gfm-autolink-literal@2.1.0:
+ dependencies:
+ micromark-util-character: 2.1.1
+ micromark-util-sanitize-uri: 2.0.1
+ micromark-util-symbol: 2.0.1
+ micromark-util-types: 2.0.2
+
+ micromark-extension-gfm-footnote@2.1.0:
+ dependencies:
+ devlop: 1.1.0
+ micromark-core-commonmark: 2.0.3
+ micromark-factory-space: 2.0.1
+ micromark-util-character: 2.1.1
+ micromark-util-normalize-identifier: 2.0.1
+ micromark-util-sanitize-uri: 2.0.1
+ micromark-util-symbol: 2.0.1
+ micromark-util-types: 2.0.2
+
+ micromark-extension-gfm-strikethrough@2.1.0:
+ dependencies:
+ devlop: 1.1.0
+ micromark-util-chunked: 2.0.1
+ micromark-util-classify-character: 2.0.1
+ micromark-util-resolve-all: 2.0.1
+ micromark-util-symbol: 2.0.1
+ micromark-util-types: 2.0.2
+
+ micromark-extension-gfm-table@2.1.1:
+ dependencies:
+ devlop: 1.1.0
+ micromark-factory-space: 2.0.1
+ micromark-util-character: 2.1.1
+ micromark-util-symbol: 2.0.1
+ micromark-util-types: 2.0.2
+
+ micromark-extension-gfm-tagfilter@2.0.0:
+ dependencies:
+ micromark-util-types: 2.0.2
+
+ micromark-extension-gfm-task-list-item@2.1.0:
+ dependencies:
+ devlop: 1.1.0
+ micromark-factory-space: 2.0.1
+ micromark-util-character: 2.1.1
+ micromark-util-symbol: 2.0.1
+ micromark-util-types: 2.0.2
+
+ micromark-extension-gfm@3.0.0:
+ dependencies:
+ micromark-extension-gfm-autolink-literal: 2.1.0
+ micromark-extension-gfm-footnote: 2.1.0
+ micromark-extension-gfm-strikethrough: 2.1.0
+ micromark-extension-gfm-table: 2.1.1
+ micromark-extension-gfm-tagfilter: 2.0.0
+ micromark-extension-gfm-task-list-item: 2.1.0
+ micromark-util-combine-extensions: 2.0.1
+ micromark-util-types: 2.0.2
+
+ micromark-factory-destination@2.0.1:
+ dependencies:
+ micromark-util-character: 2.1.1
+ micromark-util-symbol: 2.0.1
+ micromark-util-types: 2.0.2
+
+ micromark-factory-label@2.0.1:
+ dependencies:
+ devlop: 1.1.0
+ micromark-util-character: 2.1.1
+ micromark-util-symbol: 2.0.1
+ micromark-util-types: 2.0.2
+
+ micromark-factory-space@2.0.1:
+ dependencies:
+ micromark-util-character: 2.1.1
+ micromark-util-types: 2.0.2
+
+ micromark-factory-title@2.0.1:
+ dependencies:
+ micromark-factory-space: 2.0.1
+ micromark-util-character: 2.1.1
+ micromark-util-symbol: 2.0.1
+ micromark-util-types: 2.0.2
+
+ micromark-factory-whitespace@2.0.1:
+ dependencies:
+ micromark-factory-space: 2.0.1
+ micromark-util-character: 2.1.1
+ micromark-util-symbol: 2.0.1
+ micromark-util-types: 2.0.2
+
+ micromark-util-character@2.1.1:
+ dependencies:
+ micromark-util-symbol: 2.0.1
+ micromark-util-types: 2.0.2
+
+ micromark-util-chunked@2.0.1:
+ dependencies:
+ micromark-util-symbol: 2.0.1
+
+ micromark-util-classify-character@2.0.1:
+ dependencies:
+ micromark-util-character: 2.1.1
+ micromark-util-symbol: 2.0.1
+ micromark-util-types: 2.0.2
+
+ micromark-util-combine-extensions@2.0.1:
+ dependencies:
+ micromark-util-chunked: 2.0.1
+ micromark-util-types: 2.0.2
+
+ micromark-util-decode-numeric-character-reference@2.0.2:
+ dependencies:
+ micromark-util-symbol: 2.0.1
+
+ micromark-util-decode-string@2.0.1:
+ dependencies:
+ decode-named-character-reference: 1.3.0
+ micromark-util-character: 2.1.1
+ micromark-util-decode-numeric-character-reference: 2.0.2
+ micromark-util-symbol: 2.0.1
+
+ micromark-util-encode@2.0.1: {}
+
+ micromark-util-html-tag-name@2.0.1: {}
+
+ micromark-util-normalize-identifier@2.0.1:
+ dependencies:
+ micromark-util-symbol: 2.0.1
+
+ micromark-util-resolve-all@2.0.1:
+ dependencies:
+ micromark-util-types: 2.0.2
+
+ micromark-util-sanitize-uri@2.0.1:
+ dependencies:
+ micromark-util-character: 2.1.1
+ micromark-util-encode: 2.0.1
+ micromark-util-symbol: 2.0.1
+
+ micromark-util-subtokenize@2.1.0:
+ dependencies:
+ devlop: 1.1.0
+ micromark-util-chunked: 2.0.1
+ micromark-util-symbol: 2.0.1
+ micromark-util-types: 2.0.2
+
+ micromark-util-symbol@2.0.1: {}
+
+ micromark-util-types@2.0.2: {}
+
+ micromark@4.0.2:
+ dependencies:
+ '@types/debug': 4.1.13
+ debug: 4.4.3
+ decode-named-character-reference: 1.3.0
+ devlop: 1.1.0
+ micromark-core-commonmark: 2.0.3
+ micromark-factory-space: 2.0.1
+ micromark-util-character: 2.1.1
+ micromark-util-chunked: 2.0.1
+ micromark-util-combine-extensions: 2.0.1
+ micromark-util-decode-numeric-character-reference: 2.0.2
+ micromark-util-encode: 2.0.1
+ micromark-util-normalize-identifier: 2.0.1
+ micromark-util-resolve-all: 2.0.1
+ micromark-util-sanitize-uri: 2.0.1
+ micromark-util-subtokenize: 2.1.0
+ micromark-util-symbol: 2.0.1
+ micromark-util-types: 2.0.2
+ transitivePeerDependencies:
+ - supports-color
+
+ micromatch@4.0.8:
+ dependencies:
+ braces: 3.0.3
+ picomatch: 2.3.2
+
+ mimic-fn@2.1.0: {}
+
+ mimic-fn@4.0.0: {}
+
+ minimatch@3.1.5:
+ dependencies:
+ brace-expansion: 1.1.13
+
+ minimatch@7.4.9:
+ dependencies:
+ brace-expansion: 2.0.3
+
+ minimatch@9.0.3:
+ dependencies:
+ brace-expansion: 2.0.3
+
+ minimatch@9.0.9:
+ dependencies:
+ brace-expansion: 2.0.3
+
+ minimist@1.2.8: {}
+
+ minipass@7.1.3: {}
+
+ mkdirp@2.1.6: {}
+
+ ms@2.1.3: {}
+
+ mz@2.7.0:
+ dependencies:
+ any-promise: 1.3.0
+ object-assign: 4.1.1
+ thenify-all: 1.6.0
+
+ nanoid@3.3.11: {}
+
+ napi-postinstall@0.3.4: {}
+
+ natural-compare@1.4.0: {}
+
+ next-themes@0.2.1(next@14.2.32(@babel/core@7.29.0)(@opentelemetry/api@1.9.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react-dom@18.3.1(react@18.3.1))(react@18.3.1):
+ dependencies:
+ next: 14.2.32(@babel/core@7.29.0)(@opentelemetry/api@1.9.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
+ react: 18.3.1
+ react-dom: 18.3.1(react@18.3.1)
+
+ next@14.2.32(@babel/core@7.29.0)(@opentelemetry/api@1.9.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1):
+ dependencies:
+ '@next/env': 14.2.32
+ '@swc/helpers': 0.5.5
+ busboy: 1.6.0
+ caniuse-lite: 1.0.30001786
+ graceful-fs: 4.2.11
+ postcss: 8.4.31
+ react: 18.3.1
+ react-dom: 18.3.1(react@18.3.1)
+ styled-jsx: 5.1.1(@babel/core@7.29.0)(react@18.3.1)
+ optionalDependencies:
+ '@next/swc-darwin-arm64': 14.2.32
+ '@next/swc-darwin-x64': 14.2.32
+ '@next/swc-linux-arm64-gnu': 14.2.32
+ '@next/swc-linux-arm64-musl': 14.2.32
+ '@next/swc-linux-x64-gnu': 14.2.32
+ '@next/swc-linux-x64-musl': 14.2.32
+ '@next/swc-win32-arm64-msvc': 14.2.32
+ '@next/swc-win32-ia32-msvc': 14.2.32
+ '@next/swc-win32-x64-msvc': 14.2.32
+ '@opentelemetry/api': 1.9.0
+ transitivePeerDependencies:
+ - '@babel/core'
+ - babel-plugin-macros
+
+ node-domexception@1.0.0: {}
+
+ node-exports-info@1.6.0:
+ dependencies:
+ array.prototype.flatmap: 1.3.3
+ es-errors: 1.3.0
+ object.entries: 1.1.9
+ semver: 6.3.1
+
+ node-fetch@3.3.2:
+ dependencies:
+ data-uri-to-buffer: 4.0.1
+ fetch-blob: 3.2.0
+ formdata-polyfill: 4.0.10
+
+ node-releases@2.0.37: {}
+
+ normalize-path@3.0.0: {}
+
+ npm-run-path@5.3.0:
+ dependencies:
+ path-key: 4.0.0
+
+ object-assign@4.1.1: {}
+
+ object-hash@3.0.0: {}
+
+ object-inspect@1.13.4: {}
+
+ object-keys@1.1.1: {}
+
+ object.assign@4.1.7:
+ dependencies:
+ call-bind: 1.0.8
+ call-bound: 1.0.4
+ define-properties: 1.2.1
+ es-object-atoms: 1.1.1
+ has-symbols: 1.1.0
+ object-keys: 1.1.1
+
+ object.entries@1.1.9:
+ dependencies:
+ call-bind: 1.0.8
+ call-bound: 1.0.4
+ define-properties: 1.2.1
+ es-object-atoms: 1.1.1
+
+ object.fromentries@2.0.8:
+ dependencies:
+ call-bind: 1.0.8
+ define-properties: 1.2.1
+ es-abstract: 1.24.1
+ es-object-atoms: 1.1.1
+
+ object.groupby@1.0.3:
+ dependencies:
+ call-bind: 1.0.8
+ define-properties: 1.2.1
+ es-abstract: 1.24.1
+
+ object.values@1.2.1:
+ dependencies:
+ call-bind: 1.0.8
+ call-bound: 1.0.4
+ define-properties: 1.2.1
+ es-object-atoms: 1.1.1
+
+ once@1.4.0:
+ dependencies:
+ wrappy: 1.0.2
+
+ onetime@5.1.2:
+ dependencies:
+ mimic-fn: 2.1.0
+
+ onetime@6.0.0:
+ dependencies:
+ mimic-fn: 4.0.0
+
+ optionator@0.9.4:
+ dependencies:
+ deep-is: 0.1.4
+ fast-levenshtein: 2.0.6
+ levn: 0.4.1
+ prelude-ls: 1.2.1
+ type-check: 0.4.0
+ word-wrap: 1.2.5
+
+ ora@6.3.1:
+ dependencies:
+ chalk: 5.2.0
+ cli-cursor: 4.0.0
+ cli-spinners: 2.9.2
+ is-interactive: 2.0.0
+ is-unicode-supported: 1.3.0
+ log-symbols: 5.1.0
+ stdin-discarder: 0.1.0
+ strip-ansi: 7.2.0
+ wcwidth: 1.0.1
+
+ own-keys@1.0.1:
+ dependencies:
+ get-intrinsic: 1.3.0
+ object-keys: 1.1.1
+ safe-push-apply: 1.0.0
+
+ p-limit@3.1.0:
+ dependencies:
+ yocto-queue: 0.1.0
+
+ p-locate@5.0.0:
+ dependencies:
+ p-limit: 3.1.0
+
+ parent-module@1.0.1:
+ dependencies:
+ callsites: 3.1.0
+
+ parse-entities@2.0.0:
+ dependencies:
+ character-entities: 1.2.4
+ character-entities-legacy: 1.1.4
+ character-reference-invalid: 1.1.4
+ is-alphanumerical: 1.0.4
+ is-decimal: 1.0.4
+ is-hexadecimal: 1.0.4
+
+ parse-entities@4.0.2:
+ dependencies:
+ '@types/unist': 2.0.11
+ character-entities-legacy: 3.0.0
+ character-reference-invalid: 2.0.1
+ decode-named-character-reference: 1.3.0
+ is-alphanumerical: 2.0.1
+ is-decimal: 2.0.1
+ is-hexadecimal: 2.0.1
+
+ parse-json@5.2.0:
+ dependencies:
+ '@babel/code-frame': 7.29.0
+ error-ex: 1.3.4
+ json-parse-even-better-errors: 2.3.1
+ lines-and-columns: 1.2.4
+
+ parse5@7.3.0:
+ dependencies:
+ entities: 6.0.1
+
+ path-browserify@1.0.1: {}
+
+ path-exists@4.0.0: {}
+
+ path-is-absolute@1.0.1: {}
+
+ path-key@3.1.1: {}
+
+ path-key@4.0.0: {}
+
+ path-parse@1.0.7: {}
+
+ path-scurry@1.11.1:
+ dependencies:
+ lru-cache: 10.4.3
+ minipass: 7.1.3
+
+ path-type@4.0.0: {}
+
+ picocolors@1.1.1: {}
+
+ picomatch@2.3.2: {}
+
+ picomatch@4.0.4: {}
+
+ pify@2.3.0: {}
+
+ pirates@4.0.7: {}
+
+ possible-typed-array-names@1.1.0: {}
+
+ postcss-import@15.1.0(postcss@8.5.8):
+ dependencies:
+ postcss: 8.5.8
+ postcss-value-parser: 4.2.0
+ read-cache: 1.0.0
+ resolve: 1.22.11
+
+ postcss-js@4.1.0(postcss@8.5.8):
+ dependencies:
+ camelcase-css: 2.0.1
+ postcss: 8.5.8
+
+ postcss-load-config@6.0.1(jiti@1.21.7)(postcss@8.5.8):
+ dependencies:
+ lilconfig: 3.1.3
+ optionalDependencies:
+ jiti: 1.21.7
+ postcss: 8.5.8
+
+ postcss-nested@6.2.0(postcss@8.5.8):
+ dependencies:
+ postcss: 8.5.8
+ postcss-selector-parser: 6.1.2
+
+ postcss-selector-parser@6.1.2:
+ dependencies:
+ cssesc: 3.0.0
+ util-deprecate: 1.0.2
+
+ postcss-value-parser@4.2.0: {}
+
+ postcss@8.4.31:
+ dependencies:
+ nanoid: 3.3.11
+ picocolors: 1.1.1
+ source-map-js: 1.2.1
+
+ postcss@8.5.8:
+ dependencies:
+ nanoid: 3.3.11
+ picocolors: 1.1.1
+ source-map-js: 1.2.1
+
+ prelude-ls@1.2.1: {}
+
+ prismjs@1.27.0: {}
+
+ prismjs@1.30.0: {}
+
+ prompts@2.4.2:
+ dependencies:
+ kleur: 3.0.3
+ sisteransi: 1.0.5
+
+ prop-types@15.8.1:
+ dependencies:
+ loose-envify: 1.4.0
+ object-assign: 4.1.1
+ react-is: 16.13.1
+
+ property-information@5.6.0:
+ dependencies:
+ xtend: 4.0.2
+
+ property-information@7.1.0: {}
+
+ punycode@2.3.1: {}
+
+ queue-microtask@1.2.3: {}
+
+ react-dom@18.3.1(react@18.3.1):
+ dependencies:
+ loose-envify: 1.4.0
+ react: 18.3.1
+ scheduler: 0.23.2
+
+ react-dropzone@14.4.1(react@18.3.1):
+ dependencies:
+ attr-accept: 2.2.5
+ file-selector: 2.1.2
+ prop-types: 15.8.1
+ react: 18.3.1
+
+ react-file-icon@1.6.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1):
+ dependencies:
+ colord: 2.9.3
+ prop-types: 15.8.1
+ react: 18.3.1
+ react-dom: 18.3.1(react@18.3.1)
+
+ react-is@16.13.1: {}
+
+ react-markdown@9.1.0(@types/react@18.3.28)(react@18.3.1):
+ dependencies:
+ '@types/hast': 3.0.4
+ '@types/mdast': 4.0.4
+ '@types/react': 18.3.28
+ devlop: 1.1.0
+ hast-util-to-jsx-runtime: 2.3.6
+ html-url-attributes: 3.0.1
+ mdast-util-to-hast: 13.2.1
+ react: 18.3.1
+ remark-parse: 11.0.0
+ remark-rehype: 11.1.2
+ unified: 11.0.5
+ unist-util-visit: 5.1.0
+ vfile: 6.0.3
+ transitivePeerDependencies:
+ - supports-color
+
+ react-remove-scroll-bar@2.3.8(@types/react@18.3.28)(react@18.3.1):
+ dependencies:
+ react: 18.3.1
+ react-style-singleton: 2.2.3(@types/react@18.3.28)(react@18.3.1)
+ tslib: 2.8.1
+ optionalDependencies:
+ '@types/react': 18.3.28
+
+ react-remove-scroll@2.7.2(@types/react@18.3.28)(react@18.3.1):
+ dependencies:
+ react: 18.3.1
+ react-remove-scroll-bar: 2.3.8(@types/react@18.3.28)(react@18.3.1)
+ react-style-singleton: 2.2.3(@types/react@18.3.28)(react@18.3.1)
+ tslib: 2.8.1
+ use-callback-ref: 1.3.3(@types/react@18.3.28)(react@18.3.1)
+ use-sidecar: 1.1.3(@types/react@18.3.28)(react@18.3.1)
+ optionalDependencies:
+ '@types/react': 18.3.28
+
+ react-style-singleton@2.2.3(@types/react@18.3.28)(react@18.3.1):
+ dependencies:
+ get-nonce: 1.0.1
+ react: 18.3.1
+ tslib: 2.8.1
+ optionalDependencies:
+ '@types/react': 18.3.28
+
+ react-syntax-highlighter@15.6.6(react@18.3.1):
+ dependencies:
+ '@babel/runtime': 7.29.2
+ highlight.js: 10.7.3
+ highlightjs-vue: 1.0.0
+ lowlight: 1.20.0
+ prismjs: 1.30.0
+ react: 18.3.1
+ refractor: 3.6.0
+
+ react@18.3.1:
+ dependencies:
+ loose-envify: 1.4.0
+
+ read-cache@1.0.0:
+ dependencies:
+ pify: 2.3.0
+
+ readable-stream@3.6.2:
+ dependencies:
+ inherits: 2.0.4
+ string_decoder: 1.3.0
+ util-deprecate: 1.0.2
+
+ readdirp@3.6.0:
+ dependencies:
+ picomatch: 2.3.2
+
+ recast@0.23.11:
+ dependencies:
+ ast-types: 0.16.1
+ esprima: 4.0.1
+ source-map: 0.6.1
+ tiny-invariant: 1.3.3
+ tslib: 2.8.1
+
+ reflect.getprototypeof@1.0.10:
+ dependencies:
+ call-bind: 1.0.8
+ define-properties: 1.2.1
+ es-abstract: 1.24.1
+ es-errors: 1.3.0
+ es-object-atoms: 1.1.1
+ get-intrinsic: 1.3.0
+ get-proto: 1.0.1
+ which-builtin-type: 1.2.1
+
+ refractor@3.6.0:
+ dependencies:
+ hastscript: 6.0.0
+ parse-entities: 2.0.0
+ prismjs: 1.27.0
+
+ regexp.prototype.flags@1.5.4:
+ dependencies:
+ call-bind: 1.0.8
+ define-properties: 1.2.1
+ es-errors: 1.3.0
+ get-proto: 1.0.1
+ gopd: 1.2.0
+ set-function-name: 2.0.2
+
+ rehype-highlight@7.0.2:
+ dependencies:
+ '@types/hast': 3.0.4
+ hast-util-to-text: 4.0.2
+ lowlight: 3.3.0
+ unist-util-visit: 5.1.0
+ vfile: 6.0.3
+
+ rehype-raw@7.0.0:
+ dependencies:
+ '@types/hast': 3.0.4
+ hast-util-raw: 9.1.0
+ vfile: 6.0.3
+
+ remark-gfm@4.0.1:
+ dependencies:
+ '@types/mdast': 4.0.4
+ mdast-util-gfm: 3.1.0
+ micromark-extension-gfm: 3.0.0
+ remark-parse: 11.0.0
+ remark-stringify: 11.0.0
+ unified: 11.0.5
+ transitivePeerDependencies:
+ - supports-color
+
+ remark-parse@11.0.0:
+ dependencies:
+ '@types/mdast': 4.0.4
+ mdast-util-from-markdown: 2.0.3
+ micromark-util-types: 2.0.2
+ unified: 11.0.5
+ transitivePeerDependencies:
+ - supports-color
+
+ remark-rehype@11.1.2:
+ dependencies:
+ '@types/hast': 3.0.4
+ '@types/mdast': 4.0.4
+ mdast-util-to-hast: 13.2.1
+ unified: 11.0.5
+ vfile: 6.0.3
+
+ remark-stringify@11.0.0:
+ dependencies:
+ '@types/mdast': 4.0.4
+ mdast-util-to-markdown: 2.1.2
+ unified: 11.0.5
+
+ resolve-from@4.0.0: {}
+
+ resolve-pkg-maps@1.0.0: {}
+
+ resolve@1.22.11:
+ dependencies:
+ is-core-module: 2.16.1
+ path-parse: 1.0.7
+ supports-preserve-symlinks-flag: 1.0.0
+
+ resolve@2.0.0-next.6:
+ dependencies:
+ es-errors: 1.3.0
+ is-core-module: 2.16.1
+ node-exports-info: 1.6.0
+ object-keys: 1.1.1
+ path-parse: 1.0.7
+ supports-preserve-symlinks-flag: 1.0.0
+
+ restore-cursor@4.0.0:
+ dependencies:
+ onetime: 5.1.2
+ signal-exit: 3.0.7
+
+ reusify@1.1.0: {}
+
+ rimraf@3.0.2:
+ dependencies:
+ glob: 7.2.3
+
+ run-parallel@1.2.0:
+ dependencies:
+ queue-microtask: 1.2.3
+
+ safe-array-concat@1.1.3:
+ dependencies:
+ call-bind: 1.0.8
+ call-bound: 1.0.4
+ get-intrinsic: 1.3.0
+ has-symbols: 1.1.0
+ isarray: 2.0.5
+
+ safe-buffer@5.2.1: {}
+
+ safe-push-apply@1.0.0:
+ dependencies:
+ es-errors: 1.3.0
+ isarray: 2.0.5
+
+ safe-regex-test@1.1.0:
+ dependencies:
+ call-bound: 1.0.4
+ es-errors: 1.3.0
+ is-regex: 1.2.1
+
+ scheduler@0.23.2:
+ dependencies:
+ loose-envify: 1.4.0
+
+ secure-json-parse@2.7.0: {}
+
+ semver@6.3.1: {}
+
+ semver@7.7.4: {}
+
+ set-function-length@1.2.2:
+ dependencies:
+ define-data-property: 1.1.4
+ es-errors: 1.3.0
+ function-bind: 1.1.2
+ get-intrinsic: 1.3.0
+ gopd: 1.2.0
+ has-property-descriptors: 1.0.2
+
+ set-function-name@2.0.2:
+ dependencies:
+ define-data-property: 1.1.4
+ es-errors: 1.3.0
+ functions-have-names: 1.2.3
+ has-property-descriptors: 1.0.2
+
+ set-proto@1.0.0:
+ dependencies:
+ dunder-proto: 1.0.1
+ es-errors: 1.3.0
+ es-object-atoms: 1.1.1
+
+ shadcn-ui@0.8.0(typescript@5.9.3):
+ dependencies:
+ '@antfu/ni': 0.21.12
+ '@babel/core': 7.29.0
+ '@babel/parser': 7.29.2
+ '@babel/plugin-transform-typescript': 7.28.6(@babel/core@7.29.0)
+ chalk: 5.2.0
+ commander: 10.0.1
+ cosmiconfig: 8.3.6(typescript@5.9.3)
+ diff: 5.2.2
+ execa: 7.2.0
+ fast-glob: 3.3.3
+ fs-extra: 11.3.4
+ https-proxy-agent: 6.2.1
+ lodash.template: 4.18.1
+ node-fetch: 3.3.2
+ ora: 6.3.1
+ prompts: 2.4.2
+ recast: 0.23.11
+ ts-morph: 18.0.0
+ tsconfig-paths: 4.2.0
+ zod: 3.25.76
+ transitivePeerDependencies:
+ - supports-color
+ - typescript
+
+ shebang-command@2.0.0:
+ dependencies:
+ shebang-regex: 3.0.0
+
+ shebang-regex@3.0.0: {}
+
+ side-channel-list@1.0.0:
+ dependencies:
+ es-errors: 1.3.0
+ object-inspect: 1.13.4
+
+ side-channel-map@1.0.1:
+ dependencies:
+ call-bound: 1.0.4
+ es-errors: 1.3.0
+ get-intrinsic: 1.3.0
+ object-inspect: 1.13.4
+
+ side-channel-weakmap@1.0.2:
+ dependencies:
+ call-bound: 1.0.4
+ es-errors: 1.3.0
+ get-intrinsic: 1.3.0
+ object-inspect: 1.13.4
+ side-channel-map: 1.0.1
+
+ side-channel@1.1.0:
+ dependencies:
+ es-errors: 1.3.0
+ object-inspect: 1.13.4
+ side-channel-list: 1.0.0
+ side-channel-map: 1.0.1
+ side-channel-weakmap: 1.0.2
+
+ signal-exit@3.0.7: {}
+
+ signal-exit@4.1.0: {}
+
+ sisteransi@1.0.5: {}
+
+ slash@3.0.0: {}
+
+ source-map-js@1.2.1: {}
+
+ source-map@0.6.1: {}
+
+ space-separated-tokens@1.1.5: {}
+
+ space-separated-tokens@2.0.2: {}
+
+ stable-hash@0.0.5: {}
+
+ stdin-discarder@0.1.0:
+ dependencies:
+ bl: 5.1.0
+
+ stop-iteration-iterator@1.1.0:
+ dependencies:
+ es-errors: 1.3.0
+ internal-slot: 1.1.0
+
+ streamsearch@1.1.0: {}
+
+ string-width@4.2.3:
+ dependencies:
+ emoji-regex: 8.0.0
+ is-fullwidth-code-point: 3.0.0
+ strip-ansi: 6.0.1
+
+ string-width@5.1.2:
+ dependencies:
+ eastasianwidth: 0.2.0
+ emoji-regex: 9.2.2
+ strip-ansi: 7.2.0
+
+ string.prototype.includes@2.0.1:
+ dependencies:
+ call-bind: 1.0.8
+ define-properties: 1.2.1
+ es-abstract: 1.24.1
+
+ string.prototype.matchall@4.0.12:
+ dependencies:
+ call-bind: 1.0.8
+ call-bound: 1.0.4
+ define-properties: 1.2.1
+ es-abstract: 1.24.1
+ es-errors: 1.3.0
+ es-object-atoms: 1.1.1
+ get-intrinsic: 1.3.0
+ gopd: 1.2.0
+ has-symbols: 1.1.0
+ internal-slot: 1.1.0
+ regexp.prototype.flags: 1.5.4
+ set-function-name: 2.0.2
+ side-channel: 1.1.0
+
+ string.prototype.repeat@1.0.0:
+ dependencies:
+ define-properties: 1.2.1
+ es-abstract: 1.24.1
+
+ string.prototype.trim@1.2.10:
+ dependencies:
+ call-bind: 1.0.8
+ call-bound: 1.0.4
+ define-data-property: 1.1.4
+ define-properties: 1.2.1
+ es-abstract: 1.24.1
+ es-object-atoms: 1.1.1
+ has-property-descriptors: 1.0.2
+
+ string.prototype.trimend@1.0.9:
+ dependencies:
+ call-bind: 1.0.8
+ call-bound: 1.0.4
+ define-properties: 1.2.1
+ es-object-atoms: 1.1.1
+
+ string.prototype.trimstart@1.0.8:
+ dependencies:
+ call-bind: 1.0.8
+ define-properties: 1.2.1
+ es-object-atoms: 1.1.1
+
+ string_decoder@1.3.0:
+ dependencies:
+ safe-buffer: 5.2.1
+
+ stringify-entities@4.0.4:
+ dependencies:
+ character-entities-html4: 2.1.0
+ character-entities-legacy: 3.0.0
+
+ strip-ansi@6.0.1:
+ dependencies:
+ ansi-regex: 5.0.1
+
+ strip-ansi@7.2.0:
+ dependencies:
+ ansi-regex: 6.2.2
+
+ strip-bom@3.0.0: {}
+
+ strip-final-newline@3.0.0: {}
+
+ strip-json-comments@3.1.1: {}
+
+ style-to-js@1.1.21:
+ dependencies:
+ style-to-object: 1.0.14
+
+ style-to-object@1.0.14:
+ dependencies:
+ inline-style-parser: 0.2.7
+
+ styled-jsx@5.1.1(@babel/core@7.29.0)(react@18.3.1):
+ dependencies:
+ client-only: 0.0.1
+ react: 18.3.1
+ optionalDependencies:
+ '@babel/core': 7.29.0
+
+ sucrase@3.35.1:
+ dependencies:
+ '@jridgewell/gen-mapping': 0.3.13
+ commander: 4.1.1
+ lines-and-columns: 1.2.4
+ mz: 2.7.0
+ pirates: 4.0.7
+ tinyglobby: 0.2.15
+ ts-interface-checker: 0.1.13
+
+ supports-color@7.2.0:
+ dependencies:
+ has-flag: 4.0.0
+
+ supports-preserve-symlinks-flag@1.0.0: {}
+
+ swr@2.4.1(react@18.3.1):
+ dependencies:
+ dequal: 2.0.3
+ react: 18.3.1
+ use-sync-external-store: 1.6.0(react@18.3.1)
+
+ tailwind-merge@2.6.1: {}
+
+ tailwindcss-animate@1.0.7(tailwindcss@3.4.19):
+ dependencies:
+ tailwindcss: 3.4.19
+
+ tailwindcss@3.4.19:
+ dependencies:
+ '@alloc/quick-lru': 5.2.0
+ arg: 5.0.2
+ chokidar: 3.6.0
+ didyoumean: 1.2.2
+ dlv: 1.1.3
+ fast-glob: 3.3.3
+ glob-parent: 6.0.2
+ is-glob: 4.0.3
+ jiti: 1.21.7
+ lilconfig: 3.1.3
+ micromatch: 4.0.8
+ normalize-path: 3.0.0
+ object-hash: 3.0.0
+ picocolors: 1.1.1
+ postcss: 8.5.8
+ postcss-import: 15.1.0(postcss@8.5.8)
+ postcss-js: 4.1.0(postcss@8.5.8)
+ postcss-load-config: 6.0.1(jiti@1.21.7)(postcss@8.5.8)
+ postcss-nested: 6.2.0(postcss@8.5.8)
+ postcss-selector-parser: 6.1.2
+ resolve: 1.22.11
+ sucrase: 3.35.1
+ transitivePeerDependencies:
+ - tsx
+ - yaml
+
+ text-table@0.2.0: {}
+
+ thenify-all@1.6.0:
+ dependencies:
+ thenify: 3.3.1
+
+ thenify@3.3.1:
+ dependencies:
+ any-promise: 1.3.0
+
+ throttleit@2.1.0: {}
+
+ tiny-invariant@1.3.3: {}
+
+ tinyglobby@0.2.15:
+ dependencies:
+ fdir: 6.5.0(picomatch@4.0.4)
+ picomatch: 4.0.4
+
+ to-regex-range@5.0.1:
+ dependencies:
+ is-number: 7.0.0
+
+ trim-lines@3.0.1: {}
+
+ trough@2.2.0: {}
+
+ ts-api-utils@1.4.3(typescript@5.9.3):
+ dependencies:
+ typescript: 5.9.3
+
+ ts-interface-checker@0.1.13: {}
+
+ ts-morph@18.0.0:
+ dependencies:
+ '@ts-morph/common': 0.19.0
+ code-block-writer: 12.0.0
+
+ tsconfig-paths@3.15.0:
+ dependencies:
+ '@types/json5': 0.0.29
+ json5: 1.0.2
+ minimist: 1.2.8
+ strip-bom: 3.0.0
+
+ tsconfig-paths@4.2.0:
+ dependencies:
+ json5: 2.2.3
+ minimist: 1.2.8
+ strip-bom: 3.0.0
+
+ tslib@2.8.1: {}
+
+ type-check@0.4.0:
+ dependencies:
+ prelude-ls: 1.2.1
+
+ type-fest@0.20.2: {}
+
+ typed-array-buffer@1.0.3:
+ dependencies:
+ call-bound: 1.0.4
+ es-errors: 1.3.0
+ is-typed-array: 1.1.15
+
+ typed-array-byte-length@1.0.3:
+ dependencies:
+ call-bind: 1.0.8
+ for-each: 0.3.5
+ gopd: 1.2.0
+ has-proto: 1.2.0
+ is-typed-array: 1.1.15
+
+ typed-array-byte-offset@1.0.4:
+ dependencies:
+ available-typed-arrays: 1.0.7
+ call-bind: 1.0.8
+ for-each: 0.3.5
+ gopd: 1.2.0
+ has-proto: 1.2.0
+ is-typed-array: 1.1.15
+ reflect.getprototypeof: 1.0.10
+
+ typed-array-length@1.0.7:
+ dependencies:
+ call-bind: 1.0.8
+ for-each: 0.3.5
+ gopd: 1.2.0
+ is-typed-array: 1.1.15
+ possible-typed-array-names: 1.1.0
+ reflect.getprototypeof: 1.0.10
+
+ typescript@5.9.3: {}
+
+ unbox-primitive@1.1.0:
+ dependencies:
+ call-bound: 1.0.4
+ has-bigints: 1.1.0
+ has-symbols: 1.1.0
+ which-boxed-primitive: 1.1.1
+
+ undici-types@6.21.0: {}
+
+ unified@11.0.5:
+ dependencies:
+ '@types/unist': 3.0.3
+ bail: 2.0.2
+ devlop: 1.1.0
+ extend: 3.0.2
+ is-plain-obj: 4.1.0
+ trough: 2.2.0
+ vfile: 6.0.3
+
+ unist-util-find-after@5.0.0:
+ dependencies:
+ '@types/unist': 3.0.3
+ unist-util-is: 6.0.1
+
+ unist-util-is@6.0.1:
+ dependencies:
+ '@types/unist': 3.0.3
+
+ unist-util-position@5.0.0:
+ dependencies:
+ '@types/unist': 3.0.3
+
+ unist-util-stringify-position@4.0.0:
+ dependencies:
+ '@types/unist': 3.0.3
+
+ unist-util-visit-parents@6.0.2:
+ dependencies:
+ '@types/unist': 3.0.3
+ unist-util-is: 6.0.1
+
+ unist-util-visit@5.1.0:
+ dependencies:
+ '@types/unist': 3.0.3
+ unist-util-is: 6.0.1
+ unist-util-visit-parents: 6.0.2
+
+ universalify@2.0.1: {}
+
+ unrs-resolver@1.11.1:
+ dependencies:
+ napi-postinstall: 0.3.4
+ optionalDependencies:
+ '@unrs/resolver-binding-android-arm-eabi': 1.11.1
+ '@unrs/resolver-binding-android-arm64': 1.11.1
+ '@unrs/resolver-binding-darwin-arm64': 1.11.1
+ '@unrs/resolver-binding-darwin-x64': 1.11.1
+ '@unrs/resolver-binding-freebsd-x64': 1.11.1
+ '@unrs/resolver-binding-linux-arm-gnueabihf': 1.11.1
+ '@unrs/resolver-binding-linux-arm-musleabihf': 1.11.1
+ '@unrs/resolver-binding-linux-arm64-gnu': 1.11.1
+ '@unrs/resolver-binding-linux-arm64-musl': 1.11.1
+ '@unrs/resolver-binding-linux-ppc64-gnu': 1.11.1
+ '@unrs/resolver-binding-linux-riscv64-gnu': 1.11.1
+ '@unrs/resolver-binding-linux-riscv64-musl': 1.11.1
+ '@unrs/resolver-binding-linux-s390x-gnu': 1.11.1
+ '@unrs/resolver-binding-linux-x64-gnu': 1.11.1
+ '@unrs/resolver-binding-linux-x64-musl': 1.11.1
+ '@unrs/resolver-binding-wasm32-wasi': 1.11.1
+ '@unrs/resolver-binding-win32-arm64-msvc': 1.11.1
+ '@unrs/resolver-binding-win32-ia32-msvc': 1.11.1
+ '@unrs/resolver-binding-win32-x64-msvc': 1.11.1
+
+ update-browserslist-db@1.2.3(browserslist@4.28.2):
+ dependencies:
+ browserslist: 4.28.2
+ escalade: 3.2.0
+ picocolors: 1.1.1
+
+ uri-js@4.4.1:
+ dependencies:
+ punycode: 2.3.1
+
+ use-callback-ref@1.3.3(@types/react@18.3.28)(react@18.3.1):
+ dependencies:
+ react: 18.3.1
+ tslib: 2.8.1
+ optionalDependencies:
+ '@types/react': 18.3.28
+
+ use-sidecar@1.1.3(@types/react@18.3.28)(react@18.3.1):
+ dependencies:
+ detect-node-es: 1.1.0
+ react: 18.3.1
+ tslib: 2.8.1
+ optionalDependencies:
+ '@types/react': 18.3.28
+
+ use-sync-external-store@1.6.0(react@18.3.1):
+ dependencies:
+ react: 18.3.1
+
+ util-deprecate@1.0.2: {}
+
+ vfile-location@5.0.3:
+ dependencies:
+ '@types/unist': 3.0.3
+ vfile: 6.0.3
+
+ vfile-message@4.0.3:
+ dependencies:
+ '@types/unist': 3.0.3
+ unist-util-stringify-position: 4.0.0
+
+ vfile@6.0.3:
+ dependencies:
+ '@types/unist': 3.0.3
+ vfile-message: 4.0.3
+
+ wcwidth@1.0.1:
+ dependencies:
+ defaults: 1.0.4
+
+ web-namespaces@2.0.1: {}
+
+ web-streams-polyfill@3.3.3: {}
+
+ which-boxed-primitive@1.1.1:
+ dependencies:
+ is-bigint: 1.1.0
+ is-boolean-object: 1.2.2
+ is-number-object: 1.1.1
+ is-string: 1.1.1
+ is-symbol: 1.1.1
+
+ which-builtin-type@1.2.1:
+ dependencies:
+ call-bound: 1.0.4
+ function.prototype.name: 1.1.8
+ has-tostringtag: 1.0.2
+ is-async-function: 2.1.1
+ is-date-object: 1.1.0
+ is-finalizationregistry: 1.1.1
+ is-generator-function: 1.1.2
+ is-regex: 1.2.1
+ is-weakref: 1.1.1
+ isarray: 2.0.5
+ which-boxed-primitive: 1.1.1
+ which-collection: 1.0.2
+ which-typed-array: 1.1.20
+
+ which-collection@1.0.2:
+ dependencies:
+ is-map: 2.0.3
+ is-set: 2.0.3
+ is-weakmap: 2.0.2
+ is-weakset: 2.0.4
+
+ which-typed-array@1.1.20:
+ dependencies:
+ available-typed-arrays: 1.0.7
+ call-bind: 1.0.8
+ call-bound: 1.0.4
+ for-each: 0.3.5
+ get-proto: 1.0.1
+ gopd: 1.2.0
+ has-tostringtag: 1.0.2
+
+ which@2.0.2:
+ dependencies:
+ isexe: 2.0.0
+
+ word-wrap@1.2.5: {}
+
+ wrap-ansi@7.0.0:
+ dependencies:
+ ansi-styles: 4.3.0
+ string-width: 4.2.3
+ strip-ansi: 6.0.1
+
+ wrap-ansi@8.1.0:
+ dependencies:
+ ansi-styles: 6.2.3
+ string-width: 5.1.2
+ strip-ansi: 7.2.0
+
+ wrappy@1.0.2: {}
+
+ xtend@4.0.2: {}
+
+ yallist@3.1.1: {}
+
+ yocto-queue@0.1.0: {}
+
+ zod-to-json-schema@3.25.2(zod@3.25.76):
+ dependencies:
+ zod: 3.25.76
+
+ zod@3.25.76: {}
+
+ zwitch@2.0.4: {}
diff --git a/rag-web-ui/frontend/postcss.config.js b/rag-web-ui/frontend/postcss.config.js
new file mode 100644
index 0000000..12a703d
--- /dev/null
+++ b/rag-web-ui/frontend/postcss.config.js
@@ -0,0 +1,6 @@
+module.exports = {
+ plugins: {
+ tailwindcss: {},
+ autoprefixer: {},
+ },
+};
diff --git a/rag-web-ui/frontend/postcss.config.mjs b/rag-web-ui/frontend/postcss.config.mjs
new file mode 100644
index 0000000..1a69fd2
--- /dev/null
+++ b/rag-web-ui/frontend/postcss.config.mjs
@@ -0,0 +1,8 @@
+/** @type {import('postcss-load-config').Config} */
+const config = {
+ plugins: {
+ tailwindcss: {},
+ },
+};
+
+export default config;
diff --git a/rag-web-ui/frontend/public/file.svg b/rag-web-ui/frontend/public/file.svg
new file mode 100644
index 0000000..004145c
--- /dev/null
+++ b/rag-web-ui/frontend/public/file.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/rag-web-ui/frontend/public/globe.svg b/rag-web-ui/frontend/public/globe.svg
new file mode 100644
index 0000000..567f17b
--- /dev/null
+++ b/rag-web-ui/frontend/public/globe.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/rag-web-ui/frontend/public/logo.png b/rag-web-ui/frontend/public/logo.png
new file mode 100644
index 0000000000000000000000000000000000000000..6c66bf97b23f187f3fe226e19adf71136c0844be
GIT binary patch
literal 131393
zcmV(-K-|BHP)Gc&B9~&Dd+}`Qq=I`k0@)#K=>FoC4;_K}1_Tl90+uh~h;qKnx?BwS1
z;^gty+2#TS4(90c-Qelb)7#_a?AO@i>g@9$AS>+c`q|s#4GkR~9w{OtF5BDa=IHg*
z*51p^+4J=O)YjqI+ve8T;2Rt&?eO~5*5ecvCL<&=?(q2P?D+2R{Lj$Z;p6TfA29Xz
z|IN?b($n9_%F^82?cd+(@$>o3&)3Gs)#T&w!o$$GxyHc3&hYX6r>L_40SYH5GSAT7
z85<+s;O4cp!>_Qu%gxov%GTN0>Y<{oHaA2$I!?sL(kv}Gy1d5l^Zhe3Kw)BU>+JTu
zzRD^pIFyy4Ffm3yKUP&$VV$0-Cnq+GjF(JIVbas$ZEk%~QC*OdoU*gPK0Zt2<@P~B
zPgz=KIy*-xDnT$XJcoy!%FEw(c!tEr&Z?}nbaaHm!_}>?xtE!vySvJ(thyK(Dz~}9
znwzJDg^*BDV}OBW+1c+bEk)JV>9n@Lh>DajF-w7gm0)3bi;SYUxXnaGQP9%f
zWoB`xskw1-fR&b~=;-@2HBEVWj!8*dPf%*k&f#Qbb{!rzu&~6Wq_cH(jl#m(y1UYm
zlBh&PR=mE(WMq9KBt6g2<;TX-y}iz)rMG>3j#O1`zrWHM8Znxiv*6+PaB_LmXw$KR-^zB*4dsHGqDGo&Xbl%K%IyPG&U}C4cX#BZ*{9q@Y7j?Gp*5DHoF6
zwm?7LKP!Km2-~%W5@(T)Yr=~f#3ji@-2yOgV
z05oX!!2p13IR!o6EnjJ0nsSnxV!5S2toE}bMFPoLn48mc;aCaV47uwvp5X`1fsI#``p8*
zqV)Y;5~58e>U$Ok9m;SctCXgd`L2OWhzh0?c0?H~lU1%?y)p*YV9T$o4_lg|j4r;8fQ>~%QAC49zsUf#
zqx!mE8-gJa1QSFtSW19&UD-oGG$O#_0SE=GsFRTj>KdsB+5hmIVca{A6OkDq8Mr_X
zRKw7rXM)@^gL^M<_@+;J3GeP*T5VWZq?d1=$seC&)o~}%CBA>y@tBE)%S6PVOq40S
zk(y@dUipA+h+#js4-{ez_19`%^cx}@!x)>zaU`h1a~J0pAS?u6Y&R`7F-FuFaD#}q
zsAWDxSo9kJnuVRRk4ljsdJRZ~OF;)9(ajt{2@FwbO*jiYxCxe`6oQGQEO$^q1faTY
zVl;O!WEe<5g(q@yP#Hx^rkp_7fs{d9gyr$RlYu!6l%#z5#V|0@*a;7Z=YczN$mCmR
zFJ5RL-bhV@=jH!mM6}Ra`>)H&hXPOihE^bGSL&kv~=5qEDnCr~A3t^j;-v;Cg>k&2EU4{0{PyW7)r=w|}`
z79$31W4nL02u}}Q*7rM{^%?+t0lw%#J3>-LF(AT-UbH+u
z2fRCi18m53qr@acgt#=5G$?Zt@ty=kh#o#k4&yg4w}rvXEfiKpj0q_02!Os2O70GsQzJBFq<
z>oJB(!k?%9#(o!l4vcOuu3xkXh@lM^>V6M^?>rh6h=4Ln5#b_Sbf&HOMWlupqY?mA
z0G4|RZ0u(M2mlEI3K6OR5D9@eP${R|{>v?zK@6Z&g@{WtLOKgs&UgAQTuAd*!PF5-
z#7XZ$g_u7rZV(iJ56Z|9l7fUwnbpjEW^@#WqKl*sG#?~hd;acti$ATDv2tzq1
z?AQL+tTDHU%|)n!2)f(plG=l{xIM1pGW8+|r(Ted)>(*1HOUi24SOI&bA^rB3jk&>
z1XTZ}4!|Sl@t(v9q2Cc_5b{KnGmHn|oilhQsZwGQ$iwIp><`{^&Z<%#9%bl;8y>6i
z12IqYn(ug9!%HVpw%h;O;J-Z||LEBshWJNe;-WUbMEYG#59jn?FZ{H)_9iwyuCEuL
z7DGV{=aPA^tbza
z0^aAXuL@_b(e}~}!%C!d!_&=km!D|0A7q+YNa^hXsAc|Y4L^UlPW?%orm?0Ch|RUB
zpE3E>d_2F^V7LxWHLRK*kVq6;sKOUD!Pe{703bw&L8U~3Pzjm480=LB03b7jfUbHv
zbS%P5yIa>5BNE}7TDY}K2|!vi>m+
z%jxx>5)iku8})|oT7cSDw_kq|JJ*D>|5xGyz(N2KL=Y9kUjYC(GgsR$06>fgWqyG{
zMGc$SM#L8YNW?@704f2ESvYCtR>Q_M_JDX&*!6f3R3xhCBKFMUARE{vAwVEB#6TpX
zV^7B;0sH$w56^>;cvh*>+>?^CtPvcBW9E;GeDld9NxEHE(!=yg-7c)s>Z5lbcke#B
ze^)-v@4o!*e<%U<|9RcuWxlp+Hwl+XTJ%$VUCx|*fMUz$VH0&dmhkxLg)%}kd*wfh
z?nd$+oOqKa0J<>XtUT_WcKdI9+*-03=yBE3iyKm&?Ulmby9@$mx?qJf5zh6+*hE9l^7aN|3|
z!Q}{4Okyjsch?usv(G#;yXi9dlBB;Eesi6#(zT3(Tz}KPnP@pBUcOu}Ufv^Mf3&Nc
z_x=|RFw5!-E&dRLPiBecN|OLjGKd`q`^x|pFicb$X*^{D#^T^|=rDT|X!0in4tc&Y
zh<_A;gg}tdSuC(DplZCec?5t7L;=A-hcr;qf=O61Nfj~f;@FX&=*TnX@n9j~F%IZ@
zqBPXZQuB}BCKOZ%Kw2l5<~qR_0TPD
z&=QAq`=<1ZCAZvuQeW>`;BnSmoV%FKkr^)m07u`&s=WHkuJJC8Rm4r#R58wFzyAOL
zYw9a!ozA8<>hp|`)rIP?uVUPvE-_W(b+sE2qeN1lvG5vChDtd#sB~_`Vz6rUVuWxNL(IPMKq1I
z)U~7#jFAXoQzUnLFkwLI0)(BSLs2ct>%uy{)0evy5c71Gra@_)NJRCkd#T$J*dU(5(J2>KQOR?n>2E7wEtk;GhqV@)2OWLi7kWp9ukt10lj+0Dxv=ATZ@n#V#P4
zr-(#7#tJ3c2$V+^Ga`e=B+RQ`2ryPI8VCT0A-Wtf2Lph_K((}{{Q^L#m2!&5#h~*3
zo2mgDHo+$xYG4XUYGL5S$^vQK_X3pHR2SL4SA;0RdTsMwS=0PQ!z=wl-kz?%xLYh1
zX?f+IHFS>1xr+?2iPffx4Q}Q*h?Z9?R^8|h@9IBEh!y&$86e`n5_?tD?i?6sX&+9fXo*H?y!n$76kww0q_{?l_E=GD-zLB(J?S}iXKQvm7<9=CL#%QA}B?-
z+gcDJdIJCf2swwfzL?S`Qhpho^M-o(trL=0uqJB_HTFNl?
zFK!AKt=H>qeY5WExrLb}kyw+YnuL-J*~-Bm|NP3;)hEL%&ohAYebe;bT>?O}?;3qo
z?ZU?Cez+1h=)3(<{Wky@dr#P$_n9v|KhCB7A&t5GukB6;H224NQtOX7XAqnd1A~F1
zvI~g#cK~$YBWxIqJ;2dS0!c+yeRoVBaHC~REJ9LRQc-%CN}^V3ik8g)Au|K{=y7Ty
z1&uKvL*D~&W}$V>L4rvDlZvQVrbdF8A4v>7LS>tZ8oE}k!bVfmLy{k-pvtv9w`i{_
z8aPQ(ZBxH)hxe9?blu)8uj;GC+s^|a(q@`A6fXh5Ua#NyYGW+XINZciVe@d1R{sG2
zmfJ_y;Q6W@n!oVh4svz@tHwUdNin(W&_A}TeA66@i+G^9ga7PgD9BiL0(cez0upgO
zi1h;qQzz8*C27|gXM^Pvbo+u!EdrDb2ass=&n(eMTW>dl(^t
z&mOn$K?VlkS(9_-T+h=NO_oN7Q{vVL9(C%Z>vxetq}nn*65()vQy}eTbtoEjZ8^TQ
zM(C0P)iqOl`(t6^Y3SeVTODQlCXw{F)I&-^DP++}TY{laQb>}dR43aVNa^L})&Bcm
zXaG>RS~@cvJaV;rzJ!uK@6eH1Mnr{8-+5UPKpuWC8G{K{oU4V1BE*!p!X=a5t~hBXk-Cloh-K}W
zQ&_F39N}4q&BvJ?}f#r@gh%hIv<;@>SIBN}IR1y^qt0C~f
z)c5taANvp2A|y$B_-d)0NCutJB*vNkPs0&
z*KPg|06%{{pUwr-?ZNv$JMU&S;h%J&9nHAE7f;=4cJljtexmOV~AkrJHZTzsp#GUkL`zdJxzLVOM^FUeYLt;+&4&rY9@1?KC<|Jk>DbYFG0_G9Svx{D_if1bvgPQ~-=CGM?iPB-<&
zPGgoAYv;!9aZxZEnQz=qH{e0x4*)oiV`9!|UCq%%an%RvV&^k?0+PtgL}{xcR0BaR
zN>ac5kTP{U=YfeA3;^@;!*TzosPz3HH!Gn;EF{V*0U)e<(DnOLh9wVQB^na1B)4@7
zb)W##w;Hw}=|=VAxB6D0kTDJ=U@fgu=-(C|YA9hTsZQ^;a(&gla{bxOab`B|vUTX(
zL}zKBMN&8Va8@_49Cxf9?WAP9@6g5P#auL~#6QbJ)srQ>`DgF!e!uRT=C6B(PM$me
zRz0OT&8ic#hE0Sny67HPg^6+wn!oz!y9W@=Fgx6C2Bg$79oj@=_3|)C{~|_|&;+|`
z0MMqY)-4pQzv->zDn!rO!Wo4_DLx@s4TA%PyK2C&H9;6z3V#XV|6s^Xe4#b8SW$Bf;WtAV-
zx=o^?(OMSVZWgJ2rO!hi(-7J$_cv6hy1jo;E)(^#UKc5COCZz-VlBYy_xm5WgVql{
zFRxpX<<%?K%lea>L-M}=zG$#=odKZQxJ~7&a|XcI-)+WtrUB+S{yYF|yI)f^
zKE#M~XKbQ*=a|v?vn3vMqqp;k830Zj#a(y)T(9f38mLYT5gWx0$LTQyc&~BHdAeO=
z1n6LHW|m=mNSvKzkpKt;4FQPB--bFD5m1BxZXZ-YNSK6~h)j|tLJ1sdUI-weu>hJ8
zQR2FVBtUcoK&XFb?rK_`DT46WOjS+w7wPFCbC~_j|w949`q*o2gI|#%Bsn?y1JsGwMizE>1p$BJze$GN3Watt9iTjc>#f3dRX{Y
z^^bw@9*HJ71oohwp!04*q4o0xL;@Pq0^l6G(fqu*XsMlbv|Q&VW#u@n0{Q9wDayFZiZOr8Eo|An1Q-zZLv*Vk1ZfyU&z
z=yOtfR>Mt
z(M(u3o8CLv3!2T=ph2LqQow|Xkl0#*6JqwhC}H0gd?JmX1B8VE(BcgdIoNd$S{Gq%
zI^?u{Y^Z7AgOp+WRVE`>OmMAxn6vNOMco?h8*tz-%-8G;ICkwka1P+G6`L`er$%7#
zfdFUyq`xV3d{y}eE}$^2{Y@=ED?h(qrWI;n<;w3w0WYusqEN}xjhZ=7M*)D`dTjPQ
zXYdpNt{hdavH0^T;%B;y_u=8C1vs|#mz7xLV;ESmjqbK`RMMp8lLt^XFuj-Mr{=}V
zY^^Jt0RtsV9K?@qR4M|1=lXH(bwPRxglRz3qOq~S^M_S18ZeM8X%uT~+_ZCW0MynY
z$eTq+V}v<}))_`3X#6Z=JBvhfNbaW{wq3U{-J9UExFANg=T2d5SCc{Wifvy`s=c^Gqke
zG103DxM;M%b!`4DN;93+r&QnRiwhjg9u6gxd+?H#8)EQi2oOd=JSGezzf7fI;8aAC
z#b`Fl2O0+;@pm
zgMl1i4ais{IO4ara=34O(=7+z$cFJSTi!G&+&|`78vsywIRIWRdltCY+o)}Qdkp|l@^b*t6jLPtP<0vra|D1h6XP^D2WAA<
z1pr1ssbe+$$icDs~z=?Mh_P4mwKKxnjx9R9WixK7|4
z8~RG!RsmYEX9k7BSqe3$&j8>=A}VEEVB!$~N(4s*YBElazXJfVI8@K4O5AMOEdY&U
z6ake{-+Dh)v1}LRvByudfvmORvM#}u<_G}eBBTGYa{!)@JyQ`&t|B?iEde;z3uHF$
z&0Ku=CIb@_iMrSK6mYjNj50={5=_hsc90>?8=y=%h{AmcA*C$m9z)mqp;@ma>)-5#
zAwS{p!FCuB<$Op9@oD!j0N{)*Eks`Ardw)?lIJ^bL@B>=|WHTygO)O_fJ3yG50
z&_vy_@=6KJ(t!wYa;b%(8o&v==UK_I*(JG+Di2TLt%=(ch}<&2ojSQJ$>->{{?^;?9u99
z0YIfBd(hp;10H(BtxLN-5yudo*AtYu~4v
zo7cM>QZsZn+m-K{*RO>He6ak70dSVTPXen-Tc>{G0;L@qarjzP+3G
ze6j!`0#P687|g@gnJLVKBk)umKaevfknaM(tPX1ft+$+AaM0HE&Nmp6F@$b2^9n^7
zHwiTNeQa;rH|J^SmR*QB5zT&Tf^RZXY-GVNU(W`i{7az#LIn|TQonp^QfeCi{{=u<
zX#($Z@FBxn66vpeFXki`METE00b1(Qca+1MfIE^vvv2JO;JGVUwY_+;d3swVO>vZ~}nyKnN76#u*X!b<|1|L(2wf%>O(2
zp*EWPS_aR_NAT|R8DJ$Z3U@9i<{T3+1g0HZvByc1Fa$qI}kDJj*(vD%Esv1bH_0}3F__*n}
z0#XP7M{A~hCV}JwDyj4H-hi(8atgiUk{q%#@{E3mL|y9u+@_mH%O(K6s+9ud=KgVl
znd+K=<6>^;)nwrdO(>{Eu0HF$LUfDd;3G`U7zCxNh;P6cUFhP~=Z7jutuObum@NpXWK=s8d|b
zB|so!EJ+}_&jE-Wq8U}Ewgxg;peO|yGM?Zl>LS>Y(me&0wUuFcy0VPURIV^=Kv?H#
z?~g&%IvJJ&_?ndVTd1t_u0u}<&~6rbxegElXNZ6R#-U3-q^1j{V+xuYzC!`IgkNo&Il-h9hHTQBU@VHGRmU9g`zF26~(*Kw5>gj1|k9iWH}(TP`J{W
z^0S$v#kOS!gp`BvazG?Upp7TYV_zXfTKZeY;1B`>j7_Ly0#J=<37J-}UweuRKu?|s
z8_IdPd;f!Oz_3bP9-g)-$-dA|H;1izzgi7X&+o(jrvM;I=8-kmML
z-r*ZK&&zrDwyayubx$)=#;=rhhJr
zh!;1XOaQ=`@@1&1y$g6_Gyr2v88t*u#e&&8d5y9t94){pAkhirF@?yB`gZ!kd~@sA
zIMM{M-e^S>5c{Gaks+gnTE_*zovks0o9#CX6AaLDN4zd3^t^R!Kuzh2?JBN%P0+PUFF*SvA52yZ1iruv-n?
z?|Ohy5C0SZ)EW1-dN}|NTYRbmdA}$=^@Wr3z2@N00AP#0vSr^N_5oZ0Kn7j7sAJ*Z
zD8-*y7{kd@L7gaU%J
z8ip^w7R4{Sp=(x8%`iOMEBnt`fXSWc;>7?sZy#6Ycrm!=#hvz)z1Rhm5_}Y!&xHwG
zL219_fdSya0GLz-?NmKQ=|;ytVqh#DaLeNzRuoLy5*iV~$8L=WF$W=GIyH}M!R`C_
zf0UWo!dP*vXGhAwOnO9v!9CWqt~Ap)b+HbC5Jq~aLN$%2NJV=e=&_~S2d^0(r4SD2
z^Q{ZIIgB$og-JuH5s%Q|;4^ef?*jz_3ckG=P&R_A5lG7>$jdr4{ea1*Mo3_KZky#l
z#{nj9Jj!|~?29eHsWa@?IJ6qXzmx>(OxiHY$I9{r05nBECDH|{Y8@Lo4CvEp9A}y3
zs9aE;0TcyTrFg7&s!8_K2$H6Q+DsH>)1V-#}hwg-xs7c)|w
z2MqmULG-SoR=rH{_yIH>u
ziR3{PZgdCG`g=_a6U+#tI_Mw1Ti*sGEC9-0$e|TC{g*^HVUSa1eryN2?*Fg!pa7`n#_Xx006-f{
z%cC5+hUGZ|0LEPZr4)5ro&hixfv$(Z6#!6XAOyGh5L+Oq$d>%UhppAGNW1`;PX&me
ziVUGLK7`;937l}o8cZFCxdn`fyv||a#K=MIf+J3W5bqI)2~r0X(h8FR
zz|q1P3=E#Lhvw#Cz4kuS`s;g2x9RD1S~U{$uLDs^kDZ(hh*~!qhT-X71i<;j-v_|N
zzM4ylcv}255dg5`!#WggUv2v2V!ypqcYSaLWmFoj>3tiElX^$}Piry#BNjv_V9y3X
z&lVeZKCjG`=2fm-0GgT%8-UGMJV*AtEo%J_kaNzKp!qDp6h@Z~
z1V%MzNB|6kHHB|CbZeZWnM4{8-XvRtjDQH#B+BcryV?PgR))KE$W7QRz>)I|>Kj_$
zKCD0Wt1t*9{9p~4^3x8NT^_JYOSvCHhpU?<4mWRpv|hd5J)Jf4Zvo&~#QBX2xSdW!
z{kMO;7zJR|b03WNnY!9q0!5vD?Do~|Inj|3-WUy^6?W_ao@nJr1jfdV3jkQ5g%>WN
zcKf<6s2uk1T>$`R!4Qhn#N^4EXju?oDqvUkOCvs}KL^O$pB1Ht#qh=AaT
zd#ZgMaMQlO2Gcv(8rR~)Rm$?HT>#B;?6Gkn$b_hKf<1622
z?6+G7XhQFw!nR4n%6G3`VWzMW<*!$g2l!yM#O~qW1He(!hvolU0BEzXEp71^Wz>5m
zIyrO&UhF+R_Nk9ad@HJ_#hq83URI9}+)qTti5PtVoB_b%N6Z-jn>iQz0Y?Dn>-h$6
zY8Mea_z5r&vr+Zcw=B3K8|^eYXQ^PTGg!?vi3b?%0Y)?1;J&WR1i>(vU|ie;>Wtyw
z2_qo0aSj)+qFKyy$Yc;WP+H_+dGopv
zi;&a(Po02Bn17Q{kh4TwErT;I7BF02>
zrT_<-Dg}^JjVA0}eKz?@-Y)$aiIL7gN#*(>kh*
zb$2ye*iU1%YrbS-JhOjzJM(D&0eve6!}}I+&IBZ9F1T2tbDr9EY(;`lI~`7RgPkAz
z&ES1Q+WSd}MBUdy>~a&O_Sd$FLI_^!zw6~LuiEH8XfY*Ry|-Phnl%12@n3ZTEX&6_TCw6
zqeF12ZG?^UfgA;60%+dI^+B!(h>aly1o>SjBM`{ByrhJkHi;`Uz>pHk55ZTtNy{N*
z>{=p}+tM)W!p(MQpldv}eQ1VvUi+mGL)2v8T6eycU?(ru|HUudR`!kOx0Oz;mH{XB
zC0yZ56!sL|$-P(OFB3UOIcVh^ChB^=n9+LHa@%_mRc%UVYeM*={J~G!~H`lYGVr%;jdeOH{311A#D5kEq=Y7OsmPCyNZ3b
z;Ap(hdjNkwX?W7V*{Afc{us^XXhOF3|NxppO^R!(dmRp<*#H^mfU$k(9J!7a9ps4H
zjjJ934CDZHpqr407a!)#ah9l&Ku#M)Lv9dvL`@C^Y1yw5-V96ml!y`m`GvUt_usvd
zLg3eptom*k5_YdwtL4KFtMUKbH?e~DzY2h=V0F5GkkHpz-Jf#>ouuUHp$xspgS?#m
z#rWy(0bq>IuN}_w08lw|fV0S;b5U5QwPSn{O)&x>mgSN6Pq`S20+kFu$B6~dAs(SI
z&bP0&rRewL`_#kMHBF>3u4YH0s{hS
z{?Vq8lKle)a{v%DAF6tDds#nW8|G=d>2Mn;4mI|EtpM;-yStiSKRi9{?ob32inRnx
ze-;4aaEU{Eeksste)p4nGB7
zB5tC+c?vBO5u(DLl{uFi9pgwuUp>`$nIRsY4Un+bDf^HMM!>=h1DH_KwBC*L=Bybu
z0?SJ{pw9E(Ht>x~FKrwpY?WxmKP!^B)xz)T7O$WUnkwaDyuaEl{S4c8gGJx9ZINkf
zh(CUN4%?@;t+hLv$%+CklRo??pef+{zuIhw9qQtaD`AXZxty)m#Q5i4$ZfPMTv9I)>G>FILuqiW}#x00i4^IbcyLcoMr?tIVX+qi1)|cJVke
zkjAHKNUZd%GaPn7to1i7N^t&w+FsCZm1tVw@#
zeR;L}{dadyiw{2_Em#!90-VT9Wtk(b1olt+0C{*b4gc=)Y^u<{d$Uh5K%cq)&jNaP
z7#I%6mjJLw0EA9xdjOnRg6O7+kM2A>M(ihjmZ1~IvzdPg@1_vPa8
zx+Vy$wTiqt#&P{D0AO6+{=A|vKz0ZFj$G3el2}j|RWq*)tdsb$nv)o)g5v2bTM+}4
z7N}K8L=l{m4##t2oU=IGZ&uHnLqY6cxaF$>3)19Ks97yySGP&5
zYt!YjN1~{vKY%ifmrC=mH%+LZ!Kw$;$Ydk;$fT~8=K?^v`Dw!|Tb0Cq9&*JVkQJDq
zKBj8__N>_7O|#&vriA^YdJm@{|48P-;wl&>a)L6it}DnO+x2B&>t+T3$Q2g0SF1?7
zSfuy~kU_q(4p@R$M<-ZEY${hgEub
z-nA`SqR2%2xULz0`tCJ)|7v~(L%0PQ?bD~%f}8HUvd=Fczg%it{C^t&jJGe{%YRSF
z(n)GoI$V4NfSQL$VeHoqL-23^@UgnPvn-QrXbQ_X7^+aY;x@Xo*(Hj+<@bA*XmtXwZEvNI}G64d>
zaMUwS9t4a4UeHyCev>%&_qz#58JyE8zS6J?G@0+=F!g!xG
z{LhC4ay8vo+d|D^d`EZpqd)xUDsW9BS6|xI1pnK%XlWCuTUvx)1E7P$4+sCels^8r
zj9FIdzX*UqYC1|8xGByLdQtI15-8`%%#nHqqvgN7N70e_pJn@Ou;;dYixv5t(96*j
zynY@IejIFv$_V3q$#^V6Hx_weQP-}$FE+UNvJ=Db>=%#12wlj6knzL-O@rGN2Hq|6
z&}bFk$nb$~fg*_zD_Z5Y*V@nlO{Q9Cc8wocNzoL>wp0nsO!{9-{uh=Au>N3*n$|yQ
z4cUJb3L%0{?jB8HNe2Z?|HrN{X|uV0_|bJufdE>_*T1XApdUS4U)AJ-7W4?cwQEQI
zcL5MBze(^H{`WM1lfQJo4oMU
zU5nO-8_1RMEJ}>&*Xjq3980}Q-E!{h-o|`4oTN4|+Yi|s=g=gn>7k0DEK&m~kUgNN
z6LH4
zgZz0{0Km~FuTRZKEBKYt8tJ<8+!CTL`g0E{Q#|NCuFDR!L@sca6uS>3_-kX)>RqbB
zPQOTsc~IlWx1xAti{D>c-tXp%b%)d!O;I6r?FpMw9`0!cbHi_L2037X~1b=o|
z?%9mE$GSW$Ib&t(6&EVLZ{li2uzriDc_!DM{4h?Rm8e@*e>DqJV*kXt{%@8OGcz?f->0i}g_`g;4gDpjHXZki^eWCuC!f}j<@R|2P{)bYwUO9hMazd`+@NhRYfmR0)1I*fC
zQ($1Dnx9Q=ga)5Te{41)o2cX0i(SEA>$yY<3?k5LEW_Yb2C%`UCDI-sw5bdW
zd{J8!g!Ihs%EA^hwwO0`0bHZhQ{p#Ry%{_|<};2$TU+`OR~t5tWwRjH7e*R@OyGZf98*bWhS-~b&l|v>VuC&={0^N3;W$Fy
zKLY?&b$5r5UjaaWB8|GZ;3Ti)qCdGlDVn7+gX>U
zQVzELW??Z3AZ7P-RbR^kXn%AWTs>V~DH-%EsQ;Aw2Z{V$0MN7-!v;R{y;GDvcKR?h
zqb1Rhaq8uhy?%JOc#S2Aol}oJZZC>QDCPZe7=->Xe_oJ+kt6uaj=!H6zZ66~u|>z3
zaV=aDJcR#6X<|i-B6Ni4b5P(?0Gin3*h!bzP@KPxnT?oNy||kQT`35mDdM1n0vd%7
zpj6TaUr+mcwO|0@Hw1aI;oAmocUz%xS~+%5L+-l|vjy26Oa&w*{6mt1U;Ktx!y@;h
z;ncbTI!T?hUWA6&b(YpZvcJWAef3rO5!Y-ST0i}V0pJAu_lNV^@;*P842pdhc83SS
z-S|1xDrWj;PJRv}ZOp?f0B8~(N{9pV_W*z+3HY?>Yh^P%8J(2<(bCVYj2WHHHd7st
zrmimY99d^r#G&cr4~R`By@GK8Cu0i`99=|pB*2go1ayW9ZfUpZc!&=wW2EqFPjia3D>vXxbFjy^t-j#QRN?wy1qe6jA=JH`-@t@j;p5
zlsIr#fSVppL5stxL}PQmEf&dq{-Y;#g#Jmau&1j4e#pNHfVZzZeSIDPV*Rb7^Eb{l
zPmeZWCRw1;`S)V8CqExnetjLMPR6G=JcM}%i66vo_V3QTP>VT+2T+Wh^Y*;u1tvT*
z9`tP{>0*O{W=6|%Qx-=pzK^8>7P|i)xhJy66l4McIK((a(#_>M^+*8(O{_GcZG0P!VRws_gjJ{>P3Rf7mZD)
zC0Zb@Jn;bSZ3AmGZR+;ANM|K?{_19zG<1nA<`@=rhZm|HU$3wKlK>cAA9=UuhA+iD
zI~!=^liYJ8h5b-Z@UU3(%T%ww^a0ZJEwBGbERhp%3V?x7yhp-taBI$@P8xGOk9%az
z@#hBWE=fN$TEMBA8AAjx(D|6LV)U&io?Cb>Uc(;YdP#Qd$BrT-7Ao#Sxk|Z08-{T~
zAN$Jtpfl8#>>G!sq`XD;?ZTM8ZJMU4=mEfGqrao>DEUeL*Yk>2uC%6n;I<=hBJ|gxn=GY5TM(7t%_0`9wXx!p4A_Oz_gE>!rb-(-fJ^jz$7(4&|
z{7}y}M3J){LFJ|tefMYr274an`KyIX&;g+1HMeAV7|ehGaOwlNar$)VGs&X<007-M
ziZ}hy35*jnjW$){LAK7A!iigoGsvtJ4A2N}nulS`lYCX8F3OmnZ4izAuZRFqK(4=V
z8d|b_DNn@k+RVn$L@nX|r0XWGENmHZ6&J(fsnhlO;!?TCKbg*?LuVdfvDmzNaaaHNzb8qtm_%?Ez&t*AThutf5mQn
z^`8U4iHQG^3Gn@!^NFVE+Rr!~HGot6`&5sfpYBiR;|acj0G;XUQUAPWB)P6aGgdYJ
z83Y-3APUfwJT7(}E@$~63~cYTxl@jshOdpAu4GOsPk=Z;`d#Jz&_-_Q)4sFavC7ke
zz#RtQ^){Vf^*stgsH$eaD^X5rB!E|}`eO#2oC-4#xVzKN(6jlxw|3R{P>pwMJ4-9W
zx?nYdAf7-m7o38>BVd%Vc7lIzj1NatnVIb;azxZ>muFMJK8*5@v9r^s8@`7O9eDrE
zs$Pcb$h-pp5R}Y(mc7yeD#B)Z@PQ?>JAu$97m5&9!NpeWUm;qM_HV^{TwWac*
zG6(Y1(bc6|gIeHm_vodF@ooe8f2QT2gDNc=+g)3{H&QzYyeq1{LJh=o+kCNzY^(ed
zZRW&`GW;p`t;zOYV&8A|qF8VrIE33pQ+<4_;0>i6NnrRQ+v|Y3z`Ux_IC0RPCv+i+
zXqiXLi}wP+)epkSpT9H9-3BCQ0YC;f>>v#DjE!N!&+-T7eG!k?j6B;B0E!E+;GbV0
zL2Gv>H=kW*5E**FfOb(ZmXE?&OeW%HO!&5$Q1yK=eB-i%?uqW@z~DfVc(UQ!1cpGb
zxIqk$!0yT+mCJ|>?wmGiS|-cMgd-L{vlWbv6_QQ0gU_Pn%wan?w=VcJQV-py3?=z%R+da`p-_+PInOPd1Rdk)a|%cLC3gvPolVL!)w0
zj`R3r#ToO=>eN5CyDRuS?i1<)E$!5=;vi>7HbDlGhNF`T5z@>F=|^~E%Q!|k+dRIZ
zl=s*NNkm~EBv|W~f-F__=h+6VT8|IGg;Ih8R{(K>C0vfW1uZpisKi^?TEt+Iei549
zagsb0mJnpYCA&Y-Cqk&ux2h@yU1N8FUC&b6X(PhQRw(@W0u&_06UoFftuP|V-vPjM
z;xzK|-2lKPpT?tnDYHR6@hkv1QpOwUpd}SgrLXt$@C;Ez(>WSH0|14SmEEh8B|{Mm
zJvStK4m`LzbCz*;rGBTz3^k{be}up>G}JH{4}VsAoHl%@J)ko=Y-6kLKHo*>rb<-E
zVFZC-AhnR?CiiAvZX7Fk5`&Tw9i1uoQ&NZ57PD`~3hnPv9AGNtJ!@19N=lXR+-kqJ
z${_rKfC}I>^BV8Jwj%tIEiN(oQQ&%t``8E}rq~7P(~eAksvL6n?fbg1)hw+!eV#U5
zdwpH21aReQNybnnjPJf^&9gjTmwm?*K=5iy*0aNLz}Z6!QjzrTSrc%9t(bH=gjA5N#YzPnH9eq`l#vA+0OcTjpo51bO0_Jpjxx
zQ#8Qf0Dyyy!1vZN->Mkp#PeuhNggS1PwU}|lNxqjp&X5P)PrrULOm8VOj{DGMYeB_
z?fKKMsP$Z5wXI&J%@6Om!wdxB6E;QKZM)5UK{D}*0@56!@Elgb!&ggt9su&xr*!YW
z7XZvi=u^{hieSbH&|Z%dokRLjeFa@MfVH2poNYoHG5?d-cN&nj*_b
zk>m)vGA90!J4@O2Pvp{9pnY10GQC!}9P^_PkcO)7;ud
zVzJ?Mrf8G;JOIL<^vjzv9K9C+K4d{N4Q+K8_wWuG+MHGB9J;1E3A*0$CPLJIfA5Aj4Ns_b=(&=a{yrU7ZYalyG=mOgi_EL
z`mVL}tO*E})Fp%G{5>u^3QkHS7qgd#f0&q5e!vOHe14|ma^|C06@lnx!g&&-Gf%!^
zLG2gdLGvcK%R!1cT74M^d5~tCLSjki5@|FuI-PhuIP<7nxMz(r&fZF?93q25|Pa;xtw*!T;{XBAuSxZ
zs{eMEAVd?~qEJff62S&XAL%Ag|Jo1T?o(^=`m|M4TG_S}3w1;Ca@E!8{UTUjZnjq)
zNnjR^?171?F%IXVwUY$Te`t)(4{7}n=}S&InqYX(?HD4lEh*2NfgTtR44?C)b$u@J
zWdMN$USot20Km)rI7u)$9F^ql!b6E*|H#lGnv#hQ@cpWpml!fFv&e(hG19c2Y)P@K
zTZCJFvrS@yuuLm3L`i2z3&}RbB7C`4j{txqRQn!F;Gi7>wYVTi0SoFGXppN)!Ip=#
zs8G$bPic$z@A`loq|qKAh2ndo2H{N~J;Dr+tiTt2XzC`Y5csDc{bp&fHPs?TCy)lL
z=6xzXQ#L03*%!^+E*6&y?I)%>F#h74g+%@@0I=Wv8PQKE`{{|^%T?EN$%1LPG)&~d
z2r$8!_1<|h5%AzV5x~b&RPwN6?P~yNati>Mdv3{?HCO?ZvImdlac?&=CzngFkGrb0i7Bd<&2h3ZKlfU#L4>+QzaCJAPq1xfw3
zsv28R{K;xu4Fl9lG(j=Pt-QM8wNYE}^`GWf*H>5nCjgL&9{B%i_mL8iJ#cTEX_iwr
zbq)U!K#_=ee*S(+7>d{9I`w(V8|r_24FIPBU0VqNoE)wvQ-Z00^=aDrlaRNIbO)rRnY3OlWu4~(2ybXCF{xIyQ?SksBQUsWP>v{>5`M0y#0?i<h5)
zWM9``O$ERdg0bShBq<&j6?nO-Q2`r>fx*-N2>=euLR?H{4gh*nVIl{F7-gfWggSJP
zizxVp8F^^}Lb!bXGV(LdS0Wgb@Yxu0NgO2ZlZ8MJMWK)|;6~d$1WwM$q5E$ti(yUX
zq1q*WPYT{xE%bo8-&li$A$R`p3+4GS`UH3~`5x{=x=TxB&UHwyj0ox+wKQ?7gQ&2(
zUI#O>Ixs#&_FHdS3_M!?!B?$$uD(Vyk@IXwE*v0SX
zh4q@2pW0M72M`73xGAOD2!ghtXXyV#188u9BtKXH#^I1hVt++04$$EE#?>b^1?g}e
zK|$=CL^gL;5b#!$t9AuD#DThn1bjjuGHX%Et(Ab0I>VrWOvcS4LOKav8@E*GU8(pC
zLEn~9B8E=;spsQ}+`;-$Uf{|atnbI(>%`fJ4fk%uEV319z;jB(wo%__WfBmlE0POn
z=_8dxq_4>=%K{RW8`No{
zU;wPLblfG)rN8{u|AP;3aV*?&%!vaSr!$&4nmMYn>k*M){yqTY0Xow0H2OG_B6k1?
zUju;up4r^P`T_fb@E!n}*HF+)0{s^!gQg(L25s&Qr^Ln^4
z(b4wLN*Df*jv4^SVR&LqQ#>rk;S>c$bpW)zmub?@I$$6>z7B$18Qm#rpqV96EzVE#
zag~%AKxq0cYNP>4xP=I0NDA&(2=QbRv>-%^k+1+rEg!YDSbVa%stC#^c%`XsZ|gwb
zzgkN>V>M#YKEH97X%9)Sq~FOZ;IA6h69E8i!?YjysoP_$XxH~B7ryNJb{#zZ^6L5*
z*MI##006QZGJku@*8qUwf#JvD#o~J(FBI@20nGL*l
zeoowO75LL6vXAL_34kfAkc`6h=NnOC!gGaW%GB&icmRob+d-yo&Lrvr;H5Ngj4cnn
zS?6CSMV+{!KyJo-pkM__-FOwnBT>3(3HOpT8DEH(!WXt~#y{^mCdd$NNV8MCzbm+;
zZhB)>4j>{l1bwLYSv4#kh=_xRRIaBM#o1S3o0Ih;$%J0G`&wcePnXpdhQ2R7s^>G}
z2YdPShu!7VH332aFMy&E34w?CRZH#g{Q#I=MB0gO`kw}XYwZnBlS1)}eV%Z;Ab_6I
zzAseJ^G=&dHS5DeKd~+eLkawN3nmdCyxi4%Oy+T>^_u*bTM%U!^@RzXaqdz9fVtn#
ztYqd9Z97L)0F2|z>w4eSUTlk~N%2z*8MYEEJ0rinjO4=YBsc}5eA|epSO|X6e)zGM
zF^?yf3&iqg7N=-&UZARZuXSv}=B_U>_^!P)lJ|ga5ha9YS(IXAQuGcnD44?r(EiP(
z)~#5IMG>j=mrunL>k5M>))A*6p#AdV3SH2?8vqDu8N;NC|I+{n<@oc#3k?vS055Xe
zh)s_IFQIJv*@*@i4j{)!mLvkkCSG(e21P+f$hBnzzzS^fOS!jQHIWNQIgR<2vmPur)(Unm4vQ#RdV--%YQ-xjDLvno&Lv6!1?jg
z2MD#SG>YdO05m*?bLrhCPnir$e^>{~bF$^|`KH$6C3jxlMB^7;fR{qCl^3vg&AcFq
z5k0SX)E{5s2IYRMlEZ90FC?mjv)h2=(L{(cW3D1LQ^>|-uUB{wu4%}S_F=X@1Hca_
z4M@sTqaYn~f+>*lXOYjKO+@)>Bd$n|26}(>Ayx*FH!OF83G}gO)6FhTgr1<7-aZz}
zNa7c2gG0Zx)+g}vWUSmx$cqS)s8!|eS4$hW^PCzFf+0RUdMfYAUX4|IIa
z1UPPhlk9%OU+3TZ69)kI;7;X~JsS2Shws7^2~Rr)g##mpop~n5j~8}plSD_oa(~{T
zt=y{c?KO|v3Naa~w22GzC4X!E^wUEyvkB8tEg?U!oFaB9C+a`@Yy$l4qaCGM7R(Vm
z3K&2Vp}k!sH6~xwK~D*NS&g`zefW^3(;$uzKM?`;M-5`_y*7fjjwyd%kH`v~B;+IRm`=j}0=Fkqqbj6_fTrP&|5=`RexH+O3Z
z%PcHbxQLC7)idPV0R&!3fVSy%VO42EC-S+gT6Z$teOw=-0ILkMX708M`o2hULEB9I
zyp0TmPt&V_t7606bFs*$Vuuo>
z-~~KfJTLq7!e?D>Zeu2Maj6uT9Rqb`f-#2>tT$P{qUk#zof4x#D
zlrZf6=ukX5On04{;p6X}e(=JBJKyxH$>40tQyd}&56_V?Fu}ik6HdHXK9_Pliu}j{
zDPim0klB`i*GoH!a&X5zqMVsKHO)50{>!+N5ztmKoMC1B%Y}$uXmCn$V)CclFEzcY
zJO%;O$`}PhCU|>WNjVo(0g>F4Giym1E1;@UR&v;+Tvl#-{f-oDl2g<@cK6x`Z1Jn@
zltzy^wuHVFx=e=PK#9kOG%)(X%)JJF!tR^BvuITdXC(Nq0pK{_k7$_E%{Nh0KY>{B
z`t~_;1Sa9@_nm-W17O&hk!0S{6JYTE)Hh{5gnb9S@1&iL6kv+7Hx1RH?KA>6mkS{O
zF^B3AO_h+5z@0OI<1Hwtc6)T(XJ4p9DBe+|<^1F<;Na!ussJPB&woKTzWyb
zj%2>+3MgdU@fB>On<3(903IA0T%RYkE@1$@hIix%rqqKZY0#Be3<0oeQvEQb8+=&B
z%0gA^y4A+;b;V3?n^InD*HbaZD}h@Qfvw^S74cXTL;`?Mk8@ku`7W5@Y5s=+aDL#t
z-TNFSVttO^7PM0hufC9|kgH!W70dLO%eUwL9zK9>=tn5(BLId9vQ&SVXPWDRB|f0u
z1ReWwFyTMa_a<4C3>E$u$tJw3SHQ)44&82~_m%L;A{I>Nf0y;N=6|UeQO^0utrp^H
zwy^-Tc#O#8dRg0fSbo7qC0J3hdYMp$?GFHeWe{ade#(sx
z6A%9+p5oe>4Cx}p*=Ed(ff{Ne{n@B>E4TFXLaR8#*F!*Z-=@Yqv`CX#?@8u`nd6sn
zQd`yLzDGky{uZC_)4djzif!dKg^>2rz#-+V8+exV%*t0_cO&;t0H|gGwE&w-
zs9F%F-y8wZ*j)P(gN;vpq)ni1lrBh_-PrK?Iv$fXipE+LMi_waVUtMp!cG=GxLjGj
zBhuzRb%dej-fcOwnWl>xj}A3mR2dZ`=zl3!Kq;u~GFDu_mO5}(|S#Kwfs%GbpBAoPNS`h2CXzhJOcG#mqsk}92VXzIW|E3RspiebG=;NA%+i$aBQ?)~aMv9$%XPB*{0Jy#_co41{CiFOYNq0Myc0fRp*xKXlI&>iW_Z
zi1dCay)aq9n#k54Nz^!vFP)rB%N$XuMk5ViG~oSbx{YfvZlL_}>ZjJqTXaJk2lg{9
z5QhnH+WLYzNS
zOj4OFZibDfOpF%~vVfT)49F1s$d{JkyDZ%e%!7
zs#qz`Z{Dr{i#`BKQmL53?REyX6z*fqWTqvANpp|iNIrb_jPI7X5jwZ+j1tCL}eZ|
zB1aDo#0EGdv0+%~2g)`~Mivi6i0JS;+ADJ*mPvwPX-P&xPAMc^rP;2DQZ
zS4Hj#YMh}*<6LRIk^p_RjOWp+jF3L?(bf==d0sZqB!g3)84fm3Glz77USVi4(dc0j
zzBT!>C?a|ggy5jhGONAko$&}b(Gw_sQbi?IAuraM`a~+eE-=Vl^09MXLjd)sRUwa=
z&3B2?Ra&>?1L0;LUta=X!LFb@xrY6PWvgrs9k!Gjvi)}1k3T_y9<
zi2mV1nQOS^jD_J#4RGTC-z0v~8HB`_M*#Q$U`f3Gj{q7)T0;k6v_hxA9p>Uq@np#ol69hWDAakT0
z0=1m{5doP&fR1>RDVj;^+bui4A_CX@FO2jC#+9fCPen738M;)aRsu
zym*~h8beAjQ3JD~MLo#4zg64_K26=tYmaOnjRA5M)Ht@|x7R6O(^Q-V0;QlBr?miS
z!qKw^2yd4@v-`aO$lnV9vU&_ux^%!vb13qoh&ui|)8E^t3PW>)*M|I<{w~s|4Q#jp
z9j%i9he|P!roTuXQr|+r4iXn5@x7r~;O!etG6NLG&+O0u3MCIgCh=!U#MN5fn|p}O
zz4(ZHr)i`nz_{&fX@Hu5=6S6*6cGe~YB7r)bi8)#msN1PV`er|<;8V{kPrdVMraBR
zf<+NvBM^v+M7@HzWOg?L0PQxRZdYszr4JuKPzgVp^baZ`Ql!Uy_}8nF^!QLX1yI7>?B8R~17L4FkbAVE%qaj|9H^c9<)cLbmpCYs_nPfe|A)+;7#jkG@ekd?
z!jA?zfh3W?9|6S}MQW9dt+OKBtXG=f@pyWe*tuDPE6PR-n|Z5tpC(N5Sfyhw5@f{t
zq6%P$2+8W*KgI;(itLrDFDxnlnq03zHW2hbXs6fSQxPYLsYAisyUxAO(k+0J^EMSb0cc9YqLy!A@Lq+&~fyI-^4Q
zqYSc&E4L>mVEUK!iJY+qK5oE21OUS|bd28%fH987gy2p&+3+;(%MkAP{B?5nOoXVB
zSf>(z(g3F`fbdBO5S=iBLEQ(8Z0`*u92o(>Hp{p~Sl2C#>m;*{e^ddj0XTJ{wpR{TS$~eCvl(RZFfU-5`r`7c9
zTb#h2{Jf!C`<|ECTRtN?QQ7A&Jor%D`65T#bGeFQ?5H1ctaMO>5x-HN%EH*~Lz{>R_RWXd1qD_m
zzc{8sFfAY%bXjTEMXl7avjZTo03;`J9h|_U3CPRk#f0{}_W)qzzh46o)Airu>
zCz(Hm9&NB?mEK$c*^0f3n|lBQ2w3<-P0
z{lfgww1n>uZ)d*OZWERDU5gjc))w!93o6r>=bK>{fnB5zo*LYfCahz&dj
zLOcRI3NOI-pYm-oakvO-CcbvJot(eU^*>dQ14v%DYRFw3FggdW!DoOOwKh!p!?av>
zvRSn|%$u4@AO!MY`9-$|EMb-b#Gv@^q0E4>ub;(py@~?lLT}psO0T06iu_*#KoGd;
z1KbV-)A4R4O>iEqrD<-reT&{5w~^~yaTksNh#SAi4mdq7Q2q5{$YftZK@b76N~A2E
zjigE^kuh*;5I*kmdR{NyR~(FNrkt(5kh)Z8X3blRE!zKy%OVw#Zl?I?QPXe_H%vNQ
zVH%dgg!{8!l^izlQYPO2`N>b8KT8)qI0uVJJAXPNen=dC`Iy~Td|Rfqu?}i4A}C+U
zt2?M7FH}2F36^^=y*~ZzYNm4X99^gNsB)@~E8i0Eq5iQ4qgsdS*R2mO3_n|u{YAlW
z4WEVj<j
z+?Q?)iJd0Djr*ofEP=mgp1D6-q4&?j9czTtet5;!9lCrbDDKdlKF
zJ%Ob=je7mX?koFD0
zCh%=RzNP(Gh;Eu6A$_nPUE1x6>z*%he7QRVfDpeFn?R~NJ+CAxZ`D}6vgc;ivEdzF
zfJnUhgI6{^y{y7uB+m;iJiS|P3y17e7pxsPQIgQ0Bj-V9g{ew9wCu$mAmZ+F%uKu$
z5B=u%zjQn<#m_Wv17XAMSx`%o#mL0uv_)c8&ePsMGH0>*C?`?9W)Ib`KC5ZtZx8TA
z0BV>yBo{Pk`9dE2mgC|l10ZCpfURvC=qByF3B(r*KF-MH7X*Z@QWAgM&gU(NON_jg@YB09cEt?gIT?hFQ_JGCIzN>fxZVHS1jJNr+giCN+_dqHMf%rd&u>it*~T$Z^SAv^(jx*?%aaChT10{e
zYUX_~d3zo?gq2v7hJZw=_a^`V8$8HOns?hB-%lHUxbexSssX!4M1JIfNb*jwuw*|f
zcp&KFU(0-Kp0|y-WAiDQLFzf?D>NvIZp9}amIfol7G4B^`svS0=6~Ym(*)+>$^=+t
z5c-CDc@4f4?n-30>To=|&}h*S6$N=u&W6<*w`+_=61F^HxHACMl7?1#6un$XSZF0N
z50JPFZ24|9
z9W#9%kb1q){6lg!Bo}f*!@09*w0+4M#$Fm*)j4XYS04&l)lF^=77}LGDxX(Gvhr{T`9|aLy%Q*jeHaC3FRqUPa5?L;_6sq;v_U(56z2n+
zZ|nY7%0LOb$*yhGP3v~6{^SG-=mNlupP2P;thG~W&Q&o7puyh~$9Qqh_%*|_q%S~!
z(-aIi{?qu4?%eOC`@1qSf%%&@ZuwaANNK(bWU+Dw9G<072D^kB+n3tPds0OUCVH$4
z^MHwN005TArGoI)fPrjr->}^;8QJkC`)bb44Y2n8C;()Twk
z+EDjg${uX|0MvpE%xRZHw3rgZLu3~Pd-Ipi^`c1BTXi_>oF;Er?^9$UM7ALSr(0Y8
z+y+2Y5+6H>S5&am?$4)SAly99Swi=lPr3F1ECAe4Xs2j2x;{T?i3YD-bK!HTU`)OI
zqG(nQ9O!|Y*H)mBLeMsfl2TVw@ym+oUMdpsM!Zl$5DtLH?t>zk
z(Qd=1ymplwO=MJg#c6)Vy@7{
zz
z3aIQr$v73I4zmHdC3sD?se^rnoS2I-YFvw@A6+{@NfqxG4Xg)WNmJE#H{W|kx}kK*Q)~D
zI`n;O9HpEp(7CrzT1urss|!lE+o}z#tvO$<>HxAZ*%8*{K9x3B+26SRRRCdGGZot6
z2y*yshrlf)wGvIPEu7~ZReEZLRx~hGE%VM*IQR=XzlZ7_y8-S6M9NR4m-7H3=p2%l
z`SeiP+ZE9P>hHk`ZeXyY#Kh}bs%kM)%K;FWQE+@A?=wO8#?V5xv5V5ZRP3M}YsL&cSp}egDfFIrK7LC>KyyF{vqD^-_CSv0(=ahHM!Bo8Bw9>Td
z2dwHg?R#tulit<50#|+YmkPhMOOR_O`Y3X5AeWLsLf$Jt?(hJMdg)7`1(XO
zNr9ke=CkRHsvRc)@RRE2`HQxwd
z_lNZiJpK8HNznHn_!VTSZT}E@zEo)v2?ezrRJVsn{_>=v52;&ql_)X9`&=C+i97r-
zqrAi85!WN3d+g6#JW=C^s>~sSiP(AmXKXm8#>Jqe9T*b4em(hQeu
z-LV#~aMl1C$j`QIs7HJ?S#8;kFe`PtC}YGRV8Ie5DU+?~-1>pXdMnR6_nF*0XIbT1
zVCnE+2saA#2;=vC{etjtQBhv|H36N5<7B9!PXk&Iv!ESi*ZOrI=O~RA@ff&HiDUz)
z83K-}CBtW$Cg)KU)(R*8c9y)O0k9FvpSM@-*_eQFIr+B$!1zxA(8LYi?@j=~0LU`;
zm%YLAV*QVAO@Lf#Yul^j0w7d%qPKm+^EsLGxPTPcsBw
z)hyBas9ZoNme!PmL>m_N6*ls$voO9RouI~G)q#~qX7xdpje&`|A@@vdEQCWzO#hT|
z99BRcWwPR}8Q32}(2}xzN?6SF^?^%QoW$=yfVlK^(3UmtX#iSYcCT0R%5h_~chJWG
zK#cz#09?~H>ijtYAhm|??)B`1D)rNEE+zo3hcj>ARc7fE@ynok%9O0b1ev!@s}Mg$
zmlOeqR(;?hwh5t#&3u_#)()||DL;r_w&X$ALM&+LW5dV0fDSmO;5-x<-^b}
z-FaK|xa4wGnW_0+AUZL{HLlk>VRf))IcJdA)u&5
zEnTc_*;2C@%il|hRbAmO@+W^Touz#V-U-V_)nxneo5*nF`Tq$3U8<1JJzfJKrIGmJ
zgr?&3pEZCbWh>(Hk26AS14f=e}l#L{8dB3e!5`=QGw1tkRURx2OGKuM%aR^*-<
zXx-`tWX!YaX3i$VYA9J?2{0=}lk)Hfu}#c>4ZnW~F7`vmosj~3lt!#Ysr5h*rDwh6u!+-@%;D;ItwnLIC>xnel;
z{cXFK&}@H_Dd9U!pl*7t_~LkQ_p9bAkdE!5IRjPt-nC2hb?dXt(j6NF6Fp9iM2KbeYRq;f
zcORIIHmct*iN_J`1f_wrRS?8Vu_~CWC8r=bIcAJnk)DT@k+FPZh1Nbb
z_Apc3@|6#=kLYRu;j1>(n{h=;L4|DV87d`rc#J5zpi&?H!*XiNOlSiE1txsJLk&E=
zsNmV-Gr%M7Pd(JQ;=z#wDpjvyrpTk2by@6nss|DU1Uto%u<=fcmTB<~0GK|K)BEzD
z6y1*w0k;Yfsr#Eh=Pdw8J8Z@a7i@zIC??QZ{@TeG5AI?kc5rL;w2=;<*ggrCr9XB~
z#7pPbsA
z2zCL^>1Gf4yLU{8EYf1{7yHk(X!CrzCI2>Bxbzz0416wIHdRifWUZ1r8FR{$t`hXw#5T%1`POS?wy@p8O%JmZ_K
zWA#mhRon~Sjspb`%H#kXgi@uKIiy?`Clx@q$n^6{i!Qm$||^tK6zmn^O~
zgR>pIqWr%HKrWI9`0NWwTF#e779;S=bCOKBoqAo()=49KOjtHcrb)9g=|FbUu^t*
zTvsX-Z0p2${!L6O^S?fc$~|Ij-eQG_cPlS%7R)40ixdo7P$kF?)Hb6*b=N8f|%Q
zanSo-x^X@(y@73^lHU8$Hy^O$$5cN;`^W^CUb&QoW$)1e=Y0U)ngE2KXf2N&fJBIs
zQ;yx?_Bb$)?7v0VrRNX;v}XY51?Da>M%N;J;1JItaeOj(6b_dlFfpECda!>Ire
z5+hSp%o=>CEfzkAug^awqUdJ?m<6XdPGJUUsDighpnT9$96=s|$Um4drxuJVCxW)$@LGf$p4qB5rtn4)nHo^!2>^(65^s-SM?yz&*Z&e!5>8u
zKOC6;x_CVYG$a~L7p0{28$Z0f;uzcVkp=)A`PJ&*On;Y}
z-)7@&7%2OoG@tqF-Li-cU)<9EsfidVXGwZ>yzMsB+S*dZc+mhU@9Yn#2yOfl1A}u3
zn@3PqDi=b1&r-Ux22*W>03bk3n5n6~y+rO_dNIL2F9_%=ar_?#0T6sSCr8tHnPA^&
zCb~r7eC)zf!e6O5<92xzgHd<|mJkR4J6aM!8IMw5Zb#7tl<&tN$W=%>5F$&UqTo~=
zJu3VXRAK{prmF~A6@H*NMraG?8PY4z(e7Z(Kggw-e&)_HfXX;3Kk}b1`ei5FFd59G
z*WFwCIHLedZGmTiia^{5(!zaZ{CR#p`n0_NsS#YOdYR(>b{V+SeVRh&Dn|X8nUt|Z
zw_=Q2H>v`23M);#ydW>v{4Rv5DX77%tOzt8gdy#!ybqVzCYe-mTXL_;3f)0i>M}69
zYG$I>SqUMgs`*sslOX*OL?@}UN#BMca9pV(z9jD}G=G?FpFrbVl(T&Km9DL
zwSlZGPS;Zifi1ZnA^{b3#9h#r;EGDB{pYK`-3CBG8L`dZ006lM0`h)i?i>N4&9>+@
zVXntF`+=UfdIuf0v0HNs0NUDAv59mm-W7MFN@1>t{1hfJ*0-be*(3%~d8V2(wf?D$
zdmc;)`T4;NOQP)mQ^;hBm(3_20B38ez7LD+oACPVWXJ1z?@JE8=S)7Z@GZ2}*6W0>
zX;tJ^(5A{guL)Yi(q}P5M9y*wm%lGxP%X8pz!8?z2M|4#3Vza}?1!=`EK)3^+X)8h
z#it*9A^=3)!;(!`#Gj}TG>z-+#*?J2A$-OW)+ZqVXM~tRk$({U4`q6(k?azJF=9Xo=sR1UBG+y?(l-j(J7(+Ey
zRnzvsqH%VR#@K(|<)eGn=A>RmBN9iellaj9Hle#Y6Vvj3=wPwCv*(u(iJ-n??oHR)
zd~KDC)Mf)^DEVTj9G&f2Lb(FhRodO9YV%ejA=z2};fu-lj=c({^<*^0DK3H3HmIbb
zsqIAqUCUt{eTm-2U)0yMz|R30BZLtAffayNp+f+UOZrK$xOfnz63?ZCaNsF~`(W&O
zFgM!DqQevu(Ud}LqW5sxDW|kCq8lXm2!O8QnOH
zElOZl%?HJM>gK8_%W1oSju?)h-54LpX4=@MoXjn))}k6Pg7lU$$ob~{Y<<~OX$m>O
zm_n$3V}1)IE54qZzZ|ko>J>Uzacq#J8sc~N=c%m)-q74TsE=X)3JyQ7Ehs2!3*e^p
zo{E1<6zD)z(%EK{9?9wp4og5R%jQ%h-0PBY_>lSp+cU4VER1}b2Vx6vZq)x6067(w
z>yDsvFj>%Qz61lsV0%Bro=j~GtYt#3ym#IRJLz>jDpo!Bh~}mSXjAn1SSUkskT%n5
z9OV4_Ke`hDv?q(DedT67KMEx9x4;D=Guvv-)?1!u
zs>pWu_ptM9qj2f+Ogg<~{+H?31NqrWzWgeBpjTrjqLY*H;0`kx0zjtyr=DJ&e1Ez>
z*3X$20J1N}9*}6hu9AvIzDLr-j|V4Rpx}{{>o%zWZ0D6ml<^@2U|Yzb?6LA7tuwC`
zlKG&?FCsa2Y3Q;U`X3DA+s^J+&!tX@<9}%aav+f6dcUQIPkz>-7gQl&ItF+_S8FV@l#zF7cB<6JPNP#7rB3}+}
zUnS-GYlp2tV)wLxs*30670bM++WMT8*OLB8^Yj8;{{WbbN&sXA=YpPi;jfol~-#Tg+V%*idMgUcyMQcb$13b>SFR*rI`Q5^VVv+%9p_b;b*ej0BS&$zxK`L_&C`G01K;{MoQtP
z2H;`^pc>@^xW;-wHuIM!rP@=4w`V6%&=m8K{GSw`Wf3am<4yp`wp)))=z?}}Rgi>S
z#s5i0G*EEXYOJTq&@EjU%g2@cwo|f$GP%I9
zZDCmsp+&q^^7=ZKndY|=eJwEAcdnF!h;S@iOmOw5`)4is4G8zZr={_rC_F7kZqSCp
zGj;=_La=OvSGVH&t1{cnzJBF9t;{a!}+%WfUSQ8SE|@bp4Px8GZ?eJ6L$^+
zi2_vl1PNHfN-Cf+r{7r@S#oj$OOY~AvOE#@Pi`gRkcRg%&t|oqKllWNquDcbaYZO|
zJ$}*=A+}88HHoPX8SZHz8zO6iOLMtpNaV}ck1$jbI-Z2zbe6_x(JV&neqCfGGv0i&
zsYQ(QZY-^3oS&bO`Vj|&Kg;7T35TgZ`_t?PY+wJYI#XuyfP3+@s_w_((X(zO9*k^9
zy2CqW9io8#ANfYMk#Q4L+7g2Qhhbnkl6ptXZ-(s$zkMm_rcJ4Y(bSV(aw
z;4^D)`$8Var9DtNyrv8YJ0^2l!^Vl4m)QvORf#6x5l3Mj+<3l@;U$hNmUjY2I?^TF
z{$!i!tf_C5O{tCLAD4YTRsrP@kfdWoohhTBz)X1?Ch!ns+;`e_(znvtzD$8+`Lfjf
zQu=i}IL__rx!gY&mLZ^DvvFd^)au=$yCxoFae}1urLq&-XbOa
z4*?)|4TK$VBwn)rd065j}3sB=MJ=|=Na03QT
z`@l&cRfR4-Mt2|(;=(sIfLveh1OU{!rT3_@GHETQe_E0A22`0ASUD69AA!tnMycuo$n*
zURMc)^DT2mQQCIjskyK*13@xvn=zM0ZzhvrcR_udt>fIrXkT(k8-lme-;2LeR?J!t
zXQQ4fsOL$OXm*xp4UvY_$F(-XW2N!$*ru<
zG!)7aX4@(-jcw6Sb6Pun7r``>MPtrqiKT)`pm$1V>Q&FpgR`iX-z%0U{)bLrv57gJ
zx5IcO*Q<%?FwwIo{3@lCMoj!0eGibl1%RzaLnbFr`i>C4T!coY{RkAUOY=jJKt<7r
zm0N7XQ^+sZv6UqA69D9K3oD3k^ggkzSm${7qm*>TQzZnD)H8Q-oQ8cy?AMkN0Jc;&
za9L7;cZff!Ev*1D`4^_x*a?ZZDY<)HD6-c>wtXtguIipT>3Il@pcY-{A0LD5BX7Or
zYDw)>XP6EUww~tH6O^|(D33I(jbwzWn)=YzJb322jgPO)c3Cnn((#&90GbCkVe#ZU
zi$8Hn_RC)CEFgSsOR%b?zG5D%oVovDDwv_IMZa$vZ3{F*VKSH2Z<`2!R$Ia^_TjQ2
zcx35+2LND!&BQ}U`z|4#4~bm*{%`_7GBf3`f0_9JdhsRz7$as<`5@6JR$59=4C0_F
zZY9PxZ%XY~uWRf6#+9xUW_A@gBTtX-YDB7iniTiz`cEpO19ep#8Yqxsl{X>&|`Jv~&%51yu1fB&gJkc>G69Md*ug^b1zn?
z_c0Sp8X74M((F&H3JVUD0n^qsI%Y}`(*^xw(2NEnIHewd_z&?($Cxk5jb|GQCuT0Oc!HntIOcM=sv
zu{=3}Np>1o0e--V!^tRaIe{vw2j(>s*e46+>nsi5HF^)cp$E8gywANJbgI}o8<8Mp
z+zvz##>o8>0HoAQ0U)D<^YT0;Ris|^`FvJK<^bEeAWob;+oh5p$gU;oWJ1ng;`H6P
z`U!C-$FOj$*0-`9M5jyV)J;eophm%$VtIV3P3QC16QL*Tl*^I==Z;_O6b4qY5SeKTJR7RyB{%@l%g(mZG?>l|ul&BzPznIL`4|9%@Wq8x``t*t!+~0m-@f)1FsCiP
zbK8cU-jH_U&%(#*Y4zF59t)tSJpQ*6!@#CcQDrwt#0O*!$#
z>~UXyIYo#*AihTHfbV~)QXqPqV9`oSM*CNT3E`mpf~l6Bz(KTURVjdhnqlymrUdw?
zKA|^f+L*H9;!WD^`xK!8$SK*7DR~ISB4I}{8WnFEDL`zdpx3VS4LuPE2QolDB
zR9IesScIi<8vsmOW$tbQpv@x}>0Y
z;N9~1rhtd09DZhSaKsg`d>)??zdg31Pq}kc{M@H|^Z{SR^PBHLu&mLrQutu1DigU#
z01zLfs~4koKO1>xaTqF3Fe_srl5!{YuSb)xBp7e3!~G*hP+N*LRNg$G7oP)*{_ksm
zV!`5(hD=LD5X~y%Jc`bGnSOE2b$=`QD^evl;
zw*i3BwPvte?~D0Y+Oa*P4MMv2%OuZl?%sugc3g{OIj^>q?T6|zp!?aarXsoH98fR55MjsA0i
z1u*@kJ_9v`CdRMpL532QymhE~5GL6xji=B1Qg)rHgvs==
zmj6CZ`G(5So4g4pcs_T~rW04gMOaT8Y$zMaFjo+|D#MSb3YAa(kvi72hbD+62
zLX;RUm9|V#&>dNk#RopX2n~4Ga=`m~qU6hH#s6;skmv6MfT1^I(eJ#UBOv5P9wAST
zW88Kyi?lsw#LBzFE+ENX@FAyy-$;`}L;LS_55Y@P@qI<0V@kF}+v&!|VD(dG*8XGE
z8+p`|wrbo|oG@wT`=CxDZ#7IOO?JZYVTGD(PHd^o^ga%RQJ3L$_v61w3OZb^iZ1-*oeaOJ^*&52{nCJ)RUX9O7o-|
zrxnC-HF89hpJh8X<)nNrU1WxQhk$eVXAr);Zig$wteMP6G%Ks*tO2kV6S2~G&GXcl
zc5s+K|$t0{p?CICPaG0#|kbqmUqn{zu3T7CW`
zCdVxV{N)S3J{P(c^oKrxJVdGd_!^RpBJNjs=Ho@qqbpj}K#@!O@7RFzOUC=T_zu*T
zG|2$~vo|Jt8-#KW1Mz19{Y6RDeO;Ai7X<+M3o6ws3Ap#B1HSGnqy1u)6$&QjhX2H1
zr5avA98jRz;ei0Rv7?Ol=U16;=IYcIe5VTJRsg}*P2kfOHFW)~gy_ZKEmfaU?tw(K
z_J?cyXJs26kBbs?g#7DgC8Oji)XfsLKe(F90}1jwUJ0K#>*wnT1m4kRSxbP9qZ1p9
zs`}yHem?-jOLKJ*H<>a6NB`S8>TczBZjY!P%D?}bbpE>yAbC72kEtCf;o7QIeKUQ*
z)41%w#dwER*>d12z57<+Il5TqU*+^uUR)|sSIMbm=4mqkt1KM*{7Ml&PlIXF$TLfw
z`3i-)RYD@NPF-7Ztg!7l-H%zw)eSiKbf
ztF>l`kkdQSu-}e<4*+TL$5(t{Ou&2R>rOg9SMtx=dPY4?c@|T7SAu2}h!MA7@f`ph
z^ZCj&ILi{^1i6!dv*_U_x^mnGjOLs(0Iakl8XxcJ2hc(~wgj*C0YX@Oc0C*rhW(Rt
zW5rkSQ0|74|Lijj)aV-E1ic%OTHuo07YD`Ft&r3%jk#lAPBvi;z?i8WQ}#1BSZ;Cf
z!JhG!F+a)It}JT{_nvVMw8a6ksarnOsYwoU80wPYoK<;c!6`2qEN64Yd=j~R8e#|O
zEpKmXsW5{LXX=H)GMI5#Vw*drp{5c)p$|ynN^t)H064wh;8eZqMT#6~cNBLEpfdKS
zc3|t-vJF44003TjD;O-Ufg$U1DZd08jycAfDpX
zI4NA`CdZh#n@crzrKY%;7Oi)Y{L5&xB<#K80r>gr;?DBFOykqqLH&>vp7~!6IcC7i
zTd0D>>Pv3QSTG1Y75hv8*mi;(b6@|1e(I$CnFS1hjifzDG45&1&?2GG1T>7Zkv6hU
z2M9g#8$1d{jw#1_3jkFdDjz!x4NnexUKSt1JN?pCUF)n10?b+9@=PO1&{oTS0{~wA
z_W%IXyH5+Tvu*%@TL2=sk?FAyjBy}VTYhwM<-Enxz*_(yPENW2U{uzDALahd9gn_k
zHp;5F;OL5$Ia*T
zA-IH-$uNgX%pb5+G$KDuz!eJCAx7m6zU(DzlKj{J1p<7M0l>r?9_zY_9?B+h@~&ON
z02~`R4aXT{za9_4t~5Z@!jlgGaML85UfgZyJbtHP_=u4U0G7%sUD_~H#&w3jF;FDY
z0$e+LCeRB|v~<;?Lj!2GAh%g*kS^{T6$v-T6!S&pVau7}I@1T~#S&*v95e~jtodqc
z;?Fw%ud;JkO)4JP$q9Z2!V(h&7tUDMf*Z5;n3DdU5oZpsc#-pG6yZ4?`f
zY$dT6ZYWl$mE9ScxETIyusqQ~o!@m*2LAJ|g*B6c>LUO;Z9uoGRqK5a0Vq_4?jXGc
zCe?S+jfi(igr~*GQU=ahpdxMz*H|Kp!q;QlqFugN^uuqPfZIMnd@BW+CGtW$zM2GqYF8f`TDCk(3#O1`T`Jnv5OhtR_{cG@NyH?T>S
zZg`nP|5)PxSuSt`9-J|IOI>mC*piHjoLciD?6xEOGbXAsX}A*-5)m&=
zY}_Poib4Mt08UUT+E=X_GyW?lu)W*e(E)T?Zy=9(h>K`1s{c*Q_a+nA-Z?1?xp5Uk
z+wqy4TdA31^?1C%wF$7*aAGG?V4jJEh5cSGz0=G$v-pH~99ni7oFM>Ld{)h{5OW;=
zLi`o);vd?E(tSeHA*8_tE+^!+%D)G(d3Am=zcjV&7;RZQ`Q+WcN_8EPiil{C-C?I$X2DplnAC0&Q
zRw*UoTKXP$hWG8-7MeOc$(XkikJ-#yVc@?8K)Tsd47s~y0#0Edk6C3j=pnC|?lH}F
zwRDTVP=1Q=FLOS!MbA$`Yz&JSvI3C>>gsONVgd4VYn-@SW5Nn2q*m&_|=f~n6~i7pRC
zjNq^oIT|@wwz-qJ7@X*Z>O}**HTi#|XlUnAWqKiacxw7-w`eSIwUMNMernUMfhx$l
z0EB^`6rw1ev;?2d_saV76V&Al-{d&Ai$-{Eo%kzOdRVSX{l$*k?#am5^0@z+?_n9^
zTxCAx{y%gM`EY~Pb&~eGYf?xISM+fnd{b0BO8-1ADPj?i`&dCtx@~K)2|M#-&7j%5
zX~}<($ZaC?jT_^>B2o!bc2lAhtJhZ1=B2cg%oyml0d@loV
zQPTjogm%`8Z+s4;3Ps}(H)HaO^#mzz%|o>b$a^i^z;69oE(A(~(oTgh$s6mDU>x@+
zqp5G*9cL_Zj_Z&gc!!a};N#1`=xw>9yvtAEJWdAxsWhuZz+BUhvw|P8uEN7IK!w-B
zLDt(AU9giG{x<*s^#Yzf|KjRPA0psn-#-Arh&ezHoa;x4o$_X3NX~l;fK({y7A}n$
z-%clre-J|%1Az4wRAD>Sil11C=nE}&%HcVclTG}k+x_`V
zFETm^Q=~KCIBf|t^8|nm&G_~KEDWO5
zD~W?rS?J*PxA-prAXbQNkJ0z3)cC&!K)mv;YqBP38?SPHNgKFdUj=EtqX9%4-u;ud
ztJig<2EyLKUVCiEEF0u4A|a|R5D`iw8Yt+fpd?Cx1k#Z}qCil95JD7CP#`2ET7+oe
z0id8mJPIB3ya4-+z2tDna()87_vY@+Nqo-Dd^?_>Z)SE#9?nEkntS_*AyK27kzeQ$
zC*(5z9bW%n$N~`5^7Dsk6IrxqANGOLZinNDne1*Ge0Gq$_W7s@0w%JD&~{u2_(ocV
ztmAQSwP!}>Z7fOg7XTO|d{O>+XF0XK1-S!#2Ukcf;l@4iD_R4UHtY{L8VpjAc(<@O
z`<2vgqo-UenIGoFG_C_j%rH0A_mc)$C}HG@>fiZJ%>Xdko1_C`{U&6u3O9m>HB-cO
zvB9wJssX$PYol}J^L4j`2&y-?3;^DGw1alnhDredC*u=4oB;sAp!AIQA|~wA1Oi-$
zuE9RlwS`_p7HB83Kp6>xkaU3aKQ(}y
z6^aT*baMilJW{P0vP6)R8Q?0xVUG-xdt6Lw7
z93~W3M-H$i0?SFie#de+WZ(FlHC0@N)n9#F%I?8ZT)(i6Vcl*%-eA(pY6Cak>bH|M
zI%u)nn2?jgF%BnDw7@3Fj?+(UiFZ461`;(tA(mt)F|#enPP{UheW%47&L?Jd__TYT2MHjZP4O9JbxogQS&BTm`~mx>|#%Cg6U&;yWhf(w7mFPz^I!s+#b8HMGzz-Y7&af$*oNU9G_I%xDzF#88i+D>
zn0LbQ6!$*BW}ki=ID^6ftA4@UovFrD^j!haAfGd;KC975sr~ZJFY>Q!1HI@4h$Ydu
z*$A92J89cqR}S6)6E^{nNXgwlXNduT`5RQ82|N`|TH5@JugGi)ja!4|Ckl|viAzJ!
zyhrv>0)>EPzzRqlC{4S2)-sI-luOuXiEJb}N`M`lhEO`Pn~*-(0sv%tvUZJQ2)1Q_
zQiw7RWD_9M++~jsDV9kgP~t>v3UiA?Yvrd4+fs7Y3X$~feif2g5w43HBY6aHOEHRu
zq$0WkU=&0n^IGj|ttE_FB>}nL4**WODZ26*-=h7C5h9e2MF5~UputIAw@;dqKKA<5
z=8ptP{{Vn#IzsBN=mG_G-OM4al4+E=FUWYxxmVn%QVNQq7%_pgo#h{8H~?a9*28}!
z5}RJ{g4d~GNpd*Njoj|EfhwLZHy^e*dM(DsX$y8`1pw}TVVtMoYGoo6LV*1qul9^Q
zfRKxG=BX@UGCHzi1G)x_`A4=7yfaj(hxqaO1Iqo(&=al3U?r9i0gNcxOJd7{w`p11
zkkK;$ymH1ne+arM9#hc(ZXU(m$QcT;E&~8hI#(p_ZU%#vkm4Z%x9kF7@{H3b+0G4p
z;jU6XdU6>3GivQh#ei$Hq>ty5K8c-+!vPAeYi4-q&Nm~!B7lSS03zMkM@tMzjZCH*
zPr{8NU_m}P^M;|GWB_bdnw3_&q=Ki@T#d5e)-!#)H$uS;@Eh7-Nn%2cYtSt!eSlmr
zw%{e=K7W;jtjssYr)LDGLA+7Ha|dSALd&ip-GA`R&XQ0Gik
z^1On#Ic(WR^Z3Z?Mr)}2x_r)z6k9m*HA$RdR*Cq|NWV&5`)O%;>Mj5%^*;Xs0N2S-
ziBmwR>@FNt?;W|uk?vsN6A5yS$N`we@|4iieERqFSSyIi#xe)mD{+O%acrr-T1oFn
z-Xi%c?l+9{wan)g3OA-U5e3wUC<;6nLp(JY^uE_ORxtbCQV*-E&n=mb2b*lB*SFen
zhWE$)hXV$>$ej7r)+iZh9@r8r3kxpa^lFHDT{h96%QIvcph!$x~wvO7fyv*;aq|H^jIAWXbR@(A(1gS?@u*C9nn~#LmZ46
zX~)8i*Df1>EB$o-9xm{&(ai(@_tCP
z=6!YvM-oi_J%g!5Afxe?-64DD*|1~HWY8HPzOl9~s3SHMStu7cF|ZL2$+juN#9SeL
zGy?#?C&+x9E#yzCfPiKItYhFv7!q!GJu%aXXxo&Rs|FU}A^@rza2^0|Ap^V!fD;Cm
z0nqGq;U`cR3HnP=rECgohW80H{kb?am1l|Ft(IRRAK$^r0RSzSes}saY{)kOcz6l0C_3Zl(j-B#cnMmRtIFk-W&tn3ZOb3@t
zM?xvhuw@p~4e8l@UiS>gyDLRl>dHG^=0`I$K9R1gesCODu{iq->z}skXhS5gN!}*z
zn&fm$*h@@@_E}|va1Yz8TV`&<2=fDRK@tncANN#fXcK!3Od_f0w>?`lpek}@;Vp!D
z+E>ufIGQSPfoy3}xIwoAq;}wt7Wm3t!J#2Buhfl2^IFmXc#gBbVY*)qfaZ~huHOp_
z(WM4zZKp;`9XVJ#58m=u)OQ;xs?HxAgGo%f@kg!r;aeta4{Qx5gsQV)u`H1fZd0F%
z0@;N&0s!HgcYk{Msh56u>8Y1L{q1K!rGsULkAETWV3Grp22r3x;5*tgIyfRCu2O!1Sw53lZtw)d;Lt%NpP!0KhUW`QdTotaSF5
z0YFHDVz$4sOWJ7Wz-XwJAH3zaWYXGL<8?quHd{wKS8m=13u+Pn4~(-s%%Tv(vPSbct$tLMew&cG4e)G?bXn*?a9p2Ov&1{r#^1kRaC5
z1eofj&2GYf0f6q|0Fdp(ZzZT|N@L!^hPgxo5Wz8nqHVGTuP8;b^=k*btNfsze$_
zg4MHl$;D*!THOKwC2_1f(8BBew-hw$D*yM?6Ucdt2e@|l=T{nci3T9@eP2q-8*l?mU9+^shAAV&SdG3Afq;0&C=e0Xo{hH{E
zcyE<|9UtkJf`+=pZBLp24S}ySO+1_{*SUuSKw28iXI3(=kfu`d{%`=mz+Oq1uw!n`
zTSGJ{^()H{2LK1e{%E^3mJL{8&A#)EGi@c7PjbC_^VTo<+DNU(3pRx}2DFxe
z1P~@^sMz#F&2lCyGqf!P3&({Sktbi&ujWuWe}5xbL2cdwfU`-S5|L{b0N@<#Z+CjV
z{+s;A&N+;ne7F(#heL#DZvP$Mu%l%{1GWn+HXx`pmu1xIeDGEbA#B&yUSI=+Az)h$
zmO}4?b1(pWt$5k8v!APe8MY%B_*9oTS9FU8Q0EukyPPyYbnZIEOYgEq#<#A37_s-AzQ72q`vbk2
zK144Linu){NitY3e_U*HDHpl``EIHl8&j82W->Be6G05WwEo0usbX!lN}_V92$
z1*V{pQeR{G%t`Xv`e<4z#QgW~u*nxXWI`AdmP3z$vVHD!ZPrAHX*weTt=NM0h!ST>
zzskp7p-{8F^No9V0I2iJ6$@~jUv=gT=#=W3r6a&M{FWYib%6%(&Vdh@EPnAJkIqE*`GlqP4UO>Lb(c;irm~sIv{LGDt$RGZ?nrk~AizU+=Wbo0=&Yu)@-D
zJK<>vUNy!kpJYaMA4>LdLWhYgjzCiYKu9pi=QE!Ps*xw|d<3Y4TFTBoOw4%G2_<}8`744P}
zoGh-u$?#2*dJJp6{NYZ!y!a8C0tJiQ04M1v;n6Db*ue@!ct`f*erSz!4~omc9TK#(I1t|s@4q;cQ79?1@W
z0RRHoN*oW!;OzD(8rrt@A^iLm$0}ZgK*w$ZlFl+mYQJg-EjiId4r)Op!Gb1
z)GhfI7a4&_^p_;HDcm=bL7xvl0&GpH6jBvky?_FWwT%}5Amy!1Af0Zwpi66MX1z~D
z?aO!G=PL?ew4_?|BXzV=OH=byk>=^ih2d7BG!4^@h7bnB|4-Ik?~9NsJHxY+m+
z+_-D4V*UpJz?MzU&CZAbpt%SDzs6QpndxN4H7EH*O}@ZXwQGv&-QyVKyA~dr$FKe^
zYwa)h*ywGJWq4g+{7ky$Cuac27pwrFEcu!?bYtmC`M47Tq-=X($*moZSqMVs0dT4p
znV_%)fD26a)D9)J*HPzjfYh=0C?fkGIf;+S@e&^rVi-5WkJ9YPgs5d4805nB#x8?B1ofU
zCm-?>0K&roAev0e_$NNfhIAnS{)mPk^HOOpbU%{WQ2hl*K${tjNg{{P2ULb_v{g3Q
z6u-M?l*)k5B&5Q&aL5$>g@95vWDM7G