init. project
This commit is contained in:
171
rag-web-ui/backend/nano_graphrag/entity_extraction/extract.py
Normal file
171
rag-web-ui/backend/nano_graphrag/entity_extraction/extract.py
Normal file
@@ -0,0 +1,171 @@
|
||||
from typing import Union
|
||||
import pickle
|
||||
import asyncio
|
||||
from openai import BadRequestError
|
||||
from collections import defaultdict
|
||||
import dspy
|
||||
from nano_graphrag.base import (
|
||||
BaseGraphStorage,
|
||||
BaseVectorStorage,
|
||||
TextChunkSchema,
|
||||
)
|
||||
from nano_graphrag.prompt import PROMPTS
|
||||
from nano_graphrag._utils import logger, compute_mdhash_id
|
||||
from nano_graphrag.entity_extraction.module import TypedEntityRelationshipExtractor
|
||||
from nano_graphrag._op import _merge_edges_then_upsert, _merge_nodes_then_upsert
|
||||
|
||||
|
||||
async def generate_dataset(
    chunks: dict[str, TextChunkSchema],
    filepath: str,
    save_dataset: bool = True,
    global_config: Union[dict, None] = None,
) -> list[dspy.Example]:
    """Run entity/relationship extraction over every chunk and build a list
    of ``dspy.Example`` objects for compiling/optimizing the extractor.

    Args:
        chunks: Mapping of chunk key -> chunk payload; only ``"content"``
            is read from each payload.
        filepath: Destination path for the pickled dataset.
        save_dataset: When True, pickle the filtered examples to *filepath*.
        global_config: Optional config dict; recognized keys are
            ``use_compiled_dspy_entity_relationship`` and
            ``entity_relationship_module_path``.

    Returns:
        Examples that produced at least one entity AND one relationship;
        empty extractions are filtered out.
    """
    # Fix: the old signature used a mutable default (``{}``), which is shared
    # across calls; normalize a ``None`` default instead.
    if global_config is None:
        global_config = {}

    entity_extractor = TypedEntityRelationshipExtractor(num_refine_turns=1, self_refine=True)

    if global_config.get("use_compiled_dspy_entity_relationship", False):
        entity_extractor.load(global_config["entity_relationship_module_path"])

    ordered_chunks = list(chunks.items())
    already_processed = 0
    already_entities = 0
    already_relations = 0

    async def _process_single_content(
        chunk_key_dp: tuple[str, TextChunkSchema]
    ) -> dspy.Example:
        nonlocal already_processed, already_entities, already_relations
        chunk_dp = chunk_key_dp[1]
        content = chunk_dp["content"]
        try:
            # The dspy module call is synchronous; run it off the event loop.
            prediction = await asyncio.to_thread(entity_extractor, input_text=content)
            entities, relationships = prediction.entities, prediction.relationships
        except BadRequestError as e:
            # Best-effort: a rejected request yields an empty example rather
            # than aborting the whole dataset build.
            logger.error(f"Error in TypedEntityRelationshipExtractor: {e}")
            entities, relationships = [], []
        example = dspy.Example(
            input_text=content, entities=entities, relationships=relationships
        ).with_inputs("input_text")
        already_entities += len(entities)
        already_relations += len(relationships)
        already_processed += 1
        now_ticks = PROMPTS["process_tickers"][
            already_processed % len(PROMPTS["process_tickers"])
        ]
        print(
            f"{now_ticks} Processed {already_processed} chunks, {already_entities} entities(duplicated), {already_relations} relations(duplicated)\r",
            end="",
            flush=True,
        )
        return example

    examples = await asyncio.gather(
        *[_process_single_content(c) for c in ordered_chunks]
    )
    print()  # terminate the "\r" progress line (matches extract_entities_dspy)
    filtered_examples = [
        example
        for example in examples
        if len(example.entities) > 0 and len(example.relationships) > 0
    ]
    num_filtered_examples = len(examples) - len(filtered_examples)
    if save_dataset:
        with open(filepath, "wb") as f:
            pickle.dump(filtered_examples, f)
        # Fix: guard the [0] access — filtered_examples may be empty, which
        # previously raised IndexError right after a successful save.
        keys_repr = filtered_examples[0].keys() if filtered_examples else []
        logger.info(
            f"Saved {len(filtered_examples)} examples with keys: {keys_repr}, filtered {num_filtered_examples} examples"
        )

    return filtered_examples
|
||||
|
||||
|
||||
async def extract_entities_dspy(
    chunks: dict[str, TextChunkSchema],
    knwoledge_graph_inst: BaseGraphStorage,
    entity_vdb: BaseVectorStorage,
    global_config: dict,
) -> Union[BaseGraphStorage, None]:
    """Extract entities/relationships from *chunks* with the dspy extractor
    and upsert the merged results into graph storage (and, optionally, into
    the entity vector DB).

    Args:
        chunks: Mapping of chunk key -> chunk payload; ``"content"`` is read.
        knwoledge_graph_inst: Graph storage receiving merged nodes/edges.
            (The misspelled name is kept for keyword-caller compatibility.)
        entity_vdb: Vector storage for entities; skipped when None.
        global_config: Recognized keys: ``use_compiled_dspy_entity_relationship``
            and ``entity_relationship_module_path``; also forwarded to the
            merge helpers.

    Returns:
        The graph storage instance, or None when no entities were extracted.
    """
    entity_extractor = TypedEntityRelationshipExtractor(num_refine_turns=1, self_refine=True)

    if global_config.get("use_compiled_dspy_entity_relationship", False):
        entity_extractor.load(global_config["entity_relationship_module_path"])

    ordered_chunks = list(chunks.items())
    already_processed = 0
    already_entities = 0
    already_relations = 0

    async def _process_single_content(chunk_key_dp: tuple[str, TextChunkSchema]):
        nonlocal already_processed, already_entities, already_relations
        chunk_key = chunk_key_dp[0]
        chunk_dp = chunk_key_dp[1]
        content = chunk_dp["content"]
        try:
            # The dspy module call is synchronous; run it off the event loop.
            prediction = await asyncio.to_thread(entity_extractor, input_text=content)
            entities, relationships = prediction.entities, prediction.relationships
        except BadRequestError as e:
            # Best-effort: a rejected request contributes nothing rather than
            # failing the whole extraction pass.
            logger.error(f"Error in TypedEntityRelationshipExtractor: {e}")
            entities, relationships = [], []

        # Group raw extractions by entity name / (src, tgt) pair so they can
        # be merged across chunks afterwards.
        maybe_nodes = defaultdict(list)
        maybe_edges = defaultdict(list)

        for entity in entities:
            entity["source_id"] = chunk_key
            maybe_nodes[entity["entity_name"]].append(entity)
            already_entities += 1

        for relationship in relationships:
            relationship["source_id"] = chunk_key
            maybe_edges[(relationship["src_id"], relationship["tgt_id"])].append(
                relationship
            )
            already_relations += 1

        already_processed += 1
        now_ticks = PROMPTS["process_tickers"][
            already_processed % len(PROMPTS["process_tickers"])
        ]
        print(
            f"{now_ticks} Processed {already_processed} chunks, {already_entities} entities(duplicated), {already_relations} relations(duplicated)\r",
            end="",
            flush=True,
        )
        return dict(maybe_nodes), dict(maybe_edges)

    results = await asyncio.gather(
        *[_process_single_content(c) for c in ordered_chunks]
    )
    print()  # terminate the "\r" progress line

    # Merge the per-chunk groupings into global ones.
    maybe_nodes = defaultdict(list)
    maybe_edges = defaultdict(list)
    for m_nodes, m_edges in results:
        for k, v in m_nodes.items():
            maybe_nodes[k].extend(v)
        for k, v in m_edges.items():
            maybe_edges[k].extend(v)
    all_entities_data = await asyncio.gather(
        *[
            _merge_nodes_then_upsert(k, v, knwoledge_graph_inst, global_config)
            for k, v in maybe_nodes.items()
        ]
    )
    await asyncio.gather(
        *[
            _merge_edges_then_upsert(k[0], k[1], v, knwoledge_graph_inst, global_config)
            for k, v in maybe_edges.items()
        ]
    )
    if not all_entities_data:  # idiomatic emptiness check (was: not len(...))
        logger.warning("Didn't extract any entities, maybe your LLM is not working")
        return None
    if entity_vdb is not None:
        data_for_vdb = {
            compute_mdhash_id(dp["entity_name"], prefix="ent-"): {
                "content": dp["entity_name"] + dp["description"],
                "entity_name": dp["entity_name"],
            }
            for dp in all_entities_data
        }
        await entity_vdb.upsert(data_for_vdb)

    return knwoledge_graph_inst
|
||||
62
rag-web-ui/backend/nano_graphrag/entity_extraction/metric.py
Normal file
62
rag-web-ui/backend/nano_graphrag/entity_extraction/metric.py
Normal file
@@ -0,0 +1,62 @@
|
||||
import dspy
|
||||
from nano_graphrag.entity_extraction.module import Relationship
|
||||
|
||||
|
||||
# NOTE(review): in dspy, a Signature's docstring is the task instruction sent
# to the LM and each field's ``desc`` is part of the prompt — the text below
# is runtime behavior, not documentation. Do not reword it casually.
class AssessRelationships(dspy.Signature):
    """
    Assess the similarity between gold and predicted relationships:
    1. Match relationships based on src_id and tgt_id pairs, allowing for slight variations in entity names.
    2. For matched pairs, compare:
        a) Description similarity (semantic meaning)
        b) Weight similarity
        c) Order similarity
    3. Consider unmatched relationships as penalties.
    4. Aggregate scores, accounting for precision and recall.
    5. Return a final similarity score between 0 (no similarity) and 1 (perfect match).

    Key considerations:
    - Prioritize matching based on entity pairs over exact string matches.
    - Use semantic similarity for descriptions rather than exact matches.
    - Weight the importance of different aspects (e.g., entity matching, description, weight, order).
    - Balance the impact of matched and unmatched relationships in the final score.
    """

    # Inputs: two relationship lists for the LM judge to compare.
    gold_relationships: list[Relationship] = dspy.InputField(
        desc="The gold-standard relationships to compare against."
    )
    predicted_relationships: list[Relationship] = dspy.InputField(
        desc="The predicted relationships to compare against the gold-standard relationships."
    )
    # Output: a scalar similarity judgement in [0, 1].
    similarity_score: float = dspy.OutputField(
        desc="Similarity score between 0 and 1, with 1 being the highest similarity."
    )
|
||||
|
||||
|
||||
def relationships_similarity_metric(
    gold: dspy.Example, pred: dspy.Prediction, trace=None
) -> float:
    """Score predicted relationships against gold ones with an LM judge.

    Builds ``Relationship`` objects from both sides, asks the
    ``AssessRelationships`` chain-of-thought judge to compare them, and
    returns the resulting similarity score as a float in [0, 1].
    """
    judge = dspy.ChainOfThought(AssessRelationships)
    gold_rels = [Relationship(**item) for item in gold["relationships"]]
    pred_rels = [Relationship(**item) for item in pred["relationships"]]
    assessment = judge(
        gold_relationships=gold_rels,
        predicted_relationships=pred_rels,
    )
    return float(assessment.similarity_score)
|
||||
|
||||
|
||||
def entity_recall_metric(
    gold: dspy.Example, pred: dspy.Prediction, trace=None
) -> float:
    """Compute entity-name recall of *pred* against *gold*.

    recall = |pred ∩ gold| / |gold| over ``entity_name`` strings.
    Returns 0.0 when the gold entity set is empty.
    """
    true_set = {item["entity_name"] for item in gold["entities"]}
    pred_set = {item["entity_name"] for item in pred["entities"]}
    true_positives = len(pred_set & true_set)
    false_negatives = len(true_set - pred_set)
    denom = true_positives + false_negatives
    # Fix: previously returned the int ``0`` in the degenerate case despite
    # the ``-> float`` annotation; always return a float now.
    return true_positives / denom if denom > 0 else 0.0
|
||||
330
rag-web-ui/backend/nano_graphrag/entity_extraction/module.py
Normal file
330
rag-web-ui/backend/nano_graphrag/entity_extraction/module.py
Normal file
@@ -0,0 +1,330 @@
|
||||
import dspy
|
||||
from pydantic import BaseModel, Field
|
||||
from nano_graphrag._utils import clean_str
|
||||
from nano_graphrag._utils import logger
|
||||
|
||||
|
||||
"""
|
||||
Obtained from:
|
||||
https://github.com/SciPhi-AI/R2R/blob/6e958d1e451c1cb10b6fc868572659785d1091cb/r2r/providers/prompts/defaults.jsonl
|
||||
"""
|
||||
ENTITY_TYPES = [
|
||||
"PERSON",
|
||||
"ORGANIZATION",
|
||||
"LOCATION",
|
||||
"DATE",
|
||||
"TIME",
|
||||
"MONEY",
|
||||
"PERCENTAGE",
|
||||
"PRODUCT",
|
||||
"EVENT",
|
||||
"LANGUAGE",
|
||||
"NATIONALITY",
|
||||
"RELIGION",
|
||||
"TITLE",
|
||||
"PROFESSION",
|
||||
"ANIMAL",
|
||||
"PLANT",
|
||||
"DISEASE",
|
||||
"MEDICATION",
|
||||
"CHEMICAL",
|
||||
"MATERIAL",
|
||||
"COLOR",
|
||||
"SHAPE",
|
||||
"MEASUREMENT",
|
||||
"WEATHER",
|
||||
"NATURAL_DISASTER",
|
||||
"AWARD",
|
||||
"LAW",
|
||||
"CRIME",
|
||||
"TECHNOLOGY",
|
||||
"SOFTWARE",
|
||||
"HARDWARE",
|
||||
"VEHICLE",
|
||||
"FOOD",
|
||||
"DRINK",
|
||||
"SPORT",
|
||||
"MUSIC_GENRE",
|
||||
"INSTRUMENT",
|
||||
"ARTWORK",
|
||||
"BOOK",
|
||||
"MOVIE",
|
||||
"TV_SHOW",
|
||||
"ACADEMIC_SUBJECT",
|
||||
"SCIENTIFIC_THEORY",
|
||||
"POLITICAL_PARTY",
|
||||
"CURRENCY",
|
||||
"STOCK_SYMBOL",
|
||||
"FILE_TYPE",
|
||||
"PROGRAMMING_LANGUAGE",
|
||||
"MEDICAL_PROCEDURE",
|
||||
"CELESTIAL_BODY",
|
||||
]
|
||||
|
||||
|
||||
class Entity(BaseModel):
    # NOTE(review): the Field ``description`` strings are presumably surfaced
    # to the LM via the JSON schema dspy builds for typed outputs — treat
    # them as prompt text, not mere documentation. (No class docstring on
    # purpose: pydantic would inject it into the model's schema.)
    entity_name: str = Field(..., description="The name of the entity.")
    entity_type: str = Field(..., description="The type of the entity.")
    description: str = Field(
        ..., description="The description of the entity, in details and comprehensive."
    )
    importance_score: float = Field(
        ...,
        ge=0,
        le=1,
        description="Importance score of the entity. Should be between 0 and 1 with 1 being the most important.",
    )

    def to_dict(self):
        # Normalize for graph storage: upper-case + clean the name/type
        # strings and coerce the score to a plain builtin float.
        return {
            "entity_name": clean_str(self.entity_name.upper()),
            "entity_type": clean_str(self.entity_type.upper()),
            "description": clean_str(self.description),
            "importance_score": float(self.importance_score),
        }
|
||||
|
||||
|
||||
class Relationship(BaseModel):
    # NOTE(review): the Field ``description`` strings are presumably surfaced
    # to the LM via the JSON schema dspy builds for typed outputs — treat
    # them as prompt text, not mere documentation. (No class docstring on
    # purpose: pydantic would inject it into the model's schema.)
    src_id: str = Field(..., description="The name of the source entity.")
    tgt_id: str = Field(..., description="The name of the target entity.")
    description: str = Field(
        ...,
        description="The description of the relationship between the source and target entity, in details and comprehensive.",
    )
    weight: float = Field(
        ...,
        ge=0,
        le=1,
        description="The weight of the relationship. Should be between 0 and 1 with 1 being the strongest relationship.",
    )
    order: int = Field(
        ...,
        ge=1,
        le=3,
        description="The order of the relationship. 1 for direct relationships, 2 for second-order, 3 for third-order.",
    )

    def to_dict(self):
        # Normalize for graph storage: upper-case + clean the endpoint ids
        # and coerce numeric fields to plain builtins.
        return {
            "src_id": clean_str(self.src_id.upper()),
            "tgt_id": clean_str(self.tgt_id.upper()),
            "description": clean_str(self.description),
            "weight": float(self.weight),
            "order": int(self.order),
        }
|
||||
|
||||
|
||||
# NOTE(review): a dspy.Signature's docstring is the task instruction sent to
# the LM and each field's ``desc`` is part of the prompt — the text below is
# runtime behavior, not documentation. Do not reword it casually.
class CombinedExtraction(dspy.Signature):
    """
    Given a text document that is potentially relevant to this activity and a list of entity types,
    identify all entities of those types from the text and all relationships among the identified entities.

    Entity Guidelines:
    1. Each entity name should be an actual atomic word from the input text.
    2. Avoid duplicates and generic terms.
    3. Make sure descriptions are detailed and comprehensive. Use multiple complete sentences for each point below:
        a). The entity's role or significance in the context
        b). Key attributes or characteristics
        c). Relationships to other entities (if applicable)
        d). Historical or cultural relevance (if applicable)
        e). Any notable actions or events associated with the entity
    4. All entity types from the text must be included.
    5. IMPORTANT: Only use entity types from the provided 'entity_types' list. Do not introduce new entity types.

    Relationship Guidelines:
    1. Make sure relationship descriptions are detailed and comprehensive. Use multiple complete sentences for each point below:
        a). The nature of the relationship (e.g., familial, professional, causal)
        b). The impact or significance of the relationship on both entities
        c). Any historical or contextual information relevant to the relationship
        d). How the relationship evolved over time (if applicable)
        e). Any notable events or actions that resulted from this relationship
    2. Include direct relationships (order 1) as well as higher-order relationships (order 2 and 3):
        a). Direct relationships: Immediate connections between entities.
        b). Second-order relationships: Indirect effects or connections that result from direct relationships.
        c). Third-order relationships: Further indirect effects that result from second-order relationships.
    3. The "src_id" and "tgt_id" fields must exactly match entity names from the extracted entities list.
    """

    # Inputs: the raw chunk text and the allowed type vocabulary.
    input_text: str = dspy.InputField(
        desc="The text to extract entities and relationships from."
    )
    entity_types: list[str] = dspy.InputField(
        desc="List of entity types used for extraction."
    )
    # Outputs: typed entity and relationship lists parsed from the LM output.
    entities: list[Entity] = dspy.OutputField(
        desc="List of entities extracted from the text and the entity types."
    )
    relationships: list[Relationship] = dspy.OutputField(
        desc="List of relationships extracted from the text and the entity types."
    )
|
||||
|
||||
|
||||
# NOTE(review): the docstring and field ``desc`` strings below are prompt
# text sent to the LM (dspy.Signature semantics), not documentation.
class CritiqueCombinedExtraction(dspy.Signature):
    """
    Critique the current extraction of entities and relationships from a given text.
    Focus on completeness, accuracy, and adherence to the provided entity types and extraction guidelines.

    Critique Guidelines:
    1. Evaluate if all relevant entities from the input text are captured and correctly typed.
    2. Check if entity descriptions are comprehensive and follow the provided guidelines.
    3. Assess the completeness of relationship extractions, including higher-order relationships.
    4. Verify that relationship descriptions are detailed and follow the provided guidelines.
    5. Identify any inconsistencies, errors, or missed opportunities in the current extraction.
    6. Suggest specific improvements or additions to enhance the quality of the extraction.
    """

    # Inputs: the original text plus the extraction to be reviewed.
    input_text: str = dspy.InputField(
        desc="The original text from which entities and relationships were extracted."
    )
    entity_types: list[str] = dspy.InputField(
        desc="List of valid entity types for this extraction task."
    )
    current_entities: list[Entity] = dspy.InputField(
        desc="List of currently extracted entities to be critiqued."
    )
    current_relationships: list[Relationship] = dspy.InputField(
        desc="List of currently extracted relationships to be critiqued."
    )
    # Outputs: free-text critiques consumed by RefineCombinedExtraction.
    entity_critique: str = dspy.OutputField(
        desc="Detailed critique of the current entities, highlighting areas for improvement for completeness and accuracy.."
    )
    relationship_critique: str = dspy.OutputField(
        desc="Detailed critique of the current relationships, highlighting areas for improvement for completeness and accuracy.."
    )
|
||||
|
||||
|
||||
# NOTE(review): the docstring and field ``desc`` strings below are prompt
# text sent to the LM (dspy.Signature semantics), not documentation.
class RefineCombinedExtraction(dspy.Signature):
    """
    Refine the current extraction of entities and relationships based on the provided critique.
    Improve completeness, accuracy, and adherence to the extraction guidelines.

    Refinement Guidelines:
    1. Address all points raised in the entity and relationship critiques.
    2. Add missing entities and relationships identified in the critique.
    3. Improve entity and relationship descriptions as suggested.
    4. Ensure all refinements still adhere to the original extraction guidelines.
    5. Maintain consistency between entities and relationships during refinement.
    6. Focus on enhancing the overall quality and comprehensiveness of the extraction.
    """

    # Inputs: original text, the current extraction, and the critiques
    # produced by CritiqueCombinedExtraction.
    input_text: str = dspy.InputField(
        desc="The original text from which entities and relationships were extracted."
    )
    entity_types: list[str] = dspy.InputField(
        desc="List of valid entity types for this extraction task."
    )
    current_entities: list[Entity] = dspy.InputField(
        desc="List of currently extracted entities to be refined."
    )
    current_relationships: list[Relationship] = dspy.InputField(
        desc="List of currently extracted relationships to be refined."
    )
    entity_critique: str = dspy.InputField(
        desc="Detailed critique of the current entities to guide refinement."
    )
    relationship_critique: str = dspy.InputField(
        desc="Detailed critique of the current relationships to guide refinement."
    )
    # Outputs: the improved extraction that replaces the current one.
    refined_entities: list[Entity] = dspy.OutputField(
        desc="List of refined entities, addressing the entity critique and improving upon the current entities."
    )
    refined_relationships: list[Relationship] = dspy.OutputField(
        desc="List of refined relationships, addressing the relationship critique and improving upon the current relationships."
    )
|
||||
|
||||
|
||||
class TypedEntityRelationshipExtractorException(dspy.Module):
    """Wrap a predictor so that configured exception types degrade to an
    empty prediction instead of propagating.

    Any exception not listed in ``exception_types`` is re-raised unchanged.
    """

    def __init__(
        self,
        predictor: dspy.Module,
        exception_types: tuple[type[Exception], ...] = (Exception,),
    ):
        """
        Args:
            predictor: The wrapped dspy module invoked by ``forward``.
            exception_types: Exception classes to swallow (converted into an
                empty ``dspy.Prediction``).
        """
        super().__init__()
        self.predictor = predictor
        self.exception_types = exception_types

    def copy(self):
        # Fix: the original copy() dropped ``exception_types``, silently
        # resetting every copy to the (Exception,) default.
        return TypedEntityRelationshipExtractorException(
            self.predictor, exception_types=self.exception_types
        )

    def forward(self, **kwargs):
        try:
            return self.predictor(**kwargs)
        except self.exception_types:
            # Degrade gracefully: the pipeline treats this chunk as yielding
            # nothing rather than aborting.
            return dspy.Prediction(entities=[], relationships=[])
|
||||
|
||||
|
||||
class TypedEntityRelationshipExtractor(dspy.Module):
    """Extract typed entities and relationships from free text.

    Runs a ChainOfThought over ``CombinedExtraction`` and, when
    ``self_refine`` is enabled, iteratively critiques and refines the result
    for ``num_refine_turns`` rounds. ``forward`` returns a
    ``dspy.Prediction`` whose ``entities`` and ``relationships`` are plain
    dicts (via the models' ``to_dict``), ready for graph merging.
    """

    def __init__(
        self,
        lm: dspy.LM = None,
        max_retries: int = 3,
        entity_types: list[str] = ENTITY_TYPES,
        self_refine: bool = False,
        num_refine_turns: int = 1,
    ):
        # NOTE(review): ``entity_types`` defaults to the shared module-level
        # ENTITY_TYPES list; it is only read here, never mutated.
        super().__init__()
        self.lm = lm
        self.entity_types = entity_types
        self.self_refine = self_refine
        self.num_refine_turns = num_refine_turns

        self.extractor = dspy.ChainOfThought(
            signature=CombinedExtraction, max_retries=max_retries
        )
        # Wrap the extractor so ValueError (e.g. typed-output parsing or
        # validation failures) yields an empty prediction instead of raising.
        self.extractor = TypedEntityRelationshipExtractorException(
            self.extractor, exception_types=(ValueError,)
        )

        if self.self_refine:
            # Critique/refine sub-modules are only built when self-refinement
            # is requested.
            self.critique = dspy.ChainOfThought(
                signature=CritiqueCombinedExtraction, max_retries=max_retries
            )
            self.refine = dspy.ChainOfThought(
                signature=RefineCombinedExtraction, max_retries=max_retries
            )

    def forward(self, input_text: str) -> dspy.Prediction:
        """Extract entities/relationships from *input_text*; see class doc."""
        # Use the module-specific LM when provided, otherwise the globally
        # configured dspy LM.
        with dspy.context(lm=self.lm if self.lm is not None else dspy.settings.lm):
            extraction_result = self.extractor(
                input_text=input_text, entity_types=self.entity_types
            )

            current_entities: list[Entity] = extraction_result.entities
            current_relationships: list[Relationship] = extraction_result.relationships

            if self.self_refine:
                # Each turn: critique the current extraction, then refine it
                # using that critique; the refined result feeds the next turn.
                for _ in range(self.num_refine_turns):
                    critique_result = self.critique(
                        input_text=input_text,
                        entity_types=self.entity_types,
                        current_entities=current_entities,
                        current_relationships=current_relationships,
                    )
                    refined_result = self.refine(
                        input_text=input_text,
                        entity_types=self.entity_types,
                        current_entities=current_entities,
                        current_relationships=current_relationships,
                        entity_critique=critique_result.entity_critique,
                        relationship_critique=critique_result.relationship_critique,
                    )
                    logger.debug(
                        f"entities: {len(current_entities)} | refined_entities: {len(refined_result.refined_entities)}"
                    )
                    logger.debug(
                        f"relationships: {len(current_relationships)} | refined_relationships: {len(refined_result.refined_relationships)}"
                    )
                    current_entities = refined_result.refined_entities
                    current_relationships = refined_result.refined_relationships

            # Convert model objects to plain dicts for downstream merging.
            entities = [entity.to_dict() for entity in current_entities]
            relationships = [
                relationship.to_dict() for relationship in current_relationships
            ]

        return dspy.Prediction(entities=entities, relationships=relationships)
|
||||
Reference in New Issue
Block a user