import sys

from yachalk import chalk

# Make the parent directory importable so that ``ollama.client`` resolves
# when this module is run from its own directory.
sys.path.append("..")

import json

import ollama.client as client


def _parse_llm_json(response, metadata):
    """Parse an LLM response expected to be a JSON list of objects.

    Merges *metadata* into every parsed item.

    Args:
        response: Raw text returned by the model.
        metadata: Dict of extra key/value pairs added to each item.

    Returns:
        A list of dicts on success, or None when the response is not
        valid JSON (the raw response is printed for debugging).
    """
    try:
        result = json.loads(response)
        # Merge caller-supplied metadata into every extracted item.
        return [dict(item, **metadata) for item in result]
    except (ValueError, TypeError):
        # ValueError covers json.JSONDecodeError; TypeError covers a
        # well-formed JSON payload that is not a list of objects.
        print("\n\nERROR ### Here is the buggy response: ", response, "\n\n")
        return None


def extractConcepts(prompt: str, metadata=None, model="mistral-openorca:latest"):
    """Extract key concepts (and non-personal entities) from *prompt*.

    Args:
        prompt: The context text to analyse.
        metadata: Optional dict merged into every extracted concept.
        model: Name of the Ollama model to query.

    Returns:
        A list of dicts with ``entity``/``importance``/``category`` keys
        (plus *metadata*), or None if the model response is not valid JSON.
    """
    # Fresh dict per call — avoids the shared mutable-default pitfall.
    metadata = {} if metadata is None else metadata
    SYS_PROMPT = (
        "Your task is to extract the key concepts (and non-personal entities) mentioned in the given context. "
        "Focus on identifying only the most important and fundamental concepts. If needed, break the concepts down into simpler, more atomic components. "
        "Ensure each concept is clear and distinct. "
        "Categorize each concept into one of the following categories: [event, concept, place, object, document, organisation, condition, misc].\n"
        "For each extracted concept, assess its contextual importance on a scale from 1 to 5, with 5 being the highest level of importance. "
        "Format your output as a JSON list with the following structure:\n"
        "[\n"
        "    {\n"
        '        "entity": "The Concept",\n'
        '        "importance": "The contextual importance of the concept on a scale of 1 to 5",\n'
        '        "category": "The Type of Concept",\n'
        "    },\n"
        "    {\n"
        '        "entity": "Another Concept",\n'
        '        "importance": "The contextual importance of this concept on a scale of 1 to 5",\n'
        '        "category": "The Type of Concept",\n'
        "    },\n"
        "    // Additional concepts can be added in the same format\n"
        "]\n"
        "Ensure the output is well-structured and free of errors."
    )
    response, _ = client.generate(model_name=model, system=SYS_PROMPT, prompt=prompt)
    return _parse_llm_json(response, metadata)


def graphPrompt(input: str, metadata=None, model="mistral-openorca:latest"):
    """Extract an ontology graph (term pairs + relations) from *input*.

    NOTE(review): the parameter name ``input`` shadows the builtin; it is
    kept unchanged so existing keyword-argument callers keep working.

    Args:
        input: The context chunk to extract the ontology from.
        metadata: Optional dict merged into every extracted edge.
        model: Name of the Ollama model to query; None selects the default.

    Returns:
        A list of dicts with ``node_1``/``node_2``/``edge`` keys
        (plus *metadata*), or None if the model response is not valid JSON.
    """
    if model is None:
        model = "mistral-openorca:latest"
    # Fresh dict per call — avoids the shared mutable-default pitfall.
    metadata = {} if metadata is None else metadata
    SYS_PROMPT = (
        "You are a network graph maker who extracts terms and their relations from a given context. "
        "You are provided with a context chunk (delimited by ```) Your task is to extract the ontology "
        "of terms mentioned in the given context. These terms should represent the key concepts as per the context. \n"
        "Thought 1: While traversing through each sentence, Think about the key terms mentioned in it.\n"
        "\tTerms may include object, entity, location, organization, person, \n"
        "\tcondition, acronym, documents, service, concept, etc.\n"
        "\tTerms should be as atomistic as possible\n\n"
        "Thought 2: Think about how these terms can have one on one relation with other terms.\n"
        "\tTerms that are mentioned in the same sentence or the same paragraph are typically related to each other.\n"
        "\tTerms can be related to many other terms\n\n"
        "Thought 3: Find out the relation between each such related pair of terms. \n\n"
        "Format your output as a list of json. Each element of the list contains a pair of terms"
        "and the relation between them, like the follwing: \n"
        "[\n"
        "   {\n"
        '       "node_1": "A concept from extracted ontology",\n'
        '       "node_2": "A related concept from extracted ontology",\n'
        '       "edge": "relationship between the two concepts, node_1 and node_2 in one or two sentences"\n'
        "   }, {...}\n"
        "]"
    )
    USER_PROMPT = f"context: ```{input}``` \n\n output: "
    response, _ = client.generate(model_name=model, system=SYS_PROMPT, prompt=USER_PROMPT)
    return _parse_llm_json(response, metadata)