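"""Gradio question-answering app.

Retrieves the most relevant text chunks from a Weaviate index and asks an
OpenAI chat model (via LangChain) to answer the user's question given that
context. Requires the OPENAI_API_KEY, WEAVIATE_URL and WEAVIATE_API_KEY
environment variables.
"""
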
import os

import gradio as gr
import weaviate
from langchain.chains import LLMChain, SequentialChain
from langchain.chat_models import ChatOpenAI
from langchain.prompts import ChatPromptTemplate

collection_name = "Chunk"  # Weaviate class holding the indexed text chunks

MODEL = "gpt-3.5-turbo"
LANGUAGE = "en"  # nl / en
llm = ChatOpenAI(
    model_name=MODEL,
    temperature=0.0,
    openai_api_key=os.environ["OPENAI_API_KEY"],
)


def get_answer_given_the_context(llm, prompt, context) -> str:
    # Plain (non-f) string: {prompt} and {context} stay template variables for
    # ChatPromptTemplate to fill in at call time, so braces in the retrieved
    # context cannot break template parsing.
    template = """
    Provide an answer to the prompt given the context.

    <PROMPT>

    {prompt}

    <CONTEXT>

    {context}

    """

    answer_prompt = ChatPromptTemplate.from_template(template=template)
    answer_chain = LLMChain(
        llm=llm,
        prompt=answer_prompt,
        output_key="answer",
    )

    # A single-step SequentialChain, kept so further chains can be appended.
    chain = SequentialChain(
        chains=[answer_chain],
        input_variables=["prompt", "context"],
        output_variables=[answer_chain.output_key],
        verbose=False,
    )
    return chain({"prompt": prompt, "context": context})["answer"]


def predict(prompt):
    # Connect to the Weaviate instance that holds the embedded chunks.
    client = weaviate.Client(
        url=os.environ["WEAVIATE_URL"],
        auth_client_secret=weaviate.AuthApiKey(api_key=os.environ["WEAVIATE_API_KEY"]),
        additional_headers={"X-OpenAI-Api-Key": os.environ["OPENAI_API_KEY"]},
    )

    # Vector search: fetch the five chunks closest to the user's prompt
    # (near_text expects a list of concepts).
    search_result = (
        client.query.get(class_name=collection_name, properties=["text"])
        .with_near_text({"concepts": [prompt]})
        # .with_generate(single_prompt="{text}")
        .with_limit(5)
        .do()
    )
    context_list = [
        element["text"] for element in search_result["data"]["Get"][collection_name]
    ]
    context = "\n".join(context_list)

    return get_answer_given_the_context(llm=llm, prompt=prompt, context=context)
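

# For a quick smoke test without the UI (assumes the environment variables
# above are set and the "Chunk" class exists in Weaviate):
#     print(predict("What is the release management process?"))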


iface = gr.Interface(
    fn=predict,  # the function to wrap
    inputs="text",  # the input type
    outputs="text",  # the output type
    examples=[
        [f"what is the process of raising an incident?"],
        [f"What is Cx0 program management?"],
        [
            f"What is process for identifying risksthat can impact the desired outcomes of a project?"
        ],
        [f"What is the release management process?"],
    ],
)

if __name__ == "__main__":
    iface.launch()