from fastapi import FastAPI

from haystack.document_stores import InMemoryDocumentStore
from haystack.utils import build_pipeline, add_example_data, print_answers

app = FastAPI()

# Alternative: serve a local Hugging Face model directly instead of the Haystack pipeline.
# from transformers import pipeline
# generator = pipeline("text-generation", model="lmsys/vicuna-7b-v1.5")

# We are model agnostic :) Choose a provider from: "anthropic", "cohere", "huggingface", or "openai".
provider = "openai"
API_KEY = "YOUR_API_KEY"  # ADD YOUR KEY HERE -- never commit a real key to version control.
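
# A safer sketch (assumption: the key is stored in an environment variable rather
# than hard-coded in source):
# import os
# API_KEY = os.environ["OPENAI_API_KEY"]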

# Many different document stores are supported. Here we use a simple, lightweight
# in-memory store with BM25 keyword retrieval enabled.
document_store = InMemoryDocumentStore(use_bm25=True)
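
# A hedged alternative sketch: for persistent storage you could swap in another
# store, e.g. Elasticsearch (assumes a local Elasticsearch instance on the default
# port; ElasticsearchDocumentStore lives in Haystack's document_stores module):
# from haystack.document_stores import ElasticsearchDocumentStore
# document_store = ElasticsearchDocumentStore(host="localhost", index="documents")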

# Index documents into the Haystack DocumentStore. The commented line below loads the
# example Game of Thrones articles; here we index a local folder of reference documents instead.
# add_example_data(document_store, "data/GoT_getting_started")
add_example_data(document_store, "./Reference")

# Build a pipeline with a Retriever that fetches documents relevant to the query
# and a PromptNode that queries the LLM with a custom prompt.
pipeline = build_pipeline(provider, API_KEY, document_store)

# To ask a question directly on the indexed data (outside the API), run e.g.:
#   result = pipeline.run(query="Your question here")
#   print_answers(result, details="minimum")  # details: minimum, medium, or all
# The <result> object also records which documents were used to generate the answer.

# Disabled root endpoint, kept for reference; it assumes a <result> computed beforehand.
# @app.get("/")
# async def root():
#     return print_answers(result, details="medium")


@app.post("/predict")
async def predict(text: str):
    # Run the retrieval + LLM pipeline on the incoming question and return the raw result.
    return pipeline.run(query=text)
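
# A minimal usage sketch (assumptions: this file is saved as main.py and uvicorn
# is installed):
#   uvicorn main:app --reload
# Then query the endpoint; `text` is passed as a query parameter because it is a
# bare function argument:
#   curl -X POST "http://localhost:8000/predict?text=What%20do%20these%20documents%20cover"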