awakenai committed on
Commit
9eca664
1 Parent(s): 3ae8602

Update main.py

Browse files
Files changed (1) hide show
  1. main.py +26 -9
main.py CHANGED
@@ -1,21 +1,38 @@
1
- #pip install fastapi
2
- #uvicorn main:app --reload
3
- #import gradio as gr
4
-
5
- from transformers import pipeline
6
  from fastapi import FastAPI
7
-
8
  app = FastAPI()
9
 
10
- #generator = pipeline('text-generation',model='gpt2')
11
- #generator = pipeline('text-generation',model='Open-Orca/Mistral-7B-OpenOrca')
12
  #generator = pipeline("text-generation", model="lmsys/vicuna-7b-v1.5")
13
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
14
 
15
  @app.get("/")
16
  async def root():
17
- return {"message": "Hello World"}
18
  #return generator('What is love',max_length=100, num_return_sequences=1)
 
19
 
20
  @app.post("/predict")
21
  async def root(text):
 
1
+ #from transformers import pipeline
 
 
 
 
2
  from fastapi import FastAPI
 
3
  app = FastAPI()
4
 
 
 
5
  #generator = pipeline("text-generation", model="lmsys/vicuna-7b-v1.5")
6
 
7
# Haystack RAG setup: BM25 retriever over an in-memory store + an LLM PromptNode.
import os

from haystack.document_stores import InMemoryDocumentStore
from haystack.utils import build_pipeline, add_example_data, print_answers

# We are model agnostic :) Here, you can choose from: "anthropic", "cohere", "huggingface", and "openai".
provider = "openai"

# SECURITY: never hardcode API keys in source control — a previous revision of
# this file committed a live-looking OpenAI key, which must be considered
# compromised and revoked. Read the key from the environment instead.
API_KEY = os.environ.get("OPENAI_API_KEY", "")

# We support many different databases. Here we load a simple and lightweight in-memory database.
document_store = InMemoryDocumentStore(use_bm25=True)

# Index TXT documents into the Haystack DocumentStore.
# You can also provide a folder with your local documents.
add_example_data(document_store, "/content/Books")

# Build a pipeline with a Retriever to get relevant documents to the query
# and a PromptNode interacting with LLMs using a custom prompt.
pipeline = build_pipeline(provider, API_KEY, document_store)

# Ask a question on the data just added (runs once at import time — NOTE(review):
# this blocks app startup on a network call to the LLM provider; consider moving
# it into the request handler or a startup event).
result = pipeline.run(query="What is job yoga?")

# For details, like which documents were used to generate the answer, look into the <result> object.
print_answers(result, details="medium")
30
 
31
@app.get("/")
async def root():
    """Return the precomputed pipeline result for the preset query.

    Bug fix: the previous body did ``return print_answers(result, ...)``, but
    Haystack's ``print_answers`` is a console pretty-printer that returns
    ``None`` — the endpoint therefore always responded with ``null``.  We keep
    the console output for logs and return the pipeline ``result`` itself.

    NOTE(review): ``result`` may contain Haystack ``Answer``/``Document``
    objects; confirm FastAPI can serialize them, or project the fields needed.
    """
    print_answers(result, details="medium")
    return result
36
 
37
  @app.post("/predict")
38
  async def root(text):