yudari committed
Commit 2400df5
Parent: dcbd779

Update app.py

Files changed (1)
app.py +8 -14
app.py CHANGED
@@ -1,28 +1,22 @@
- from fastapi import FastAPI
- from transformers import pipeline
-
- # Create a new FastAPI app instance
- app = FastAPI()
-
  # Initialize the text generation pipeline
  # This function will be able to generate text
  # given an input.
- pipe = pipeline("text-search-semantic",
-                 model="NousResearch/Llama-2-13b-hf")
-
+ pipe = pipeline("text2text-generation", model="NousResearch/Llama-2-13b-hf")
+
  # Define a function to handle the GET request at `/generate`
- # The generate() function is defined as a FastAPI route that takes a
- # string parameter called text. The function generates text based on the # input using the pipeline() object, and returns a JSON response
+ # The generate() function is defined as a FastAPI route that takes a
+ # string parameter called text. The function generates text based on the
+ # input using the pipeline() object, and returns a JSON response
  # containing the generated text under the key "output"
  @app.get("/generate")
  def generate(text: str):
      """
      Using the text2text-generation pipeline from `transformers`, generate text
-     from the given input text. The model used is `google/flan-t5-small`, which
-     can be found [here](<https://huggingface.co/google/flan-t5-small>).
+     from the given input text. The model used is `NousResearch/Llama-2-13b-hf`,
+     which can be found [here](https://huggingface.co/NousResearch/Llama-2-13b-hf).
      """
      # Use the pipeline to generate text from the given input text
      output = pipe(text)
-
+
      # Return the generated text in a JSON response
      return {"output": output[0]["generated_text"]}
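For reference, a minimal sketch of what the resulting app.py could look like. It assumes the FastAPI import and the app = FastAPI() instance (which this diff removes but the @app.get route still requires) are kept, and it assumes the "text-generation" task instead of "text2text-generation", since NousResearch/Llama-2-13b-hf is a decoder-only causal language model rather than a seq2seq model; the max_new_tokens value is an arbitrary illustration. This is a sketch under those assumptions, not the committed file.

from fastapi import FastAPI
from transformers import pipeline

# Create a new FastAPI app instance
app = FastAPI()

# Initialize the text generation pipeline.
# Assumption: "text-generation" is used here (rather than "text2text-generation")
# because NousResearch/Llama-2-13b-hf is a decoder-only causal LM.
pipe = pipeline("text-generation", model="NousResearch/Llama-2-13b-hf")

# Handle GET requests at `/generate`: take a `text` query parameter, generate
# a continuation with the pipeline, and return it under the key "output".
@app.get("/generate")
def generate(text: str):
    # Assumption: cap the generation length; adjust as needed.
    output = pipe(text, max_new_tokens=100)
    return {"output": output[0]["generated_text"]}

The app can then be served with, for example, uvicorn app:app --host 0.0.0.0 --port 7860 (7860 being the usual Hugging Face Spaces port) and queried with a GET request such as /generate?text=Hello.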