Update app.py
app.py CHANGED
@@ -12,25 +12,26 @@ def index():
     return jsonify({"output": ""})
 @app.route("/<text>",methods=["GET"])
 def t5(text):
-    prompt_template = """Write a concise summary of the following
-    "{text}"
-    CONCISE SUMMARY:"""
-    prompt = PromptTemplate.from_template(prompt_template)
-    # Instantiate the LLM model
-    llm = G4FLLM(
-        model=models.gpt_35_turbo,
-        provider=Provider.FreeGpt,
-    )
-    llm_chain = LLMChain(llm=llm, prompt=prompt)
+    # prompt_template = """Write a concise summary around 450 words of the following :
+    # "{text}"
+    # CONCISE SUMMARY:"""
+    # prompt = PromptTemplate.from_template(prompt_template)
+    # # Instantiate the LLM model
+    # llm = G4FLLM(
+    #     model=models.gpt_35_turbo,
+    #     provider=Provider.FreeGpt,
+    # )
+    # llm_chain = LLMChain(llm=llm, prompt=prompt)

-    # Define StuffDocumentsChain
-    stuff_chain = StuffDocumentsChain(
-        llm_chain=llm_chain, document_variable_name="text"
-    )
+    # # Define StuffDocumentsChain
+    # stuff_chain = StuffDocumentsChain(
+    #     llm_chain=llm_chain, document_variable_name="text"
+    # )

-    docs = loader.load()
-    output = stuff_chain.run(docs)
-    return jsonify({"output": output})
+    # docs = loader.load()
+    output = stuff_chain.run(docs)
+    output = text
+    return jsonify({"output": output})

 if __name__ == "__main__":
     app.run(host="0.0.0.0", port=7860)
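In the committed version the whole LangChain / G4F pipeline is commented out and the route effectively echoes the URL path segment back as JSON (output = text). Note that the new code still calls stuff_chain.run(docs) even though stuff_chain is commented out, so a request to /<text> would raise a NameError before the echo line runs unless that call is disabled as well. For reference, below is a minimal, self-contained sketch of how the disabled pipeline would look if re-enabled. The imports, the Flask scaffolding, and the use of Document(page_content=text) in place of the undefined loader are assumptions for illustration; the prompt, the chain classes, and the G4FLLM arguments come from the diff above.

# Hypothetical sketch, not the committed app.py: the summarization pipeline
# from the diff, re-enabled and made self-contained.
from flask import Flask, jsonify
from g4f import Provider, models
from langchain.chains import LLMChain, StuffDocumentsChain
from langchain.docstore.document import Document
from langchain.prompts import PromptTemplate
from langchain_g4f import G4FLLM

app = Flask(__name__)

@app.route("/<text>", methods=["GET"])
def t5(text):
    prompt_template = """Write a concise summary around 450 words of the following :
    "{text}"
    CONCISE SUMMARY:"""
    prompt = PromptTemplate.from_template(prompt_template)

    # Free GPT-3.5 backend exposed through g4f, wrapped as a LangChain LLM.
    llm = G4FLLM(
        model=models.gpt_35_turbo,
        provider=Provider.FreeGpt,
    )
    llm_chain = LLMChain(llm=llm, prompt=prompt)

    # StuffDocumentsChain stuffs the documents into the prompt's {text} slot.
    stuff_chain = StuffDocumentsChain(
        llm_chain=llm_chain, document_variable_name="text"
    )

    # The diff calls loader.load(); wrapping the path segment as a Document
    # is an assumption standing in for that undefined loader.
    docs = [Document(page_content=text)]
    output = stuff_chain.run(docs)
    return jsonify({"output": output})

if __name__ == "__main__":
    app.run(host="0.0.0.0", port=7860)

With this wiring, a GET request such as /Some%20long%20article%20text returns a JSON body whose output field holds the model's summary of the decoded path text, instead of the plain echo produced by the committed version.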