Update app.py
app.py CHANGED
@@ -9,21 +9,25 @@ from g4f import Provider, models
 app = Flask(__name__)
 @app.route("/<txt>")
 def t5(txt):
+    prompt_template = """Write a concise summary of the following around 450 words:
+    "{txt}"
+    CONCISE SUMMARY:"""
+    prompt = PromptTemplate.from_template(prompt_template)
     # Instantiate the LLM model
     llm = G4FLLM(
         model=models.gpt_35_turbo,
         provider=Provider.FreeGpt,
     )
-
-    text_splitter = CharacterTextSplitter()
-    texts = text_splitter.split_text(txt)
-    # Create multiple documents
-    docs = [Document(page_content=t) for t in texts]
-    # Text summarization
-    chain = load_summarize_chain(llm, chain_type='map_reduce')
-    output = chain.run(docs)
+    llm_chain = LLMChain(llm=llm, prompt=prompt)
 
-
+    # Define StuffDocumentsChain
+    stuff_chain = StuffDocumentsChain(
+        llm_chain=llm_chain, document_variable_name="text"
+    )
+
+    docs = loader.load()
+
+    return jsonify({"output": stuff_chain.run(docs)})
 
 if __name__ == "__main__":
     app.run(host="0.0.0.0", port=7860)
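For reference, below is a minimal, self-contained sketch of what the updated route could look like if the goal is to summarize the `<txt>` path parameter directly. It is not the committed code: the diff above calls `loader.load()` without defining a `loader`, and its prompt uses `{txt}` while the chain's `document_variable_name` is `"text"`, so the sketch wraps the incoming text in a `Document` and aligns the variable names. The import paths (`langchain`, `langchain_g4f`) are assumptions inferred from the identifiers in the diff.

    # Sketch only: assumes langchain and langchain_g4f are installed and that the
    # route should summarize the <txt> path parameter itself (no external loader).
    from flask import Flask, jsonify
    from g4f import Provider, models
    from langchain.chains import LLMChain, StuffDocumentsChain
    from langchain.docstore.document import Document
    from langchain.prompts import PromptTemplate
    from langchain_g4f import G4FLLM

    app = Flask(__name__)

    @app.route("/<txt>")
    def t5(txt):
        # The prompt variable name must match document_variable_name below.
        prompt_template = """Write a concise summary of the following around 450 words:
        "{text}"
        CONCISE SUMMARY:"""
        prompt = PromptTemplate.from_template(prompt_template)

        # Instantiate the LLM model
        llm = G4FLLM(
            model=models.gpt_35_turbo,
            provider=Provider.FreeGpt,
        )
        llm_chain = LLMChain(llm=llm, prompt=prompt)

        # "Stuff" chain: concatenates the documents into the prompt's {text} slot.
        stuff_chain = StuffDocumentsChain(
            llm_chain=llm_chain, document_variable_name="text"
        )

        # Wrap the incoming text as a single Document instead of the undefined loader.
        docs = [Document(page_content=txt)]

        return jsonify({"output": stuff_chain.run(docs)})

    if __name__ == "__main__":
        app.run(host="0.0.0.0", port=7860)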