Upload app.py
app.py
ADDED
@@ -0,0 +1,44 @@
+from langchain.text_splitter import CharacterTextSplitter
+from langchain.docstore.document import Document
+from langchain.chains.summarize import load_summarize_chain
+from langchain import OpenAI, LLMChain, HuggingFaceHub
+import textwrap
+
+
+def summarize(doc):
+    # Initialize an instance of CharacterTextSplitter and split the input into chunks
+    text_splitter = CharacterTextSplitter(chunk_size=500, chunk_overlap=0,
+                                          separator="\n")
+    chunks = text_splitter.split_text(doc)
+    doc_store = [Document(page_content=text) for text in chunks]
+    llm_model = OpenAI(model_name="text-davinci-003", temperature=0)  # define your language model
+
+    summarization_chain2 = load_summarize_chain(llm=llm_model,
+                                                chain_type='map_reduce',  # define the chain type
+                                                verbose=True)
+    output_summary = summarization_chain2.run(doc_store)
+    wrapped_text = textwrap.fill(output_summary, width=100)
+
+    return wrapped_text
+
+
+if __name__ == "__main__":
+    # make a Gradio interface
+    import gradio as gr
+
+    outputs = gr.outputs.Textbox()
+
+    app = gr.Interface(fn=summarize, inputs='text', outputs=outputs,
+                       description="This is a text summarization model").launch()
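For a quick local check without launching the Gradio app, the summarize function can be imported and called directly. This is only an illustrative sketch, not part of the uploaded file: it assumes app.py is importable from the working directory, that a real OpenAI API key is available (the value below is a hypothetical placeholder), and the sample text is made up.

import os

os.environ["OPENAI_API_KEY"] = "sk-..."  # hypothetical placeholder; the OpenAI model above needs a real key

from app import summarize  # launch() is guarded by __name__ == "__main__", so importing does not start Gradio

sample = "First paragraph of the document.\nSecond paragraph with more detail.\nThird paragraph wrapping up."
print(summarize(sample))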