Spaces:
Sleeping
Sleeping
Delete app.py
Browse files
app.py
DELETED
@@ -1,157 +0,0 @@
|
|
1 |
-
from langchain_community.document_loaders import WebBaseLoader
|
2 |
-
from langchain.prompts import ChatPromptTemplate
|
3 |
-
from langchain.output_parsers import ResponseSchema
|
4 |
-
from langchain.output_parsers import StructuredOutputParser
|
5 |
-
from langchain.prompts import PromptTemplate
|
6 |
-
from langchain.chat_models import ChatOpenAI
|
7 |
-
from langchain.chains import LLMChain
|
8 |
-
from dotenv import load_dotenv
|
9 |
-
import requests
|
10 |
-
from urllib.parse import urlparse
|
11 |
-
import streamlit as st
|
12 |
-
|
13 |
-
|
14 |
-
# Load environment variables (e.g. OPENAI_API_KEY) from a local .env file.
load_dotenv()
|
15 |
-
|
16 |
-
# Check the domain of the URL (used later to verify it is www.linkedin.com).
def extract_domain(url):
    """Return the network-location (domain) part of *url*, e.g. 'www.linkedin.com'."""
    return urlparse(url).netloc
21 |
-
|
22 |
-
|
23 |
-
def is_shortened_url(url, timeout=10):
    """Return True when *url* is a shortened link, i.e. a HEAD request
    (following redirects) lands on a different final URL.

    Parameters:
        url: the URL to probe.
        timeout: seconds before the HEAD request is aborted (new,
            backward-compatible parameter — the original call had no
            timeout and could hang indefinitely).

    Returns:
        True if the resolved URL differs from the input; False otherwise,
        including on any request failure (best-effort behavior preserved).
    """
    try:
        # allow_redirects=True makes requests follow the redirect chain so
        # response.url is the final destination.
        response = requests.head(url, allow_redirects=True, timeout=timeout)
        final_url = response.url
        if final_url != url:
            return True
        return False
    except requests.exceptions.RequestException as e:
        # Deliberate best-effort: report and treat as "not shortened".
        print("Error:", e)
        return False
33 |
-
|
34 |
-
def expand_short_url(short_url, timeout=10):
    """Resolve a shortened URL to its final destination URL.

    Parameters:
        short_url: the shortened URL to expand.
        timeout: seconds before the HEAD request is aborted (new,
            backward-compatible parameter — the original call had no
            timeout and could hang indefinitely).

    Returns:
        The expanded URL string on HTTP 200, otherwise None (errors are
        printed, preserving the original best-effort behavior).
    """
    try:
        response = requests.head(short_url, allow_redirects=True, timeout=timeout)
        if response.status_code == 200:
            # response.url is the final URL after following all redirects.
            return response.url
        else:
            print("Error: Short URL couldn't be expanded.")
            return None
    except requests.exceptions.RequestException as e:
        print("Error:", e)
        return None
45 |
-
|
46 |
-
def get_original_url(url):
    """Return the final destination for *url*: expand it when it is a
    shortened link, otherwise hand it back unchanged."""
    return expand_short_url(url) if is_shortened_url(url) else url
51 |
-
|
52 |
-
|
53 |
-
|
54 |
-
# Full pipeline: fetch the page with WebBaseLoader, use the LLM to isolate the
# post content, paraphrase it, then extract keywords/take-aways/highlights.
def paraphrased_post(url):
    """Fetch a LinkedIn post URL, paraphrase it, and extract metadata.

    Parameters:
        url: the (already expanded) LinkedIn post URL.

    Returns:
        A 4-tuple: (paraphrased_text, keywords, take_aways, highlights),
        where the last three are comma-joined strings.

    Fixes over the original: one shared ChatOpenAI instance instead of two
    identical ones; both chains now use .invoke() consistently (the second
    chain previously used the deprecated __call__ style); the two prompt
    templates no longer shadow each other's local names.
    """
    loader = WebBaseLoader([url], encoding='utf-8')
    docs = loader.load()

    # Single model instance shared by both chains (was constructed twice).
    llm = ChatOpenAI(temperature=0)

    # Step 1: ask the LLM to strip page chrome and keep only the post body.
    extract_template = """You are a helpful LinkedIn webscrapper. You are provided with a data , extract the content of the post only.
    {docs}"""
    extract_prompt = PromptTemplate(template=extract_template, input_variables=['docs'])
    extract_chain = LLMChain(llm=llm, prompt=extract_prompt)
    result = extract_chain.invoke({'docs': docs}, return_only_outputs=True)
    data = result['text']

    # Step 2: paraphrase the extracted post content.
    paraphrase_template = """You are a helpful LinkedIn post paraphraser and plagiarism remover bot. You are provided with LinkedIn post content and your task is to paraphrase it and remove plagiarism .Return the output in the format with spaces or stickers if present.
    {data}"""
    paraphrase_prompt = PromptTemplate(template=paraphrase_template, input_variables=['data'])
    paraphrase_chain = LLMChain(llm=llm, prompt=paraphrase_prompt)
    result2 = paraphrase_chain.invoke({'data': data}, return_only_outputs=True)

    # Step 3: pull structured metadata out of the paraphrased text.
    data2 = extract_data(result2['text'])
    keywords = ",".join(data2['Keywords'])
    take_aways = ",".join(data2['Take Aways'])
    highlights = ",".join(data2['Highlights'])

    return result2['text'], keywords, take_aways, highlights
86 |
-
|
87 |
-
|
88 |
-
def extract_data(post_data):
    """Use the LLM to pull keywords, take-aways and highlights from a post.

    Returns a dict with keys 'Keywords', 'Take Aways' and 'Highlights',
    each mapping to a list, as parsed by StructuredOutputParser.
    """
    # Declare the three list-valued fields the parser should produce.
    schemas = [
        ResponseSchema(
            name="Keywords",
            description="These are the keywords extracted from LinkedIn post",
            type="list",
        ),
        ResponseSchema(
            name="Take Aways",
            description="These are the take aways extracted from LinkedIn post",
            type="list",
        ),
        ResponseSchema(
            name="Highlights",
            description="These are the highlights extracted from LinkedIn post",
            type="list",
        ),
    ]
    parser = StructuredOutputParser.from_response_schemas(schemas)
    format_instructions = parser.get_format_instructions()

    template = """
    You are a helpful keywords , take aways and highlights extractor from the post of LinkedIn Bot. Your task is to extract relevant keywords , take aways and highlights extractor.
    From the following text message, extract the following information:

    text message: {content}
    {format_instructions}
    """

    # Render the chat prompt with the post text plus the parser's
    # formatting instructions, then run it through the model.
    chat_prompt = ChatPromptTemplate.from_template(template)
    messages = chat_prompt.format_messages(
        content=post_data, format_instructions=format_instructions
    )
    model = ChatOpenAI(temperature=0)
    reply = model(messages)
    return parser.parse(reply.content)
120 |
-
|
121 |
-
|
122 |
-
|
123 |
-
|
124 |
-
# ------------------------ streamlit code -------------------------------------

# Module-level Streamlit setup: page config must be the first st.* call.
st.set_page_config(page_title="Paraphrase LinkedIn post")
st.title("Paraphrase LinkedIn Post")
129 |
-
|
130 |
-
def main():
    """Streamlit entry point: read a post URL from the sidebar, validate it,
    then display the paraphrased post with its extracted metadata."""
    st.sidebar.header("Paraphrase LinkedIn Post")
    link = st.sidebar.text_input("Enter a Post URL:")

    # Guard clauses replace the original nested if/else; the displayed
    # messages and the order of effects are unchanged.
    if not link:
        st.sidebar.error("Please Enter a Link")
        return

    original_url = get_original_url(link)
    if extract_domain(original_url) != "www.linkedin.com":
        st.sidebar.error("Put a valid LinkedIn post url only")
        return

    paraphrased_content, keywords, take_aways, highlights = paraphrased_post(original_url)
    st.write(paraphrased_content)
    st.write("Keywords:", keywords)
    st.write("Take aways:", take_aways)
    st.write("Highlights:", highlights)
148 |
-
|
149 |
-
|
150 |
-
# Script entry point: run the Streamlit UI when executed directly.
if __name__ == "__main__":
    main()
152 |
-
|
153 |
-
|
154 |
-
|
155 |
-
|
156 |
-
|
157 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|