File size: 8,471 Bytes
e641e34 92ad35f 42ea31f d2525f1 9540be5 e20cfda 1655962 e20cfda bb8e0ce e20cfda bb8e0ce e20cfda d2525f1 aa77331 d2525f1 f3af7d2 91cbc82 cbad031 70d6d78 e762edb cbad031 9ff5681 e641e34 a45b742 902ad5f a45b742 85a602b 902ad5f a45b742 902ad5f a45b742 85a602b aa77331 2c779a0 aa77331 85a602b a45b742 85a602b 70a77fc 85a602b a45b742 12655ff 85a602b ab0b818 820e034 be4c373 12655ff 70a77fc a45b742 a8ed065 a45b742 902ad5f a45b742 902ad5f a45b742 902ad5f a45b742 20d50d2 ee46b6a 20d50d2 59a037f ced4b4f aa77331 7e7deeb f345069 7e7deeb d2525f1 5553cc7 3fbe171 d2525f1 7e7deeb 0bc676a 31f8607 9540be5 2c779a0 e3bc789 d304120 e3bc789 d304120 2c779a0 68ac0eb 2c779a0 7e7deeb 85a602b 42ea31f 7845b8a aa77331 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 |
import gradio as gr
import wikipedia
import requests
from bs4 import BeautifulSoup
import pyjokes
def essay_query(payload):
    """Summarize *payload* with the Hugging Face bart-large-cnn inference API.

    Args:
        payload: text (or HF inference payload) to summarize.
    Returns:
        Decoded JSON response — on success a list like
        ``[{"summary_text": ...}]`` (callers index ``[0]['summary_text']``).
    """
    API_URL = "https://api-inference.huggingface.co/models/facebook/bart-large-cnn"
    data = json.dumps(payload)
    # FIX: the original call had no timeout, so a stalled connection would
    # hang the chatbot forever. 30 s covers the HF model cold-start case.
    response = requests.request("POST", API_URL, headers=headers, data=data, timeout=30)
    return json.loads(response.content.decode("utf-8"))
def essay(name):
    """Build an "essay" for *name* by scraping Google result snippets and
    summarizing each snippet through :func:`essay_query`.

    Returns the concatenated summary texts (may be "" if nothing matched).
    """
    result_count = 2          # always 2 here, so the summarize branch below always runs
    f_result = ""
    result = {"",""}          # set literal; duplicates collapse to a single "" seed entry
    text =""
    url = "https://www.google.com/search?q="+name
    r = requests.get(url)
    soup = BeautifulSoup(r.text,"html.parser")
    heading_object=soup.find_all('div')
    # Collect snippet <div>s matching Google's result-snippet class markup;
    # entries containing the breadcrumb character '›' are navigation, not text.
    for info in heading_object:
        if '<div class="BNeawe s3v9rd AP7Wnd"><div><div><div class="BNeawe s3v9rd AP7Wnd">' in str(info):
            if '›' not in str(info.text) :
                result.add(info.text)
    n=0
    # Number the snippets into `text`; n!=0 skips the first iterated element
    # (set order is arbitrary — presumably meant to skip the "" seed entry).
    for i in result:
        if n!=0:
            i = i.split("·",1)
            try:
                i = i[1]      # keep the part after the first "·" separator
            except:
                i = i[0]      # no "·" present — keep the whole snippet
            i=i.split("Duration")
            i = i[0]          # drop trailing video-duration fragments
            text = text +str(n)+"\t"+i+"\n\n"
        n=n+1
    if result_count == 1:
        temp = ""             # dead branch: result_count is fixed at 2 above
    else:
        # Summarize each snippet block; "..." marks a truncated snippet.
        for r in text.split("\n\n")[0:-1]:
            if "..." in r:
                r = r.split("...")
                w = essay_query(r[0].replace("\xa0",""))
                f_result = f_result + (w[0]['summary_text'])
            else:
                w = essay_query(r[:-1])
                f_result = f_result +(w[0]['summary_text'])
    return f_result
def code(name):
    """Scrape the w3schools "syntax" page for the language mentioned in *name*.

    E.g. "how to learn python" -> https://www.w3schools.com/python/python_syntax.asp
    Returns the extracted tutorial text (may be "" if the page layout differs).
    """
    # Pull the language token out of phrases like "learn python" / "start java".
    name = name.split('learn')[-1]
    name = name.split('start')[-1]
    name = name.split()[0]
    url = "https://www.w3schools.com/"+name+"/"+name+"_syntax.asp"
    r = requests.get(url)
    soup = BeautifulSoup(r.text,"html.parser")
    heading_object=soup.find_all('div')
    result = ""
    for info in heading_object:
        info1 = str(info)
        # Only the main-content container, skipping any <script> payloads.
        if '</script>' not in info1 and '<div class="w3-col l10 m12" id="main">' in info1:
            # Text between the "Next ❯" and "❮ Previous" nav links is the lesson
            # body. NOTE(review): raises IndexError if the page lacks "Next ❯".
            text = str(info.text).split('Next ❯')[1].split("❮ Previous")[0].split("\n\n\n")
            for r in text:
                # Drop exercise widgets and the on-page table of contents.
                if "Test Yourself With Exercises" in r or "Submit Answer »" in r or "On this page" in r:
                    continue
                else:
                    result = result + r+"\n\n"
    return result
def joke():
    """Return a random neutral English one-liner from the pyjokes library."""
    return pyjokes.get_joke(language="en", category="neutral")
def wiki(name):
    """Answer *name* with Wikipedia: suggest related keywords, then append
    summaries of the top matching pages.

    Returns:
        A string starting with "try this key words :" followed by up to three
        page summaries; pages whose lookup fails are silently skipped.
    """
    text = name
    # Strip common filler words so the search term is more focused.
    text = text.split("the")[-1]
    text = text.split("is a")[-1]
    text = text.split("by")[-1]
    out = "try this key words :\n"+str(wikipedia.search(text, results=10))+"\n\n"
    for i in wikipedia.search(text, results=3):
        try:
            result = wikipedia.summary(i)
            if " " in result.lower():
                out = out + result+"\n"
        # FIX: was a bare `except:` which also swallows SystemExit and
        # KeyboardInterrupt. Exception still covers the wikipedia library's
        # DisambiguationError / PageError, which is the intended best-effort skip.
        except Exception:
            continue
    return out
import os

import openai

# SECURITY FIX: an OpenAI API key was hard-coded on this line. A key committed
# to source control is leaked and must be revoked. Read the key from the
# environment instead so the secret never lives in the repository.
openai.api_key = os.environ.get("OPENAI_API_KEY", "")
def aitext(word):
    """Generate a free-form completion for *word* via OpenAI.

    NOTE(review): ``openai.Completion`` + ``text-davinci-003`` is the legacy
    pre-1.0 SDK surface; this call fails against openai>=1.0 — confirm the
    pinned SDK version before deploying.
    """
    response = openai.Completion.create(
        model="text-davinci-003",
        prompt=word,
        temperature=0.9,            # fairly creative sampling
        max_tokens=200,
        top_p=1,
        frequency_penalty=0,
        presence_penalty=0.6,       # nudge away from repeating topics
        stop=[" Human:", " AI:"]    # chat-style turn delimiters
    )
    return response.choices[0].text
import json
import os

# SECURITY FIX: a Hugging Face API token was hard-coded on this line. Treat
# the old token as leaked and revoke it; read the token from the environment.
headers = {"Authorization": f"Bearer {os.environ.get('HF_API_TOKEN', '')}"}
def sumy(payload):
    """POST *payload* to the HF bart-large-cnn summarization endpoint.

    Returns the decoded JSON response — on success a list like
    ``[{"summary_text": ...}]``.
    """
    API_URL = "https://api-inference.huggingface.co/models/facebook/bart-large-cnn"
    data = json.dumps(payload)
    # FIX: the original call had no timeout, so a stalled connection would
    # hang the chatbot forever. 30 s covers the HF model cold-start case.
    response = requests.request("POST", API_URL, headers=headers, data=data, timeout=30)
    return json.loads(response.content.decode("utf-8"))
def query(payload):
    """POST *payload* to the HF bart-large-cnn summarization endpoint.

    Returns the decoded JSON response — on success a list like
    ``[{"summary_text": ...}]``.
    """
    API_URL = "https://api-inference.huggingface.co/models/facebook/bart-large-cnn"
    data = json.dumps(payload)
    # FIX: the original call had no timeout, so a stalled connection would
    # hang the chatbot forever. 30 s covers the HF model cold-start case.
    response = requests.request("POST", API_URL, headers=headers, data=data, timeout=30)
    return json.loads(response.content.decode("utf-8"))
def google(name):
    """Answer *name* by scraping Google result snippets.

    Short factual queries (result_count == 1) get a single combined summary;
    "give/reason/result/step" queries get a numbered list of per-snippet
    summaries separated by a literal backslash pair.
    """
    if "give" in name or "reason" in name or "result" in name or "step" in name:
        result_count = 2
        print(name)
    else:
        result_count = 1
    f_result = ""
    result = {"",""}          # set literal; duplicates collapse to a single "" seed entry
    text =""
    url = "https://www.google.com/search?q="+name
    r = requests.get(url)
    soup = BeautifulSoup(r.text,"html.parser")
    heading_object=soup.find_all('div')
    # Collect snippet <div>s matching Google's result-snippet class markup;
    # entries containing the breadcrumb character '›' are navigation, not text.
    for info in heading_object:
        if '<div class="BNeawe s3v9rd AP7Wnd"><div><div><div class="BNeawe s3v9rd AP7Wnd">' in str(info):
            if '›' not in str(info.text) :
                result.add(info.text)
    n=0
    # Number the snippets into `text`; n!=0 skips the first iterated element
    # (set order is arbitrary — presumably meant to skip the "" seed entry).
    for i in result:
        if n!=0:
            i = i.split("·",1)
            try:
                i = i[1]      # keep the part after the first "·" separator
            except:
                i = i[0]      # no "·" present — keep the whole snippet
            i=i.split("Duration")
            i = i[0]          # drop trailing video-duration fragments
            text = text +str(n)+"\t"+i+"\n\n"
        n=n+1
    if result_count == 1:
        # Single-answer mode: merge truncated snippets and summarize once.
        temp = ""
        for r in text.split("\n\n"):
            temp = temp+r.split("...")[0]
        f_result = sumy({"inputs":temp,"parameters": {"do_sample": False,"max_length":300}})
        return f_result[0]['summary_text']
    else:
        # List mode: summarize each snippet separately, skipping the first two
        # and last two chunks and anything too short to be a real snippet.
        n=1
        for r in text.split("\n\n")[2:-2]:
            if len(r)>10:
                if "..." in r:
                    r = r.split("...")
                    w = query(r[0].replace("\xa0",""))
                    f_result = f_result + str(n)+"\t"+(w[0]['summary_text'])+"\n\n"+r"\\"
                else:
                    w = query(r[:-1])
                    f_result = f_result + str(n)+"\t"+(w[0]['summary_text'])+"\n\n"+r"\\"
                # NOTE(review): indentation was lost in this copy; numbering
                # per kept result implies the increment sits inside len(r)>10.
                n=n+1
        return f_result
from PyDictionary import PyDictionary
def greet(name1):
    """Dispatch a chat message to the matching skill (dictionary, jokes,
    small talk, essay writing, learning steps, or a Google-backed answer).

    Args:
        name1: raw user message from the Gradio text box.
    Returns:
        The chatbot's reply string.
    """
    name = name1.lower()
    # BUG FIX: the original returned the PyDictionary lookup unconditionally
    # here, which made every keyword branch below unreachable dead code and
    # crashed on any phrase PyDictionary could not define (meaning() returns
    # None -> TypeError on subscript). Only attempt a definition for
    # single-word messages, and fall through to keyword routing on failure.
    if len(name.split()) == 1:
        try:
            dictionary = PyDictionary()
            meaning = dictionary.meaning(name)
            return "Noun :" + str(meaning['Noun']) + "\nVerb :" + str(meaning['Verb'])
        except Exception:
            pass
    if "who are you" in name or "what is you" in name or "your name" in name or "who r u" in name:
        return "Im Ai Based Chatbot Created by ssebowa.org"
    if "who developed you" in name or "what is you" in name or "who mad you" in name or "who made you" in name:
        return "ssebowa.org"
    if "tell me a joke" in name or "the joke" in name:
        return joke()
    if "love you" in name or "i love" in name:
        return "me too"
    if "marry me" in name or "marry" in name:
        return "im not intrested"
    if "your age" in name or "what is your age" in name:
        return "Im not a human so i don't have age"
    if "thank u" in name or "thanks" in name or "thank you" in name:
        return "ok welcome ....!"
    if "write the essay" in name or "write essay" in name:
        # Essay requests: keep only the topic after "about".
        name = name.split("about")[-1]
        return essay(name)
    if "how to learn" in name or "steps for learning" in name or "step for learning" in name or "steps for" in name or "step for" in name:
        # Learning queries: append a w3schools syntax scrape when available.
        try:
            cresult = code(name)
            return google(name) + "\n\n" + cresult
        except Exception:
            return google(name)
    else:
        # Default: answer from Google snippets.
        return google(name) + ""
# Expose greet() through a simple text-in / text-out Gradio web UI.
iface = gr.Interface(fn=greet, inputs="text", outputs="text")
iface.launch()
|