abhi1nandy2 committed on
Commit
3421ed4
·
verified ·
1 Parent(s): 2eca6f3

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +39 -65
app.py CHANGED
@@ -1,17 +1,9 @@
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
3
-
4
  import requests
5
  from bs4 import BeautifulSoup
6
  from bs4.element import Comment
7
 
8
- def get_text_from_url(url):
9
- response = requests.get(url)
10
- soup = BeautifulSoup(response.text, 'html.parser')
11
- texts = soup.find_all(text=True)
12
- visible_texts = filter(tag_visible, texts)
13
- return u"\n".join(t.strip() for t in visible_texts)
14
-
15
  def tag_visible(element):
16
  if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:
17
  return False
@@ -19,60 +11,56 @@ def tag_visible(element):
19
  return False
20
  return True
21
 
 
 
 
 
 
 
 
 
 
22
  text_list = []
23
  homepage_url = "https://sites.google.com/view/abhilashnandy/home/"
24
  extensions = ["", "pmrf-profile-page"]
25
  for ext in extensions:
26
- url_text = get_text_from_url(homepage_url+ext)
27
  text_list.append(url_text)
28
- # Repeat for sub-links if necessary
29
-
30
- """
31
- For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
32
- """
33
- client = InferenceClient("stabilityai/stablelm-2-1_6b-chat")#("stabilityai/stablelm-2-1_6b-chat")#("TheBloke/TinyLlama-1.1B-Chat-v1.0-GPTQ")#("TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF")#("QuantFactory/Meta-Llama-3-8B-Instruct-GGUF")#("HuggingFaceH4/zephyr-7b-beta")
34
-
35
- SYSTEM_MESSAGE = "You are a QA chatbot to answer queries (in less than 30 words) on my homepage that has the following information -\n\n" + "\n\n".join(text_list) + "\n\n"
36
 
37
- def respond(
38
- message,
39
- history: list[tuple[str, str]],
40
- system_message=SYSTEM_MESSAGE,
41
- max_tokens=140,
42
- temperature=0.7,
43
- top_p=0.95,
44
- ):
45
- messages = [{"role": "system", "content": system_message}]
46
 
47
- for val in history:
48
- if len(val) >= 1:
49
- messages.append({"role": "user", "content": "Question: " + val[0]})
50
- if len(val) >= 2:
51
- messages.append({"role": "assistant", "content": "Answer: " + val[1]})
52
 
53
- messages.append({"role": "user", "content": message})
 
 
 
 
 
 
 
54
 
55
  try:
56
- response = client.chat_completion(
57
- messages,
58
- max_tokens=max_tokens,
59
  temperature=temperature,
60
  top_p=top_p,
61
- # stream=True, # Disable streaming for debugging
62
  )
63
- return response.choices[0].message["content"]
64
-
 
 
 
65
  except Exception as e:
66
  print(f"An error occurred: {e}")
67
- return str(e) #"An error occurred while processing the response."
68
-
69
-
70
-
71
- """
72
- For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
73
- """
74
-
75
- # initial_message = [("user", "Yo who dis Abhilash?")]
76
 
77
  markdown_note = "## Ask Anything About Me! (Might show a tad bit of hallucination!)"
78
 
@@ -81,24 +69,10 @@ demo = gr.Blocks()
81
  with demo:
82
  gr.Markdown(markdown_note)
83
  gr.ChatInterface(
84
- respond,
85
- examples = ["Yo who dis Abhilash?", "What is Abhilash's most recent publication?"],
86
- # message=initial_message,
87
- additional_inputs=[
88
- # gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
89
- # gr.Slider(minimum=1, maximum=8192, value=512, step=1, label="Max new tokens"),
90
- # gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
91
- # gr.Slider(
92
- # minimum=0.1,
93
- # maximum=1.0,
94
- # value=0.95,
95
- # step=0.05,
96
- # label="Top-p (nucleus sampling)",
97
- # ),
98
- ],
99
- # value=initial_message
100
- )
101
-
102
 
103
  if __name__ == "__main__":
104
  demo.launch()
 
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
 
3
  import requests
4
  from bs4 import BeautifulSoup
5
  from bs4.element import Comment
6
 
 
 
 
 
 
 
 
7
  def tag_visible(element):
8
  if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:
9
  return False
 
11
  return False
12
  return True
13
 
14
def get_text_from_url(url):
    """Fetch *url* and return its visible text content, one fragment per line.

    Only text nodes accepted by ``tag_visible`` (i.e. not inside
    style/script/head/title/meta tags) are kept, and blank fragments
    are dropped.

    Args:
        url: Fully-qualified URL of the page to scrape.

    Returns:
        Newline-joined visible text fragments of the page.

    Raises:
        requests.RequestException: on network failure or timeout.
    """
    # Bound the request so a hung server cannot stall app startup forever.
    response = requests.get(url, timeout=10)
    soup = BeautifulSoup(response.text, 'html.parser')
    # `string=True` is the modern spelling of the deprecated `text=True`.
    texts = soup.find_all(string=True)
    visible_texts = filter(tag_visible, texts)
    # Only join non-empty lines
    return "\n".join(t.strip() for t in visible_texts if t.strip())
21
+
22
# Scrape the homepage and its sub-pages once at startup; the collected
# text seeds the chatbot's system prompt.
homepage_url = "https://sites.google.com/view/abhilashnandy/home/"
extensions = ["", "pmrf-profile-page"]
text_list = [get_text_from_url(homepage_url + ext) for ext in extensions]
 
 
 
 
 
 
 
 
29
 
30
# Fold the scraped homepage text into a single prompt prefix that grounds
# every model response.
_homepage_text = "\n\n".join(text_list)
SYSTEM_MESSAGE = (
    "You are a QA chatbot to answer queries (in less than 30 words) on my homepage that has the following information -\n\n"
    + _homepage_text
    + "\n\n"
)

# Use a lightweight model for low-latency CPU inference
client = InferenceClient("microsoft/DialoGPT-small")
 
 
 
39
 
40
def respond(message, history: list[tuple[str, str]], system_message=SYSTEM_MESSAGE,
            max_tokens=140, temperature=0.7, top_p=0.95):
    """Answer a chat message using the homepage-grounded prompt.

    Args:
        message: The user's current question.
        history: Prior (user, assistant) turn pairs from the ChatInterface.
        system_message: Prompt prefix containing the scraped homepage text.
        max_tokens: Maximum number of new tokens to generate.
        temperature: Sampling temperature.
        top_p: Nucleus-sampling probability mass.

    Returns:
        The model's answer string, or the error message if inference fails.
    """
    # Construct a flat Q/A-style prompt from the system message and history.
    prompt = system_message
    for user_q, bot_a in history:
        prompt += f"Question: {user_q}\n"
        prompt += f"Answer: {bot_a}\n"
    prompt += f"Question: {message}\nAnswer:"

    try:
        response = client.text_generation(
            prompt,
            max_new_tokens=max_tokens,
            temperature=temperature,
            top_p=top_p,
        )
        # BUG FIX: text_generation returns a plain string by default
        # (details=False); the old `response[0]["generated_text"]` raised on
        # that and the except branch returned the error text as the "answer".
        # Keep the list-of-dicts fallback for raw/legacy endpoint payloads.
        if isinstance(response, str):
            generated_text = response
        else:
            generated_text = response[0]["generated_text"]
        # Keep only the first line after the final "Answer:" marker.
        answer = generated_text.split("Answer:")[-1].strip().split("\n")[0].strip()
        return answer
    except Exception as e:
        # Surface the failure in the UI rather than crashing the app.
        print(f"An error occurred: {e}")
        return str(e)
 
 
 
 
 
 
 
 
64
 
65
  markdown_note = "## Ask Anything About Me! (Might show a tad bit of hallucination!)"
66
 
 
69
  with demo:
70
  gr.Markdown(markdown_note)
71
  gr.ChatInterface(
72
+ respond,
73
+ examples=["Yo who dis Abhilash?", "What is Abhilash's most recent publication?"],
74
+ additional_inputs=[],
75
+ )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
76
 
77
# Launch the Gradio app only when executed as a script (not on import).
if __name__ == "__main__":
    demo.launch()