aidevhund committed
Commit 48271f9 · verified · 1 Parent(s): e5a36e7

Update app.py

Files changed (1):
  1. app.py (+47 -33)
app.py CHANGED
@@ -12,37 +12,64 @@ import base64
 # Load environment variables
 load_dotenv()
 
-# Mapping for display names and actual model names
-llm_display_names = {
-    "tiiuae/falcon-7b-instruct": "HundAI",
+llm_models = {
     "mistralai/Mixtral-8x7B-Instruct-v0.1": "Mixtral-8x7B",
-    "meta-llama/Meta-Llama-3-8B-Instruct": "Meta-Llama-3",
+    "meta-llama/Meta-Llama-3-8B-Instruct": "Meta-Llama-8B",
     "mistralai/Mistral-7B-Instruct-v0.2": "Mistral-7B",
+    "tiiuae/falcon-7b-instruct": "HundAI", # Model renamed for UI display
 }
+
 embed_models = [
     "BAAI/bge-small-en-v1.5", # 33.4M
     "NeuML/pubmedbert-base-embeddings",
-    "BAAI/llm-embedder", # 109M
-    "BAAI/bge-large-en" # 335M
+    "BAAI/llm-embedder", # 109M
+    "BAAI/bge-large-en" # 335M
 ]
 
-# Reverse mapping to retrieve original names
-llm_reverse_mapping = {v: k for k, v in llm_display_names.items()}
+# Global variable for selected model
+selected_llm_model_name = list(llm_models.keys())[0] # Default to the first model in the dictionary
+vector_index = None
+
+# Initialize the parser
+parser = LlamaParse(api_key=os.getenv("LLAMA_INDEX_API"), result_type='markdown')
+file_extractor = {
+    '.pdf': parser,
+    '.docx': parser,
+    '.txt': parser,
+    '.csv': parser,
+    '.xlsx': parser,
+    '.pptx': parser,
+    '.html': parser,
+    '.jpg': parser,
+    '.jpeg': parser,
+    '.png': parser,
+    '.webp': parser,
+    '.svg': parser,
+}
+
+# File processing function
+def load_files(file_path: str, embed_model_name: str):
+    try:
+        global vector_index
+        document = SimpleDirectoryReader(input_files=[file_path], file_extractor=file_extractor).load_data()
+        embed_model = HuggingFaceEmbedding(model_name=embed_model_name)
+        vector_index = VectorStoreIndex.from_documents(document, embed_model=embed_model)
+        filename = os.path.basename(file_path)
+        return f"Ready to give response on {filename}"
+    except Exception as e:
+        return f"An error occurred: {e}"
 
-# Update UI to use display names
-def set_llm_model(display_name):
+# Function to handle the selected model from dropdown
+def set_llm_model(selected_model):
     global selected_llm_model_name
-    # Retrieve the original model name using the reverse mapping
-    selected_llm_model_name = llm_reverse_mapping.get(display_name, display_name)
-    print(f"Model selected: {selected_llm_model_name}")
+    selected_llm_model_name = next(key for key, value in llm_models.items() if value == selected_model)
 
-# Respond function remains unchanged
+# Respond function
 def respond(message, history):
     try:
-        # Initialize the LLM with the selected model
         llm = HuggingFaceInferenceAPI(
-            model_name=selected_llm_model_name, # Use the backend model name
-            contextWindow=8192,
+            model_name=selected_llm_model_name,
+            contextWindow=8192,
             maxTokens=1024,
             temperature=0.3,
             topP=0.9,
@@ -50,13 +77,9 @@ def respond(message, history):
             presencePenalty=0.5,
             token=os.getenv("TOKEN")
         )
-
-        # Set up the query engine with the selected LLM
        query_engine = vector_index.as_query_engine(llm=llm)
        bot_message = query_engine.query(message)
-
-        print(f"\n{datetime.now()}:{selected_llm_model_name}:: {message} --> {str(bot_message)}\n")
-        return f"{selected_llm_model_name}:\n{str(bot_message)}"
+        return f"{llm_models[selected_llm_model_name]}:\n{str(bot_message)}"
     except Exception as e:
         if str(e) == "'NoneType' object has no attribute 'as_query_engine'":
             return "Please upload a file."
@@ -80,27 +103,18 @@ with gr.Blocks(theme=gr.themes.Soft(font=[gr.themes.GoogleFont("Roboto Mono")]),
             btn = gr.Button("Submit", variant='primary')
             clear = gr.ClearButton()
             output = gr.Text(label='Vector Index')
-
-            # Use display names for LLM dropdown
-            llm_model_dropdown = gr.Dropdown(
-                list(llm_display_names.values()), # Display names
-                label="Select LLM",
-                interactive=True
-            )
-
+            llm_model_dropdown = gr.Dropdown(list(llm_models.values()), label="Select LLM", interactive=True)
         with gr.Column(scale=3):
             gr.ChatInterface(
                 fn=respond,
                 chatbot=gr.Chatbot(height=500),
-                show_progress='full',
+                theme="soft",
                 textbox=gr.Textbox(placeholder="Ask me any questions on the uploaded document!", container=False)
             )
 
-    # Set up Gradio interactions
     llm_model_dropdown.change(fn=set_llm_model, inputs=llm_model_dropdown)
     btn.click(fn=load_files, inputs=[file_input, embed_model_dropdown], outputs=output)
     clear.click(lambda: [None] * 3, outputs=[file_input, embed_model_dropdown, output])
 
-# Launch the demo with a public link option
 if __name__ == "__main__":
     demo.launch()
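
A review note on the new set_llm_model: next() is called without a default, so a dropdown value with no match in llm_models raises StopIteration. A minimal standalone sketch of a safer lookup (the helper name resolve_llm_model and the fallback to the first model are illustrative assumptions, not part of this commit):

llm_models = {
    "mistralai/Mixtral-8x7B-Instruct-v0.1": "Mixtral-8x7B",
    "meta-llama/Meta-Llama-3-8B-Instruct": "Meta-Llama-8B",
    "mistralai/Mistral-7B-Instruct-v0.2": "Mistral-7B",
    "tiiuae/falcon-7b-instruct": "HundAI",
}

def resolve_llm_model(selected_model):
    # Passing a default to next() avoids StopIteration when the display
    # name is unknown; falling back to the first configured model is an
    # assumption made for this sketch, not behavior from the commit.
    return next(
        (key for key, value in llm_models.items() if value == selected_model),
        next(iter(llm_models)),
    )

print(resolve_llm_model("HundAI"))  # tiiuae/falcon-7b-instruct
print(resolve_llm_model("unknown")) # falls back to mistralai/Mixtral-8x7B-Instruct-v0.1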
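
Relatedly, respond still detects a missing index by comparing the exception text to "'NoneType' object has no attribute 'as_query_engine'", which silently breaks if that wording ever changes. A sketch of an explicit guard instead, assuming the module-level vector_index from the diff; the query-engine call stands in for the real body:

vector_index = None # module-level default, as in the diff

def respond(message, history):
    # Guard up front instead of string-matching the AttributeError text.
    if vector_index is None:
        return "Please upload a file."
    return str(vector_index.as_query_engine().query(message))

print(respond("What does the document say?", [])) # Please upload a file.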