vkasyap committed
Commit e7daa3f · verified · 1 Parent(s): c32ef27

Rename app (2).py to app.py

Files changed (1):
  1. app (2).py → app.py +0 -44
app (2).py → app.py RENAMED
@@ -1,6 +1,5 @@
 import gradio as gr
 import os
-
 from langchain_community.document_loaders import PyPDFLoader
 from langchain.text_splitter import RecursiveCharacterTextSplitter
 from langchain_community.vectorstores import Chroma
@@ -10,11 +9,9 @@ from langchain_community.llms import HuggingFacePipeline
 from langchain.chains import ConversationChain
 from langchain.memory import ConversationBufferMemory
 from langchain_community.llms import HuggingFaceEndpoint
-
 from pathlib import Path
 import chromadb
 from unidecode import unidecode
-
 from transformers import AutoTokenizer, AutoModelForMaskedLM
 import transformers
 import torch
@@ -28,10 +25,6 @@ model = AutoModelForMaskedLM.from_pretrained("google/muril-base-cased")
 
 # default_persist_directory = './chroma_HF/'
 list_llm = ["mistralai/Mistral-7B-Instruct-v0.2", "mistralai/Mixtral-8x7B-Instruct-v0.1", "mistralai/Mistral-7B-Instruct-v0.1", \
-    "google/gemma-7b-it","google/gemma-2b-it", \
-    "HuggingFaceH4/zephyr-7b-beta", "HuggingFaceH4/zephyr-7b-gemma-v0.1", \
-    "meta-llama/Llama-2-7b-chat-hf", "microsoft/phi-2", \
-    "TinyLlama/TinyLlama-1.1B-Chat-v1.0", "mosaicml/mpt-7b-instruct", "tiiuae/falcon-7b-instruct", \
     "google/flan-t5-xxl"
 ]
 list_llm_simple = [os.path.basename(llm) for llm in list_llm]
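(Aside: the list_llm_simple comprehension kept by this hunk just strips the organization prefix from each repo id, presumably for display in the app's model dropdown. A quick sanity check, using one entry from list_llm:)

    import os

    repo_id = "mistralai/Mistral-7B-Instruct-v0.2"
    # os.path.basename splits on the final "/", leaving only the model name
    print(os.path.basename(repo_id))   # -> Mistral-7B-Instruct-v0.2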
@@ -94,42 +87,6 @@ def initialize_llmchain(llm_model, temperature, max_tokens, top_k, vector_db, pr
             top_k = top_k,
             load_in_8bit = True,
         )
-    elif llm_model in ["HuggingFaceH4/zephyr-7b-gemma-v0.1","mosaicml/mpt-7b-instruct"]:
-        raise gr.Error("LLM model is too large to be loaded automatically on free inference endpoint")
-        llm = HuggingFaceEndpoint(
-            repo_id=llm_model,
-            temperature = temperature,
-            max_new_tokens = max_tokens,
-            top_k = top_k,
-        )
-    elif llm_model == "microsoft/phi-2":
-        # raise gr.Error("phi-2 model requires 'trust_remote_code=True', currently not supported by langchain HuggingFaceHub...")
-        llm = HuggingFaceEndpoint(
-            repo_id=llm_model,
-            # model_kwargs={"temperature": temperature, "max_new_tokens": max_tokens, "top_k": top_k, "trust_remote_code": True, "torch_dtype": "auto"}
-            temperature = temperature,
-            max_new_tokens = max_tokens,
-            top_k = top_k,
-            trust_remote_code = True,
-            torch_dtype = "auto",
-        )
-    elif llm_model == "TinyLlama/TinyLlama-1.1B-Chat-v1.0":
-        llm = HuggingFaceEndpoint(
-            repo_id=llm_model,
-            # model_kwargs={"temperature": temperature, "max_new_tokens": 250, "top_k": top_k}
-            temperature = temperature,
-            max_new_tokens = 250,
-            top_k = top_k,
-        )
-    elif llm_model == "meta-llama/Llama-2-7b-chat-hf":
-        raise gr.Error("Llama-2-7b-chat-hf model requires a Pro subscription...")
-        llm = HuggingFaceEndpoint(
-            repo_id=llm_model,
-            # model_kwargs={"temperature": temperature, "max_new_tokens": max_tokens, "top_k": top_k}
-            temperature = temperature,
-            max_new_tokens = max_tokens,
-            top_k = top_k,
-        )
     else:
         llm = HuggingFaceEndpoint(
             repo_id=llm_model,
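(For context on what survives this hunk: with the model-specific elif branches deleted, every remaining model falls through to the same plain HuggingFaceEndpoint call shown in the else branch. A minimal sketch of that fallback, assuming a valid Hugging Face API token is available in the environment; the parameter values here are illustrative, not the app's defaults:)

    from langchain_community.llms import HuggingFaceEndpoint

    # Generic endpoint setup matching the surviving else branch;
    # repo_id would be whichever entry of list_llm the user selected.
    llm = HuggingFaceEndpoint(
        repo_id="mistralai/Mistral-7B-Instruct-v0.2",
        temperature=0.7,       # illustrative slider value
        max_new_tokens=1024,   # illustrative; the app passes max_tokens through
        top_k=3,               # illustrative
    )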
@@ -222,7 +179,6 @@ def format_chat_history(message, chat_history):
         formatted_chat_history.append(f"Assistant: {bot_message}")
     return formatted_chat_history
 
-
 def conversation(qa_chain, message, history):
     formatted_chat_history = format_chat_history(message, history)
     #print("formatted_chat_history",formatted_chat_history)
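(The last hunk only drops a blank line, but its context shows the tail of format_chat_history. The full body is not part of this diff; the following is a plausible reconstruction from the visible context lines, where the loop and the "User:" line are assumptions:)

    def format_chat_history(message, chat_history):
        # Assumed shape: chat_history is a list of (user_message, bot_message)
        # pairs, as Gradio chatbots commonly provide. Only the Assistant line
        # and the return statement are visible in the hunk above.
        formatted_chat_history = []
        for user_message, bot_message in chat_history:
            formatted_chat_history.append(f"User: {user_message}")
            formatted_chat_history.append(f"Assistant: {bot_message}")
        return formatted_chat_history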
 