mathslearn committed on
Commit
a1a07b5
·
verified ·
1 Parent(s): da1565c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +7 -5
app.py CHANGED
@@ -1,4 +1,4 @@
1
- from transformers import AutoModelForCausalLM, AutoTokenizer
2
 
3
  from langchain.chat_models import ChatOpenAI
4
  from langchain.chains import ConversationChain
@@ -7,13 +7,15 @@ from langchain.prompts import PromptTemplate
7
 
8
  import gradio as gr
9
 
10
- REPO_ID = "Xenova/gpt-3.5-turbo"
11
 
12
  # Load the model and tokenizer from Hugging Face's model hub
13
- model = AutoModelForCausalLM.from_pretrained(REPO_ID)
14
- tokenizer = AutoTokenizer.from_pretrained(REPO_ID)
15
 
16
- llm = ChatOpenAI(model=model, tokenizer=tokenizer)
 
 
17
 
18
  if 'buffer_memory' not in st.session_state:
19
  st.session_state.buffer_memory = ConversationBufferWindowMemory(k=8)
 
1
+ #from transformers import AutoModelForCausalLM, AutoTokenizer
2
 
3
  from langchain.chat_models import ChatOpenAI
4
  from langchain.chains import ConversationChain
 
7
 
8
  import gradio as gr
9
 
10
+ #REPO_ID = "Xenova/gpt-3.5-turbo"
11
 
12
  # Load the model and tokenizer from Hugging Face's model hub
13
+ #model = AutoModelForCausalLM.from_pretrained(REPO_ID)
14
+ #tokenizer = AutoTokenizer.from_pretrained(REPO_ID)
15
 
16
+ #llm = ChatOpenAI(model=model, tokenizer=tokenizer)
17
+
18
+ llm = ChatOpenAI(model_name="gpt-3.5-turbo", openai_api_key="sk-***REDACTED***")  # SECURITY: a real API key was committed here — revoke it immediately and load it from the OPENAI_API_KEY environment variable instead of hard-coding it
19
 
20
  if 'buffer_memory' not in st.session_state:
21
  st.session_state.buffer_memory = ConversationBufferWindowMemory(k=8)