ilemon committed
Commit eba820a · 1 Parent(s): 21ffc5a
Files changed (2):
  1. .ipynb_checkpoints/app-checkpoint.py +8 -13
  2. app.py +8 -13
.ipynb_checkpoints/app-checkpoint.py CHANGED

@@ -4,19 +4,15 @@ from llama_index.embeddings.huggingface import HuggingFaceEmbedding
 from llama_index.legacy.callbacks import CallbackManager
 from llama_index.llms.openai_like import OpenAILike
 
-
 st.set_page_config(page_title="llama_index_demo", page_icon="🦜🔗")
+
 # Create an instance of CallbackManager
 callback_manager = CallbackManager()
-api_base_url = "https://internlm-chat.intern-ai.org.cn/puyu/api/v1/"
+api_base_url = "https://internlm-chat.intern-ai.org.cn/puyu/api/v1/"
 model = "internlm2.5-latest"
-api_key = api_key = st.sidebar.text_input('API Key', value='', type='password')
+api_key = st.sidebar.text_input('API Key', value='', type='password')
 
-# api_base_url = "https://api.siliconflow.cn/v1"
-# model = "internlm/internlm2_5-7b-chat"
-# api_key = "请填写 API Key"
-
-llm =OpenAILike(model=model, api_base=api_base_url, api_key=api_key, is_chat_model=True,callback_manager=callback_manager)
+llm = OpenAILike(model=model, api_base=api_base_url, api_key=api_key, is_chat_model=True, callback_manager=callback_manager)
 
 st.title("llama_index_demo")
 
@@ -28,7 +24,7 @@ def init_models():
     )
     Settings.embed_model = embed_model
 
-    #用初始化llm
+    # 初始化 LLM
     Settings.llm = llm
 
     documents = SimpleDirectoryReader("./data/xtuner").load_data()
@@ -45,12 +41,11 @@ def greet2(question):
     response = st.session_state['query_engine'].query(question)
     return response
 
-
 # Store LLM generated responses
 if "messages" not in st.session_state.keys():
-    st.session_state.messages = [{"role": "assistant", "content": "你好,我是你的助手,有什么我可以帮助你的吗?"}]
+    st.session_state.messages = [{"role": "assistant", "content": "你好,我是你的助手,有什么我可以帮助你的吗?"}]
 
-# Display or clear chat messages
+# Display or clear chat messages
 for message in st.session_state.messages:
     with st.chat_message(message["role"]):
         st.write(message["content"])
@@ -70,7 +65,7 @@ if prompt := st.chat_input():
     with st.chat_message("user"):
         st.write(prompt)
 
-# Gegenerate_llama_index_response last message is not from assistant
+# Generate response if last message is not from assistant
 if st.session_state.messages[-1]["role"] != "assistant":
     with st.chat_message("assistant"):
         with st.spinner("Thinking..."):
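For context, hunk 1 leaves the model setup in this file looking roughly as follows. This is a sketch assembled from the added and context lines above; the `import streamlit as st` line sits outside the hunk and is assumed (it is implied by the `st.` calls), as are the explanatory comments.

import streamlit as st  # assumed: not visible in the hunk, implied by the st. calls
from llama_index.legacy.callbacks import CallbackManager
from llama_index.llms.openai_like import OpenAILike

st.set_page_config(page_title="llama_index_demo", page_icon="🦜🔗")

# Create an instance of CallbackManager
callback_manager = CallbackManager()
api_base_url = "https://internlm-chat.intern-ai.org.cn/puyu/api/v1/"
model = "internlm2.5-latest"
# The key is now read from a password-style sidebar field rather than being
# assigned twice on one line, so it no longer has to live in the source file.
api_key = st.sidebar.text_input('API Key', value='', type='password')

# OpenAILike talks to any OpenAI-compatible endpoint; is_chat_model=True
# selects the chat-completions interface.
llm = OpenAILike(model=model, api_base=api_base_url, api_key=api_key,
                 is_chat_model=True, callback_manager=callback_manager)

Dropping the commented-out SiliconFlow alternative (api.siliconflow.cn) also removes the placeholder key "请填写 API Key" ("please fill in the API Key") from the file.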
app.py CHANGED

@@ -4,19 +4,15 @@ from llama_index.embeddings.huggingface import HuggingFaceEmbedding
 from llama_index.legacy.callbacks import CallbackManager
 from llama_index.llms.openai_like import OpenAILike
 
-
 st.set_page_config(page_title="llama_index_demo", page_icon="🦜🔗")
+
 # Create an instance of CallbackManager
 callback_manager = CallbackManager()
-api_base_url = "https://internlm-chat.intern-ai.org.cn/puyu/api/v1/"
+api_base_url = "https://internlm-chat.intern-ai.org.cn/puyu/api/v1/"
 model = "internlm2.5-latest"
-api_key = api_key = st.sidebar.text_input('API Key', value='', type='password')
+api_key = st.sidebar.text_input('API Key', value='', type='password')
 
-# api_base_url = "https://api.siliconflow.cn/v1"
-# model = "internlm/internlm2_5-7b-chat"
-# api_key = "请填写 API Key"
-
-llm =OpenAILike(model=model, api_base=api_base_url, api_key=api_key, is_chat_model=True,callback_manager=callback_manager)
+llm = OpenAILike(model=model, api_base=api_base_url, api_key=api_key, is_chat_model=True, callback_manager=callback_manager)
 
 st.title("llama_index_demo")
 
@@ -28,7 +24,7 @@ def init_models():
     )
     Settings.embed_model = embed_model
 
-    #用初始化llm
+    # 初始化 LLM
     Settings.llm = llm
 
     documents = SimpleDirectoryReader("./data/xtuner").load_data()
@@ -45,12 +41,11 @@ def greet2(question):
     response = st.session_state['query_engine'].query(question)
     return response
 
-
 # Store LLM generated responses
 if "messages" not in st.session_state.keys():
-    st.session_state.messages = [{"role": "assistant", "content": "你好,我是你的助手,有什么我可以帮助你的吗?"}]
+    st.session_state.messages = [{"role": "assistant", "content": "你好,我是你的助手,有什么我可以帮助你的吗?"}]
 
-# Display or clear chat messages
+# Display or clear chat messages
 for message in st.session_state.messages:
     with st.chat_message(message["role"]):
         st.write(message["content"])
@@ -70,7 +65,7 @@ if prompt := st.chat_input():
     with st.chat_message("user"):
         st.write(prompt)
 
-# Gegenerate_llama_index_response last message is not from assistant
+# Generate response if last message is not from assistant
 if st.session_state.messages[-1]["role"] != "assistant":
     with st.chat_message("assistant"):
         with st.spinner("Thinking..."):
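Pulling the context lines of the last two hunks together, the chat flow after this commit reads roughly as below. This is a sketch only: the trailing `...` marks code beyond the hunk boundaries, and the note that the query engine comes from init_models() is an inference from hunk 2, not something these hunks show.

def greet2(question):
    # Run the RAG query; 'query_engine' is presumably stored in session state
    # by init_models() (its construction is not visible in these hunks).
    response = st.session_state['query_engine'].query(question)
    return response

# Store LLM generated responses, seeding the history with the assistant
# greeting (Chinese for "Hello, I'm your assistant - how can I help you?").
if "messages" not in st.session_state.keys():
    st.session_state.messages = [{"role": "assistant",
                                  "content": "你好,我是你的助手,有什么我可以帮助你的吗?"}]

# Replay the stored history on every Streamlit rerun
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.write(message["content"])

# Echo a new user turn as it arrives
if prompt := st.chat_input():
    with st.chat_message("user"):
        st.write(prompt)

# Generate response if last message is not from assistant
if st.session_state.messages[-1]["role"] != "assistant":
    with st.chat_message("assistant"):
        with st.spinner("Thinking..."):
            ...  # response generation continues past the hunk boundary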