kz209 committed on
Commit 66846f0
1 Parent(s): 185c1c6

change from openai to phi

pages/summarization_example.py CHANGED
@@ -30,17 +30,25 @@ summarization: """ # noqa: E501
         sources=sources,
     )
 
-    answer = lm.chat.completions.create(
-        temperature=0.8,
-        max_tokens=800,
-        messages=[
-            {
-                "role": "user",
-                "content": content,
-            },
-        ],
-        model=model_name,
-    )
+    from transformers import pipeline
+
+    messages = [
+        {"role": "user", "content": content},
+    ]
+    pipe = pipeline("text-generation", model="microsoft/Phi-3-mini-4k-instruct", trust_remote_code=True)
+    answer = pipe(messages)
+
+    # answer = lm.chat.completions.create(
+    #     temperature=0.8,
+    #     max_tokens=800,
+    #     messages=[
+    #         {
+    #             "role": "user",
+    #             "content": content,
+    #         },
+    #     ],
+    #     model=model_name,
+    # )
 
     return answer
 
@@ -58,11 +66,11 @@ input_text1 = st.text_area("question", height=None, \
 
 
     # Button to trigger processing
-    lm = OpenAI()
+    #lm = OpenAI()
 
     if st.button('Submit'):
        if input_text1:
-            response = generate_answer(lm, input_text1, model_selection)
+            response = generate_answer('', input_text1, model_selection)
            st.write('## Orginal Article:')
            st.markdown(examples[example_selection])
 
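Note: a transformers text-generation pipeline does not return an OpenAI ChatCompletion object. With chat-style messages, recent transformers versions return a list of dicts whose "generated_text" field holds the continued conversation, so callers that previously read the OpenAI response need to unpack the result. A minimal sketch of what the new path could look like (the helper name and generation arguments are illustrative, not from this commit):

from transformers import pipeline

def run_phi(content):
    # Hypothetical helper, not part of the repo: wraps the pipeline call shown in the diff.
    pipe = pipeline(
        "text-generation",
        model="microsoft/Phi-3-mini-4k-instruct",
        trust_remote_code=True,
    )
    messages = [{"role": "user", "content": content}]
    outputs = pipe(messages, max_new_tokens=800)
    # With chat input, the pipeline returns the whole conversation under
    # "generated_text"; the last message is the assistant reply.
    return outputs[0]["generated_text"][-1]["content"]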
utils/multiple_stream.py CHANGED
@@ -7,7 +7,7 @@ import streamlit as st
 from streamlit.runtime.scriptrunner.script_run_context import \
     add_script_run_ctx
 
-_TEST = """
+_TEST_ = """
 Test of Time. A Benchmark for Evaluating LLMs on Temporal Reasoning. Large language models (LLMs) have \
 showcased remarkable reasoning capabilities, yet they remain susceptible to errors, particularly in temporal \
 reasoning tasks involving complex temporal logic.
@@ -15,7 +15,7 @@ reasoning tasks involving complex temporal logic.
 
 def generate_data_test():
     """A generator to pass to st.write_stream"""
-    temp = copy.deepcopy(_TEST)
+    temp = copy.deepcopy(_TEST_)
     l1 = temp.split()
     random.shuffle(l1)
     temp = ' '.join(l1)
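
For context on how this generator is consumed: its docstring says it is passed to st.write_stream, which accepts a generator (or callable) yielding string chunks. A rough, self-contained sketch of that usage, assuming a per-word yielding loop below the lines the diff shows (the sleep interval and yield granularity are illustrative):

import copy
import random
import time

import streamlit as st

_TEST_ = """Test of Time. A Benchmark for Evaluating LLMs on Temporal Reasoning."""

def generate_data_test():
    """A generator to pass to st.write_stream"""
    temp = copy.deepcopy(_TEST_)
    l1 = temp.split()
    random.shuffle(l1)
    temp = ' '.join(l1)
    # Yield word by word so Streamlit renders a streaming effect.
    for word in temp.split():
        yield word + " "
        time.sleep(0.02)

st.write_stream(generate_data_test)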