freshbash committed
Commit 7866d1b · 1 Parent(s): a2946a5

updated requirements.txt

Files changed (3):
  1. README.md +1 -1
  2. app.py +4 -0
  3. requirements.txt +6 -1
README.md CHANGED
@@ -4,7 +4,7 @@ emoji: 💬
  colorFrom: yellow
  colorTo: purple
  sdk: gradio
- sdk_version: 5.0.1
+ sdk_version: 5.6.0
  app_file: app.py
  pinned: false
  license: mit
app.py CHANGED
@@ -1,10 +1,12 @@
  import gradio as gr
  import keras_nlp
+ print("Modules loaded!")

  """
  For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
  """

+ print("Fetching model...")
  model = keras_nlp.models.GemmaCausalLM.from_preset("kaggle://bhashwar22/gemma-for-finance/keras/gemma-for-finance")
  print("model successfully loaded!")

@@ -34,12 +36,14 @@ def respond(

      messages += f"Question: {message}\nAnswer: "

+     print("Generating response...")
      output = model.generate(
          messages,
          max_tokens=max_tokens,
          temperature=temperature,
          top_p=top_p
      )
+     print("Response generated!")

      # Split by "Answer:" from the right and get the last part
      response = output.rsplit("Answer: ", 1)[-1]
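
The hunks above show only part of app.py; the code that exposes respond() as a chat UI falls outside this diff. For context, here is a minimal sketch of the usual Gradio wiring such a Space relies on. The additional inputs, slider ranges, and defaults below are assumptions; only the parameter names max_tokens, temperature, and top_p come from the generate() call above.

    # Hypothetical wiring, not part of this commit: hook respond() into a chat UI.
    demo = gr.ChatInterface(
        respond,
        additional_inputs=[
            gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="max_tokens"),
            gr.Slider(minimum=0.1, maximum=2.0, value=0.7, step=0.1, label="temperature"),
            gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="top_p"),
        ],
    )

    if __name__ == "__main__":
        demo.launch()

With wiring like this, the print() calls added in this commit surface in the Space's container logs around module import, model download, and each generation.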
requirements.txt CHANGED
@@ -1 +1,6 @@
- huggingface_hub==0.25.2
+ gradio
+ huggingface_hub==0.25.2
+ keras==3.4.1
+ keras_nlp==0.15.1
+ transformers
+ tensorflow
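
To reproduce the environment locally, the pins above can be sanity-checked after installation. A minimal sketch, assuming the TensorFlow backend that the tensorflow pin implies:

    # Confirm the installed versions match the pins in requirements.txt.
    import keras
    import keras_nlp
    import tensorflow as tf

    print("keras:", keras.__version__)          # expect 3.4.1
    print("keras_nlp:", keras_nlp.__version__)  # expect 0.15.1
    print("tensorflow:", tf.__version__)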