harishnair04 committed
Commit: d902198
Parent: c59f8fc

Update app.py

Files changed (1)
app.py: +6 -6
app.py CHANGED
@@ -1,8 +1,6 @@
 import gradio as gr
 from huggingface_hub import InferenceClient
 from transformers import AutoTokenizer, AutoModelForCausalLM
-import os
-os.environ["KERAS_BACKEND"] = "jax"
 
 """
 For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
@@ -39,11 +37,13 @@ body::before {
 
 """
 
-model_id = "harishnair04/Gemma-medtr-2b-sft-v2-gguf"
-filename = "Gemma-medtr-2b-sft-v2.gguf"
+model_id = "harishnair04/Gemma-medtr-2b-sft-v2"
+# filename = "Gemma-medtr-2b-sft-v2.gguf"
 
-tokenizer = AutoTokenizer.from_pretrained(model_id, gguf_file=filename)
-gemma_model = AutoModelForCausalLM.from_pretrained(model_id, gguf_file=filename)
+# tokenizer = AutoTokenizer.from_pretrained(model_id, gguf_file=filename)
+# gemma_model = AutoModelForCausalLM.from_pretrained(model_id, gguf_file=filename)
+tokenizer = AutoTokenizer.from_pretrained(model_id)
+gemma_model = AutoModelForCausalLM.from_pretrained(model_id)
 
 tokenizer.pad_token_id = tokenizer.eos_token_id
 
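In short, the commit drops the GGUF-file loading path (the `gguf_file=` argument to `from_pretrained`) and instead loads the plain Hub checkpoint `harishnair04/Gemma-medtr-2b-sft-v2`, while the unused `os` import and the `KERAS_BACKEND` setting are removed. A minimal sketch of how the updated loading code could be exercised on its own, assuming the repo is publicly accessible and using a purely illustrative prompt:

# Sketch only: mirrors the loading code added in this commit and runs one
# generation outside the Gradio app. The prompt below is illustrative.
from transformers import AutoTokenizer, AutoModelForCausalLM

model_id = "harishnair04/Gemma-medtr-2b-sft-v2"

tokenizer = AutoTokenizer.from_pretrained(model_id)
gemma_model = AutoModelForCausalLM.from_pretrained(model_id)
tokenizer.pad_token_id = tokenizer.eos_token_id  # same setting as in app.py

prompt = "List common side effects of metformin."  # hypothetical example input
inputs = tokenizer(prompt, return_tensors="pt")
output_ids = gemma_model.generate(**inputs, max_new_tokens=128)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))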