Upload fixed_app.py
fixed_app.py  ADDED  +20 -0
@@ -0,0 +1,20 @@
+
+# Assuming 'client' is an instance of InferenceClient and 'model_id' is the model name.
+# You might need to import AutoTokenizer from transformers:
+# from transformers import AutoTokenizer
+
+# Before the line causing the error:
+# try:
+#     tokenizer = client.tokenizer  # This line caused the error
+# except AttributeError:
+#     # If client.tokenizer is not available, load the tokenizer separately.
+#     # Ensure you have the 'transformers' library installed.
+#     # You may need to pass an auth token to AutoTokenizer if your model is private.
+#     tokenizer = AutoTokenizer.from_pretrained(model_id, use_auth_token=os.environ.get("HF_TOKEN"))
+
+# Then, use the 'tokenizer' object to apply the chat template:
+# prompt_for_generation = tokenizer.apply_chat_template(
+#     messages,
+#     tokenize=False,
+#     add_generation_prompt=True
+# )
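Below is a minimal runnable sketch of the fallback the comments describe, not part of the committed file. It assumes a hypothetical model id ("HuggingFaceH4/zephyr-7b-beta" stands in for yours), a sample messages list, and an optional HF_TOKEN environment variable; note that recent transformers releases prefer token= over the deprecated use_auth_token=.

import os

from huggingface_hub import InferenceClient
from transformers import AutoTokenizer

model_id = "HuggingFaceH4/zephyr-7b-beta"  # hypothetical; substitute your own model
client = InferenceClient(model_id, token=os.environ.get("HF_TOKEN"))

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello!"},
]

try:
    # InferenceClient does not expose a tokenizer attribute, so this raises AttributeError
    tokenizer = client.tokenizer
except AttributeError:
    # Fall back to loading the tokenizer locally with transformers.
    # token= replaces the deprecated use_auth_token=; it may be None for public models.
    tokenizer = AutoTokenizer.from_pretrained(model_id, token=os.environ.get("HF_TOKEN"))

# Build the prompt string from the chat messages using the model's chat template.
prompt_for_generation = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True,
)
print(prompt_for_generation)

The try/except keeps the original code path intact if a future client ever provides a tokenizer, while the except branch is what actually runs today.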