Shreyas094 committed on
Commit 664e897
1 Parent(s): a47e6ea

Update app.py

Files changed (1): app.py (+14, -7)
app.py CHANGED
@@ -6,9 +6,8 @@ import torch
 from huggingface_hub import login
 import os
 
-
-# Retrieve the Hugging Face token from secrets (replace 'HUGGINGFACE_TOKEN' with your secret key)
-hf_token = os.getenv('My_Token')
+# Directly assign your Hugging Face token here
+hf_token = "your_hugging_face_api_token"
 
 # Log in to Hugging Face
 login(token=hf_token)
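
A note on this hunk: it replaces the os.getenv('My_Token') lookup with a hardcoded placeholder string. If a real token were committed this way it would be exposed in the repository history; reading it from an environment variable keeps it out of the code. A minimal sketch, assuming the token is stored as a Space secret under the hypothetical name HF_TOKEN:

# Sketch: read the token from the environment instead of hardcoding it.
# HF_TOKEN is a hypothetical variable name; on Hugging Face Spaces it would
# be configured as a repository secret and exposed to the app at runtime.
import os

hf_token = os.environ.get("HF_TOKEN")
if not hf_token:
    raise RuntimeError("HF_TOKEN is not set; configure it as a secret before starting the app.")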
@@ -101,10 +100,18 @@ def google_search(term, num_results=5, lang="en", timeout=5, safe="active", ssl_
     print(f"Total results fetched: {len(all_results)}")
     return all_results
 
-# Load the Mixtral-8x7B-Instruct model and tokenizer
+# Load the Mixtral-8x7B-Instruct model and tokenizer with authorization header
 model_name = 'mistralai/Mistral-7B-Instruct-v0.3'
-tokenizer = AutoTokenizer.from_pretrained(model_name)
-model = AutoModelForCausalLM.from_pretrained(model_name)
+headers = {"Authorization": f"Bearer {hf_token}"}
+
+# Ensure sentencepiece is installed
+try:
+    import sentencepiece
+except ImportError:
+    raise ImportError("The sentencepiece library is required for this tokenizer. Please install it with `pip install sentencepiece`.")
+
+tokenizer = AutoTokenizer.from_pretrained(model_name, use_auth_token=hf_token)
+model = AutoModelForCausalLM.from_pretrained(model_name, use_auth_token=hf_token)
 
 # Check if a GPU is available and if not, fall back to CPU
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
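
A few notes on this hunk: the new comment still says Mixtral-8x7B while the checkpoint actually loaded is Mistral-7B-Instruct-v0.3; the headers dict is built but never passed to either from_pretrained call, so it has no effect; and use_auth_token, while still accepted, is deprecated in recent transformers releases in favor of token (and is redundant anyway once login(token=hf_token) has cached the credential). A minimal sketch of the loading step under those assumptions:

# Sketch, assuming a recent transformers release where `token` supersedes the
# deprecated `use_auth_token` argument; the unused `headers` dict is dropped.
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "mistralai/Mistral-7B-Instruct-v0.3"
tokenizer = AutoTokenizer.from_pretrained(model_name, token=hf_token)
model = AutoModelForCausalLM.from_pretrained(model_name, token=hf_token)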
@@ -118,7 +125,7 @@ search_results = google_search(search_term, num_results=3)
 combined_text = "\n\n".join(result['text'] for result in search_results if result['text'])
 
 # Tokenize the input text
-inputs = tokenizer(combined_text, return_tensors="pt")
+inputs = tokenizer(combined_text, return_tensors="pt").to(device)
 
 # Generate a response
 outputs = model.generate(**inputs, max_length=150, temperature=0.7, top_p=0.9, top_k=50)
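
One more note: this hunk moves the tokenized inputs to device, but the model itself is never moved there, so generation on a GPU machine would fail with a device mismatch. max_length=150 also caps prompt and completion together, and the sampling parameters (temperature, top_p, top_k) only take effect when do_sample=True is set. A sketch of the generation step with those adjustments, keeping the commit's parameter values:

# Sketch: keep model and inputs on the same device, bound only the newly
# generated tokens, and enable sampling so temperature/top_p/top_k apply.
model = model.to(device)
inputs = tokenizer(combined_text, return_tensors="pt").to(device)
outputs = model.generate(**inputs, max_new_tokens=150, do_sample=True,
                         temperature=0.7, top_p=0.9, top_k=50)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))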
 