sugiv committed on
Commit
c6ebbc4
·
1 Parent(s): 99c0ca6

Adding a simple monkey search for Leetcode - Darn LeetMonkey

Browse files
Files changed (1) hide show
  1. app.py +5 -17
app.py CHANGED
@@ -93,23 +93,11 @@ def generate_response(user_query, top_k=5):
93
  user_prompt = f"Based on the following query, recommend relevant LeetCode problems:\n{user_query}"
94
  full_prompt = f"{system_prompt}\n\n{few_shot_prompt}\n{user_prompt}\n\nRecommendations:"
95
 
96
- input_ids = tokenizer.encode(full_prompt, return_tensors="pt").to(device)
97
- attention_mask = torch.ones_like(input_ids)
98
-
99
- with torch.no_grad():
100
- output = model.generate(
101
- input_ids,
102
- attention_mask=attention_mask,
103
- max_new_tokens=100, # Adjust as needed
104
- do_sample=True,
105
- top_p=0.9,
106
- temperature=0.7,
107
- num_return_sequences=1,
108
- pad_token_id=tokenizer.eos_token_id
109
- )
110
-
111
- response = tokenizer.decode(output[0], skip_special_tokens=True)
112
- recommendations = response.split("Recommendations:")[1].strip()
113
  return recommendations
114
 
115
  # Create Gradio interface
 
93
  user_prompt = f"Based on the following query, recommend relevant LeetCode problems:\n{user_query}"
94
  full_prompt = f"{system_prompt}\n\n{few_shot_prompt}\n{user_prompt}\n\nRecommendations:"
95
 
96
+ # Generate response using Llama model
97
+ response = llm(full_prompt, max_tokens=150, temperature=0.7, top_p=0.9)
98
+
99
+ # Extract the generated recommendations
100
+ recommendations = response['choices'][0]['text'].strip()
 
 
 
 
 
 
 
 
 
 
 
 
101
  return recommendations
102
 
103
  # Create Gradio interface