Atharva Prashant Pawar committed
Commit 478270a · 1 Parent(s): 4e6eff0
Files changed (1):
  1. app.py +28 -3
app.py CHANGED
@@ -11,11 +11,36 @@ st.title("Mistral Model Integration")
 instruction = st.text_area("Enter your prompt:")
 
 # Function to interact with Mistral Model
+# def mistral_model(prompt, token_limit):
+#     # Your model loading and inference code here (from the code you provided)
+#     # ...
+
+#     return responses
+
 def mistral_model(prompt, token_limit):
-    # Your model loading and inference code here (from the code you provided)
-    # ...
+    # Initialize the model and tokenizer
+    model_name = "bn22/Mistral-7B-Instruct-v0.1-sharded"
+    adapters_name = "atharvapawar/flaskCodemistral-7b-mj-finetuned"
+    device = "cuda"  # Use "cuda" for GPU or "cpu" for CPU
+
+    tokenizer = AutoTokenizer.from_pretrained(model_name)
+    model = AutoModelForCausalLM.from_pretrained(model_name)
+
+    # Load the adapter
+    model = PeftModel.from_pretrained(model, adapters_name)
+
+    # Generate responses
+    text = "[INST]" + prompt + "[/INST]"
+    encoded = tokenizer(text, return_tensors="pt", add_special_tokens=False)
+    model.to(device)
+    generated_ids = model.generate(**encoded, max_length=token_limit, do_sample=True)
+    decoded = tokenizer.batch_decode(generated_ids)
+
+    return decoded
+
+
+
 
-    return responses
 
 # Check if the user entered a prompt
 if instruction:
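
The hunk shows only the changed region of app.py; the imports and the `if instruction:` handler sit outside it. For reference, here is a minimal sketch of how the surrounding file plausibly fits together. The import lines follow the standard streamlit/transformers/peft usage implied by the names in the hunk, but the `load_model` helper, the `@st.cache_resource` wrapper, the token-limit widget, and the body of the `if instruction:` block are assumptions, not part of this commit. As committed, `mistral_model` reloads the sharded 7B checkpoint and adapter on every call, and Streamlit reruns the whole script on each interaction, so caching the heavy objects is the main design change sketched here:

    # Sketch only: assumed scaffolding around the committed hunk, not the author's code.
    import streamlit as st
    import torch
    from transformers import AutoTokenizer, AutoModelForCausalLM
    from peft import PeftModel

    st.title("Mistral Model Integration")
    instruction = st.text_area("Enter your prompt:")

    @st.cache_resource  # assumed addition: load the model once per process, not per rerun
    def load_model():
        model_name = "bn22/Mistral-7B-Instruct-v0.1-sharded"
        adapters_name = "atharvapawar/flaskCodemistral-7b-mj-finetuned"
        tokenizer = AutoTokenizer.from_pretrained(model_name)
        model = AutoModelForCausalLM.from_pretrained(model_name)
        model = PeftModel.from_pretrained(model, adapters_name)  # attach the fine-tuned adapter
        return tokenizer, model

    def mistral_model(prompt, token_limit):
        tokenizer, model = load_model()
        device = "cuda" if torch.cuda.is_available() else "cpu"
        text = "[INST]" + prompt + "[/INST]"  # Mistral-Instruct prompt format
        encoded = tokenizer(text, return_tensors="pt", add_special_tokens=False).to(device)
        model.to(device)
        generated_ids = model.generate(**encoded, max_length=token_limit, do_sample=True)
        return tokenizer.batch_decode(generated_ids)

    # Check if the user entered a prompt
    if instruction:
        token_limit = st.number_input("Token limit", min_value=16, value=256)  # assumed widget
        st.write(mistral_model(instruction, token_limit)[0])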