vaniagrawal committed on
Commit
081b87c
·
verified ·
1 Parent(s): 79342f8

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +4 -3
app.py CHANGED
@@ -7,13 +7,14 @@ hf_token = os.environ.get("HF_HOME", None)
7
 
8
  # Load the model and tokenizer
9
  model_name = "meta-llama/CodeLlama-7b-hf"
10
- model = AutoModelForCausalLM.from_pretrained(model_name, token=hf_token)
11
  tokenizer = AutoTokenizer.from_pretrained(model_name, token=hf_token)
12
 
13
 
14
  def generate_code(prompt):
15
- inputs = tokenizer(prompt, return_tensors="pt")
16
- outputs = model.generate(inputs["input_ids"], max_length=200)
 
17
  code = tokenizer.decode(outputs[0], skip_special_tokens=True)
18
  return code
19
 
 
# Load the model and tokenizer once at module import time.
# NOTE(review): fp16 weights with device_map="auto" shard the model across
# available devices via accelerate — confirm accelerate is installed.
# NOTE(review): hf_token is read from the environment earlier in the file —
# verify it actually holds an access token (see HF_TOKEN vs HF_HOME).
model_name = "meta-llama/CodeLlama-7b-hf"
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    token=hf_token,
    torch_dtype="float16",
    device_map="auto",
)
tokenizer = AutoTokenizer.from_pretrained(model_name, token=hf_token)
14
def generate_code(prompt):
    """Generate code from a text prompt with the module-level CodeLlama model.

    Parameters
    ----------
    prompt : str
        Text to condition the generation on.

    Returns
    -------
    str
        The decoded model output with special tokens stripped.
    """
    # BUG FIX: the original tokenized an undefined name `input_texts`,
    # so every call raised NameError — the function's parameter is `prompt`.
    inputs = tokenizer(
        prompt,
        return_tensors="pt",
        padding=True,
        truncation=True,
        max_length=512,
    )
    # Forward the attention mask so any padded positions are ignored;
    # the original requested padding but never passed the mask to generate().
    outputs = model.generate(
        inputs["input_ids"],
        attention_mask=inputs["attention_mask"],
        max_length=512,
        num_return_sequences=1,  # single prompt in, single completion out
    )
    # Only the first (and only) sequence is decoded.
    code = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return code
20