nadiamaqbool81 committed
Commit 9e6998b · 1 Parent(s): 7af0ad0

Update app.py

Files changed (1): app.py (+5, -4)
app.py CHANGED
@@ -44,23 +44,24 @@ def the_process(input_text, model_choice):
     tokenizer = AutoTokenizer.from_pretrained("nadiamaqbool81/starcoderbase-1b-hf_python")
     model = AutoModelForCausalLM.from_pretrained("nadiamaqbool81/starcoderbase-1b-hf_python")
     output = run_predict(input_text, model, tokenizer)
-    print("output" , output)
+    print("output starcoder python" , output)
     if(model_choice==0):
         if(javaFlag == "false"):
-            print("Inside starcoder for python")
+            print("Inside starcoder for java")
             tokenizer = AutoTokenizer.from_pretrained("nadiamaqbool81/starcoderbase-1b-hf")
             model = AutoModelForCausalLM.from_pretrained("nadiamaqbool81/starcoderbase-1b-hf")
             output = run_predict(input_text, model, tokenizer)
-            print("output" , output)
+            print("output starcoder java" , output)
     else:
         a_variable = model_box[model_choice]
         output = a_variable(input_text)
+        print("output other" , output)
     return(output)
 
 
 def run_predict(text, model, tokenizer):
     prompt = text
-    pipe = pipeline(task="text-generation", model=model, tokenizer=tokenizer, max_length=300)
+    pipe = pipeline(task="text-generation", model=model, tokenizer=tokenizer, max_length=400)
     result = pipe(f"<s>[INST] {prompt} [/INST]")
     arr = result[0]['generated_text'].split('[/INST]')
     return arr[1]
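
For reference, here is a minimal standalone sketch of the run_predict path as it stands after this commit. It assumes the transformers library is installed and uses the nadiamaqbool81/starcoderbase-1b-hf_python checkpoint named in the diff; the example prompt at the bottom is hypothetical, not taken from the repo.

# Sketch only: reproduces the generation path after this commit.
# Assumes transformers is installed; the model repo is the one named in the diff.
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

def run_predict(text, model, tokenizer):
    # Wrap the prompt in the [INST] template, generate up to 400 tokens
    # (raised from 300 in this commit), and keep only the completion.
    pipe = pipeline(task="text-generation", model=model, tokenizer=tokenizer, max_length=400)
    result = pipe(f"<s>[INST] {text} [/INST]")
    # Splitting on [/INST] relies on the pipeline's default of echoing the
    # prompt inside generated_text; arr[1] is the text after the marker.
    arr = result[0]['generated_text'].split('[/INST]')
    return arr[1]

tokenizer = AutoTokenizer.from_pretrained("nadiamaqbool81/starcoderbase-1b-hf_python")
model = AutoModelForCausalLM.from_pretrained("nadiamaqbool81/starcoderbase-1b-hf_python")
# Hypothetical prompt, for illustration only.
print(run_predict("Write a Python function that reverses a string.", model, tokenizer))

One note on the max_length bump: in the transformers text-generation pipeline, max_length counts prompt plus generated tokens, so raising it from 300 to 400 gives longer prompts more room before the completion is truncated.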