loubnabnl (HF Staff) committed
Commit 416adec · 1 Parent(s): abba9be

update app

Files changed (1)
app.py  +11 -11
app.py CHANGED
@@ -4,25 +4,19 @@ from transformers import pipeline
 
 
 title = "CodeParrot Generator 🦜"
-description = "This is a subspace to make code generation with [CodeParrot](https://huggingface.co/lvwerra/codeparrot), it is used in a larger [space](loubnabnl/Code-generation-models-v1) for model comparison."
+description = "This is a subspace to make code generation with [CodeParrot](https://huggingface.co/lvwerra/codeparrot), it is used in a larger [space](https://huggingface.co/spaces/loubnabnl/Code-generation-models-v1) for model comparison."
 example = [
     ["def print_hello_world():", "Sample", 8, 42],
     ["def get_file_size(filepath):", "Sample", 22, 42]]
 tokenizer = AutoTokenizer.from_pretrained("lvwerra/codeparrot")
 model = AutoModelForCausalLM.from_pretrained("lvwerra/codeparrot", low_cpu_mem_usage=True)
-
 
-def code_generation(gen_prompt, strategy, max_tokens, seed=42):
+
+def code_generation(gen_prompt, max_tokens, temperature=0.6, seed=42):
     set_seed(seed)
     gen_kwargs = {}
-    gen_kwargs["do_sample"] = strategy == "Sample"
-    gen_kwargs["max_new_tokens"] = max_tokens
-    if gen_kwargs["do_sample"]:
-        gen_kwargs["temperature"] = 0.2
-        gen_kwargs["top_k"] = 0
-        gen_kwargs["top_p"] = 0.95
     pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
-    generated_text = pipe(gen_prompt, **gen_kwargs)[0]['generated_text']
+    generated_text = pipe(gen_prompt, do_sample=True, top_p=0.95, temperature=temperature, max_new_tokens=max_tokens)[0]['generated_text']
     return generated_text
 
 
@@ -30,7 +24,6 @@ iface = gr.Interface(
     fn=code_generation,
     inputs=[
         gr.Textbox(lines=10, label="Input code"),
-        gr.Dropdown(choices=["Greedy", "Sample"], value="Greedy"),
         gr.inputs.Slider(
             minimum=8,
             maximum=256,
@@ -38,6 +31,13 @@ iface = gr.Interface(
             default=8,
             label="Number of tokens to generate",
         ),
+        gr.inputs.Slider(
+            minimum=0,
+            maximum=2,
+            step=0.1,
+            default=0.6,
+            label="Temperature",
+        ),
         gr.inputs.Slider(
             minimum=0,
             maximum=1000,
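
For reference, a minimal sketch of how app.py might read after this commit. The imports, the token slider's step, the seed slider's remaining arguments, the outputs widget, and the Interface keyword arguments are not visible in the diff and are assumptions here; the examples list is also rewritten to match the new signature (the committed file still passes the old "Sample" strategy value), and the now-unused gen_kwargs = {} line is dropped.

import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline, set_seed

title = "CodeParrot Generator 🦜"
description = "This is a subspace to make code generation with [CodeParrot](https://huggingface.co/lvwerra/codeparrot), it is used in a larger [space](https://huggingface.co/spaces/loubnabnl/Code-generation-models-v1) for model comparison."
# Assumption: examples reordered to (prompt, max_tokens, temperature, seed)
# to match the new function signature.
example = [
    ["def print_hello_world():", 8, 0.6, 42],
    ["def get_file_size(filepath):", 22, 0.6, 42]]
tokenizer = AutoTokenizer.from_pretrained("lvwerra/codeparrot")
model = AutoModelForCausalLM.from_pretrained("lvwerra/codeparrot", low_cpu_mem_usage=True)


def code_generation(gen_prompt, max_tokens, temperature=0.6, seed=42):
    # Fix the RNG so the same prompt and settings reproduce the same output.
    set_seed(seed)
    pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
    # Always sample: nucleus sampling with top_p=0.95, temperature from the UI.
    generated_text = pipe(gen_prompt, do_sample=True, top_p=0.95,
                          temperature=temperature,
                          max_new_tokens=max_tokens)[0]['generated_text']
    return generated_text


iface = gr.Interface(
    fn=code_generation,
    inputs=[
        gr.Textbox(lines=10, label="Input code"),
        gr.inputs.Slider(minimum=8, maximum=256, step=1, default=8,
                         label="Number of tokens to generate"),
        gr.inputs.Slider(minimum=0, maximum=2, step=0.1, default=0.6,
                         label="Temperature"),
        # Assumption: the slider cut off at the end of the diff selects the seed.
        gr.inputs.Slider(minimum=0, maximum=1000, step=1, default=42,
                         label="Random seed"),
    ],
    outputs=gr.Textbox(label="Generated code"),  # assumption: not shown in diff
    title=title,
    description=description,
    examples=example,
)
iface.launch()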
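
At the pipeline level, the change amounts to the following (a comparison sketch reusing pipe from above; the "before" lines mirror the deleted gen_kwargs logic):

# Before: "Greedy" disabled sampling; "Sample" used a fixed temperature of 0.2
# with top_k=0 (no top-k filtering) and top_p=0.95.
old_greedy = pipe("def foo():", do_sample=False, max_new_tokens=8)
old_sample = pipe("def foo():", do_sample=True, temperature=0.2, top_k=0,
                  top_p=0.95, max_new_tokens=8)
# After: always sample with top_p=0.95; temperature is user-tunable (default 0.6).
new_call = pipe("def foo():", do_sample=True, top_p=0.95, temperature=0.6,
                max_new_tokens=8)

In the UI, the Greedy/Sample dropdown disappears and a Temperature slider (0 to 2, step 0.1, default 0.6) takes its place.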