hanzla committed
Commit bb0b5aa · 1 Parent(s): 057d644

sliders added

Files changed (1): app.py +2 -30
app.py CHANGED
@@ -23,14 +23,6 @@ device = "cuda"
 adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-2", torch_dtype=torch.float16)
 model_id = "SG161222/Realistic_Vision_V5.1_noVAE"
 
-model_llm = AutoModelForCausalLM.from_pretrained(
-    "microsoft/Phi-3-mini-128k-instruct",
-    device_map="cuda",
-    torch_dtype="auto",
-    trust_remote_code=True,
-)
-tokenizer_llm = AutoTokenizer.from_pretrained("microsoft/Phi-3-mini-128k-instruct")
-
 pipe = AnimateDiffPipeline.from_pretrained(model_id, motion_adapter=adapter, torch_dtype=torch.float16).to(device)
 scheduler = DDIMScheduler.from_pretrained(
     model_id,
@@ -47,26 +39,6 @@ def generate_video(prompt,negative_prompt, guidance_scale, num_inference_steps,
 
     pipe.to(device)
 
-    messages = [
-        {"role": "user", "content": "You have to complete my given prompt into a complete description. The purpose of this description is to describe a video generation. Follow the order of the prompt. My Prompt: " + prompt},
-    ]
-
-    pipe_llm = pipeline(
-        "text-generation",
-        model=model_llm,
-        tokenizer=tokenizer_llm,
-        device_map='auto'
-    )
-    generation_args = {
-        "max_new_tokens": 512,
-        "return_full_text": False,
-        "temperature": 0.0,
-        "do_sample": False,
-    }
-
-    output = pipe_llm(messages, **generation_args)
-    print(output[0]['generated_text'])
-
     # Set adapters based on user selection
     if adapter_choices:
         for i in range(len(adapter_choices)):
@@ -78,7 +50,7 @@ def generate_video(prompt,negative_prompt, guidance_scale, num_inference_steps,
     print(adapter_choices)
 
     output = pipe(
-        prompt=output[0]['generated_text'],
+        prompt=prompt,
         negative_prompt=negative_prompt,
         num_frames=16,
         guidance_scale=guidance_scale,
@@ -92,7 +64,7 @@ def generate_video(prompt,negative_prompt, guidance_scale, num_inference_steps,
 
 
 iface = gr.Interface(
-    theme=gr.themes.Soft(primary_hue="red", secondary_hue="pink"),
+    theme=gr.themes.Soft(primary_hue="indigo", secondary_hue="violet"),
     fn=generate_video,
     inputs=[
         gr.Textbox(label="Enter your prompt"),
 
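In short, the commit removes the Phi-3-mini prompt-expansion step, passes the user's prompt directly to the AnimateDiff pipeline, and switches the Gradio theme from red/pink to indigo/violet. Below is a minimal sketch of the core of app.py after this commit. The DDIMScheduler keyword arguments, the slider ranges (inferred from the "sliders added" commit message), the adapter-selection handling, and the GIF export step are assumptions, since the diff truncates or omits them.

# Minimal sketch of app.py after this commit; not the full file.
# Assumptions are marked in comments below.
import torch
import gradio as gr
from diffusers import AnimateDiffPipeline, DDIMScheduler, MotionAdapter
from diffusers.utils import export_to_gif

device = "cuda"
adapter = MotionAdapter.from_pretrained(
    "guoyww/animatediff-motion-adapter-v1-5-2", torch_dtype=torch.float16
)
model_id = "SG161222/Realistic_Vision_V5.1_noVAE"
pipe = AnimateDiffPipeline.from_pretrained(
    model_id, motion_adapter=adapter, torch_dtype=torch.float16
).to(device)
# Assumed scheduler config: the diff shows only the opening of this call.
pipe.scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")


def generate_video(prompt, negative_prompt, guidance_scale, num_inference_steps):
    # After this commit the user's prompt goes straight to the pipeline; the
    # Phi-3 rewrite is gone. (The adapter_choices block from app.py is
    # omitted here for brevity.)
    output = pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        num_frames=16,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
    )
    path = "animation.gif"
    export_to_gif(output.frames[0], path)  # frames[0]: list of PIL images
    return path


iface = gr.Interface(
    theme=gr.themes.Soft(primary_hue="indigo", secondary_hue="violet"),
    fn=generate_video,
    inputs=[
        gr.Textbox(label="Enter your prompt"),
        gr.Textbox(label="Negative prompt"),
        # Hypothetical slider ranges, consistent with the commit message.
        gr.Slider(1.0, 20.0, value=7.5, label="Guidance scale"),
        gr.Slider(10, 50, value=25, step=1, label="Inference steps"),
    ],
    # The output component is not shown in this diff; gr.Image can display
    # the exported GIF path.
    outputs=gr.Image(label="Generated animation"),
)

if __name__ == "__main__":
    iface.launch()

Dropping the Phi-3 rewrite means the app no longer loads a second multi-gigabyte model onto the GPU, and the diffusion pipeline receives exactly the prompt the user typed.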