drietsch committed
Commit 5446d2d · verified · 1 Parent(s): 81923a8

Update app.py

Files changed (1):
  1. app.py +41 -21
app.py CHANGED
@@ -1,13 +1,34 @@
 import gradio as gr
-from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
-import onnxruntime as ort
+from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
+import torch
 
-# Load the Phi-3.5-mini-instruct model and tokenizer
+# Set the random seed for reproducibility
+torch.random.manual_seed(0)
+
+# Load the model and tokenizer
 model_name = "microsoft/Phi-3.5-mini-instruct"
+model = AutoModelForCausalLM.from_pretrained(
+    model_name,
+    device_map="auto",
+    torch_dtype="auto",
+    trust_remote_code=True
+)
 tokenizer = AutoTokenizer.from_pretrained(model_name)
 
-# Load the ONNX model
-session = ort.InferenceSession(f"{model_name}/model.onnx")
+# Set up the pipeline
+pipe = pipeline(
+    "text-generation",
+    model=model,
+    tokenizer=tokenizer,
+)
+
+# Define the generation arguments
+generation_args = {
+    "max_new_tokens": 150,
+    "return_full_text": False,
+    "temperature": 0.7,
+    "do_sample": False,
+}
 
 # Simple HTML template for the website
 simple_website_template = """
@@ -41,22 +62,21 @@ simple_website_template = """
 
 # Function to generate personalized content using Phi-3.5-mini-instruct
 def personalize_website_llm(persona_text):
-    # Create a prompt for the model
-    prompt = f"Generate personalized website content for the following persona: {persona_text}. Provide a title and main content."
+    # Construct the conversation history
+    messages = [
+        {"role": "system", "content": "You are a helpful AI assistant that personalizes content for websites."},
+        {"role": "user", "content": f"Persona: {persona_text}. Generate a personalized website content including a title and a paragraph."},
+    ]
 
-    # Tokenize the prompt
-    inputs = tokenizer(prompt, return_tensors="np")
+    # Generate content using the pipeline
+    output = pipe(messages, **generation_args)
+    generated_text = output[0]['generated_text'].strip()
+
+    # Simple heuristic to split title and content
+    lines = generated_text.split('\n')
+    title = lines[0]
+    content = "\n".join(lines[1:])
 
-    # Run the ONNX model
-    ort_inputs = {session.get_inputs()[0].name: inputs["input_ids"]}
-    ort_outs = session.run(None, ort_inputs)
-
-    # Decode the output
-    generated_text = tokenizer.decode(ort_outs[0][0], skip_special_tokens=True)
-
-    # Split the response into a title and content
-    title, content = generated_text.split('\n', 1)
-
     # Set the title color and font size based on simple heuristics
     title_color = "#333"
     font_size = 16
@@ -73,8 +93,8 @@ def personalize_website_llm(persona_text):
     personalized_website = simple_website_template.format(
         title_color=title_color,
         font_size=font_size,
-        title=title.strip(),
-        content=content.strip()
+        title=title,
+        content=content
     )
 
     return personalized_website
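
The Gradio wiring that exposes personalize_website_llm is not part of this diff; it sits in the unchanged remainder of app.py. For context, a minimal sketch of how such a function is typically hooked up is shown below. The component labels, the demo variable name, and the interface title are assumptions for illustration, not taken from the repository.

# Hypothetical wiring (assumed, not shown in this diff): expose the
# personalization function as a text-in / HTML-out Gradio app.
import gradio as gr

demo = gr.Interface(
    fn=personalize_website_llm,                      # function defined in app.py above
    inputs=gr.Textbox(label="Persona description"),  # label is an assumption
    outputs=gr.HTML(label="Personalized website"),
    title="Phi-3.5-mini-instruct website personalizer",
)

if __name__ == "__main__":
    demo.launch()

With wiring along these lines, the HTML string returned by personalize_website_llm renders directly in the output pane of the app.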