yaleh committed on
Commit 8f7e2cd · 1 Parent(s): fd8d9a5

Add parameter generating_batch_size and update UI.

Files changed (1)
  1. demo/sample_generator.ipynb +112 -132
demo/sample_generator.ipynb CHANGED
@@ -11,6 +11,10 @@
11
  "# Load configuration from YAML file\n",
12
  "config = {\n",
13
  " \"model_name\": \"llama3-70b-8192\",\n",
14
  "}\n"
15
  ]
16
  },
@@ -20,11 +24,23 @@
20
  "metadata": {},
21
  "outputs": [
22
  {
23
- "ename": "SyntaxError",
24
- "evalue": "invalid syntax (4272045825.py, line 148)",
25
  "output_type": "error",
26
  "traceback": [
27
- "\u001b[0;36m Cell \u001b[0;32mIn[2], line 148\u001b[0;36m\u001b[0m\n\u001b[0;31m \"additional_examples\": lambda x: (\u001b[0m\n\u001b[0m ^\u001b[0m\n\u001b[0;31mSyntaxError\u001b[0m\u001b[0;31m:\u001b[0m invalid syntax\n"
28
  ]
29
  }
30
  ],
@@ -34,15 +50,12 @@
34
  "from langchain.prompts import ChatPromptTemplate\n",
35
  "from langchain.chat_models import ChatOpenAI\n",
36
  "from langchain.schema.output_parser import StrOutputParser\n",
37
- "from langchain_core.runnables import RunnablePassthrough, RunnableParallel\n",
38
  "\n",
39
- "class TaskDescriptionGenerator:\n",
40
- " def __init__(self, model_temperature=0.7):\n",
41
- " self.model = ChatOpenAI(model=config[\"model_name\"], temperature=model_temperature)\n",
42
- " self.output_parser = StrOutputParser()\n",
43
- " \n",
44
- " self.description_prompt = ChatPromptTemplate.from_template(\n",
45
- " \"\"\"Given the following JSON example for a task type:\n",
46
  "{raw_example}\n",
47
  "\n",
48
  "Provide a concise description of the task type, including the format and style of the output.\n",
@@ -50,142 +63,103 @@
50
  "Format your response as follows:\n",
51
  "Task Description: [Your description here]\n",
52
  "\"\"\"\n",
53
- " )\n",
54
  "\n",
55
- " self.briefs_prompt = ChatPromptTemplate.from_template(\n",
56
- " \"\"\"Given the following task type description:\n",
57
- "Task Description: {description}\n",
58
- "\n",
59
- "Generate descriptions for 3 new examples with detailed attributes\n",
60
- "based on this task type.\n",
61
  "\n",
62
  "Format your response as a valid YAML object with a single key\n",
63
- "'brief_descriptions' containing a YAML array of 3 objects, each\n",
64
- "with a 'description' field.\n",
65
- "\"\"\"\n",
66
- " )\n",
67
  "\n",
68
- " self.examples_from_briefs_prompt = ChatPromptTemplate.from_template(\n",
69
- " \"\"\"Given the following task type description, brief description for new examples, \n",
70
- "and JSON example:\n",
71
  "\n",
72
- "Task Description: {description}\n",
73
- "Brief Description: {brief_description}\n",
74
  "\n",
75
  "Example:\n",
76
- "{raw_example}\n",
77
  "\n",
78
- "Generate 3 more input/output examples for this task type, based on the brief\n",
79
- "description, in the same JSON format.\n",
80
  "\n",
81
- "Format your response as a valid JSON object with a single key 'examples' \n",
82
- "containing a JSON array of 3 objects, each with 'input' and 'output' fields.\n",
83
- "\"\"\"\n",
84
- " )\n",
85
  "\n",
86
- " self.examples_prompt = ChatPromptTemplate.from_template(\n",
87
- " \"\"\"Given the following task type description, and input/output example:\n",
88
  "Task Description: {description}\n",
89
  "Example: {raw_example}\n",
90
  "\n",
91
- "Generate 3 new input/output examples for this task type.\n",
92
  "\n",
93
  "Format your response as a valid JSON object with a single key 'examples' \n",
94
- "containing a JSON array of 3 objects, each with 'input' and 'output' fields.\n",
95
  "\"\"\"\n",
96
- " )\n",
97
  "\n",
98
- " self.description_chain = self.description_prompt | self.model | self.output_parser\n",
99
- " self.briefs_chain = self.briefs_prompt | self.model | self.output_parser\n",
100
- " # bind json_object to the model\n",
101
- " json_model = self.model.bind(response_format={\"type\": \"json_object\"})\n",
102
- " self.examples_from_briefs_chain = self.examples_from_briefs_prompt | json_model | self.output_parser\n",
103
- " self.examples_chain = self.examples_prompt | json_model | self.output_parser\n",
104
- "\n",
105
- " def generate_description(self, raw_example_str):\n",
106
- " result = self.description_chain.invoke({\n",
107
- " \"raw_example\": raw_example_str\n",
108
- " })\n",
109
- " return result.split(\"Task Description: \")[1].strip()\n",
110
- "\n",
111
- " def generate_briefs(self, description):\n",
112
- " result = self.briefs_chain.invoke({\n",
113
- " \"description\": description\n",
114
- " })\n",
115
- " # return result.split(\"Brief Description: \")[1].strip()\n",
116
- " return result\n",
117
- "\n",
118
- " def generate_examples_from_briefs(self, description, brief_description, raw_example_str):\n",
119
- " result = self.examples_from_briefs_chain.invoke({\n",
120
- " \"description\": description,\n",
121
- " \"brief_description\": brief_description,\n",
122
- " \"raw_example\": raw_example_str\n",
123
- " })\n",
124
  "\n",
 
125
  " try:\n",
126
- " result = result.strip()\n",
127
- " if result.startswith('```') and result.endswith('```'):\n",
128
- " result = result.strip('```').strip()\n",
129
- " if result.startswith('json'):\n",
130
- " result = result.strip('json').strip()\n",
131
- " return json.loads(result)\n",
132
- " except json.JSONDecodeError as e:\n",
133
- " raise ValueError(f\"The generated examples are not in valid JSON format. Error: {str(e)} Result: {result}\")\n",
134
- " \n",
135
- " def generate_examples(self, description, example):\n",
136
- " result = self.examples_chain.invoke({\n",
137
- " \"description\": description,\n",
138
- " \"example\": example\n",
139
- " })\n",
140
- " \n",
141
- " try:\n",
142
- " result = result.strip()\n",
143
- " if result.startswith('```') and result.endswith('```'):\n",
144
- " result = result.strip('```').strip()\n",
145
- " if result.startswith('json'):\n",
146
- " result = result.strip('json').strip()\n",
147
- " return json.loads(result)\n",
148
- " except json.JSONDecodeError as e:\n",
149
- " raise ValueError(f\"The generated examples are not in valid JSON format. Error: {str(e)} Result: {result}\")\n",
150
- "\n",
151
- " def process(self, input_str):\n",
152
- " try:\n",
153
- " # Parse input\n",
154
- " parse_input = (\n",
155
- " RunnablePassthrough()\n",
156
- " | (lambda x: json.loads(x))\n",
157
- " | (lambda x: x if isinstance(x, dict) and 'input' in x and 'output' in x \n",
158
- " else (_ for _ in ()).throw(ValueError(\"Invalid input format. Expected an object with 'input' and 'output' fields.\")))\n",
159
- " )\n",
160
  "\n",
161
- " # Create the chain\n",
162
- " chain = (\n",
163
- " parse_input\n",
164
- " | (lambda x: json.dumps(x, ensure_ascii=False))\n",
165
- " | {\n",
166
- " \"description\": self.description_chain,\n",
167
- " \"raw_example\": lambda x: x\n",
168
- " }\n",
169
- " | {\n",
170
- " \"description\": lambda x: x[\"description\"],\n",
171
- " \"examples_from_briefs\": {\n",
172
- " \"description\": lambda x: x[\"description\"],\n",
173
- " \"brief_description\": lambda x: self.briefs_chain.invoke({\"description\": x[\"description\"]}),\n",
174
- " \"raw_example\": lambda x: x[\"raw_example\"]\n",
175
- " } | self.examples_from_briefs_chain,\n",
176
- " \"examples\": self.examples_chain\n",
177
- " }\n",
178
- " | RunnablePassthrough.assign(\n",
179
- " additional_examples = lambda x: (\n",
180
- " list(json.loads(x[\"examples_from_briefs\"])[\"examples\"])\n",
181
- " + list(json.loads(x[\"examples\"])[\"examples\"])\n",
182
- " )\n",
183
- " )\n",
184
- " | (lambda x: json.dumps(x, indent=2, ensure_ascii=False))\n",
185
- " )\n",
186
  "\n",
187
- " # Invoke the chain\n",
188
- " result = chain.invoke(input_str)\n",
189
  " return result\n",
190
  "\n",
191
  " except Exception as e:\n",
@@ -193,18 +167,24 @@
193
  "\n",
194
  "import gradio as gr\n",
195
  "\n",
196
- "def process_json(input_json):\n",
197
  " try:\n",
198
  " generator = TaskDescriptionGenerator()\n",
199
- " result = generator.process(input_json)\n",
200
- " return \"Process completed successfully. Result:\\n\" + result\n",
201
  " except Exception as e:\n",
202
- " return f\"An error occurred: {str(e)}\"\n",
203
  "\n",
204
  "demo = gr.Interface(\n",
205
  " fn=process_json,\n",
206
- " inputs=gr.Textbox(label=\"Input JSON\"),\n",
207
- " outputs=gr.Textbox(label=\"Output\"),\n",
208
  " title=\"Task Description Generator\",\n",
209
  " description=\"Enter a JSON object with 'input' and 'output' fields to generate a task description and additional examples.\"\n",
210
  ")\n",
 
11
  "# Load configuration from YAML file\n",
12
  "config = {\n",
13
  " \"model_name\": \"llama3-70b-8192\",\n",
14
+ " # \"model_name\": \"llama3-8b-8192\",\n",
15
+ " # \"model_name\": \"llama-3.1-70b-versatile\",\n",
16
+ " # \"model_name\": \"llama-3.1-8b-instant\",\n",
17
+ " # \"model_name\": \"gemma2-9b-it\",\n",
18
  "}\n"
19
  ]
20
  },
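The committed cell keeps the "# Load configuration from YAML file" comment but hardcodes the dict. A minimal sketch of actually reading it from a file, assuming a hypothetical config.yaml that carries the same model_name key:

import yaml

# "config.yaml" is an assumption; the committed cell hardcodes the dict instead.
with open("config.yaml") as f:
    config = yaml.safe_load(f)

# Fall back to the model name used in this commit if the file omits it.
config.setdefault("model_name", "llama3-70b-8192")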
 
24
  "metadata": {},
25
  "outputs": [
26
  {
27
+ "name": "stderr",
28
+ "output_type": "stream",
29
+ "text": [
30
+ "/home/yale/work/meta-prompt/.venv/lib/python3.10/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
31
+ " from .autonotebook import tqdm as notebook_tqdm\n"
32
+ ]
33
+ },
34
+ {
35
+ "ename": "TypeError",
36
+ "evalue": "Dataframe.__init__() got an unexpected keyword argument 'format'",
37
  "output_type": "error",
38
  "traceback": [
39
+ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
40
+ "\u001b[0;31mTypeError\u001b[0m Traceback (most recent call last)",
41
+ "Cell \u001b[0;32mIn[2], line 139\u001b[0m\n\u001b[1;32m 131\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m \u001b[38;5;167;01mException\u001b[39;00m \u001b[38;5;28;01mas\u001b[39;00m e:\n\u001b[1;32m 132\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m gr\u001b[38;5;241m.\u001b[39mError(\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mAn error occurred: \u001b[39m\u001b[38;5;132;01m{\u001b[39;00m\u001b[38;5;28mstr\u001b[39m(e)\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m\"\u001b[39m)\n\u001b[1;32m 134\u001b[0m demo \u001b[38;5;241m=\u001b[39m gr\u001b[38;5;241m.\u001b[39mInterface(\n\u001b[1;32m 135\u001b[0m fn\u001b[38;5;241m=\u001b[39mprocess_json,\n\u001b[1;32m 136\u001b[0m inputs\u001b[38;5;241m=\u001b[39m[gr\u001b[38;5;241m.\u001b[39mTextbox(label\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mInput JSON\u001b[39m\u001b[38;5;124m\"\u001b[39m),\n\u001b[1;32m 137\u001b[0m gr\u001b[38;5;241m.\u001b[39mSlider(label\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mGenerating Batch Size\u001b[39m\u001b[38;5;124m\"\u001b[39m, value\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m3\u001b[39m, minimum\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m1\u001b[39m, maximum\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m10\u001b[39m, step\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m1\u001b[39m)],\n\u001b[1;32m 138\u001b[0m outputs\u001b[38;5;241m=\u001b[39m[gr\u001b[38;5;241m.\u001b[39mTextbox(label\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mDescription\u001b[39m\u001b[38;5;124m\"\u001b[39m),\n\u001b[0;32m--> 139\u001b[0m \u001b[43mgr\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mDataFrame\u001b[49m\u001b[43m(\u001b[49m\u001b[43mlabel\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mExamples\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\n\u001b[1;32m 140\u001b[0m \u001b[43m \u001b[49m\u001b[43mheaders\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43m[\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mInput\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mOutput\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m]\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 141\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43mformat\u001b[39;49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mmarkdown\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m],\n\u001b[1;32m 142\u001b[0m title\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mTask Description Generator\u001b[39m\u001b[38;5;124m\"\u001b[39m,\n\u001b[1;32m 143\u001b[0m description\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mEnter a JSON object with \u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124minput\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124m and \u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124moutput\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124m fields to generate a task description and additional examples.\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 144\u001b[0m )\n\u001b[1;32m 146\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;18m__name__\u001b[39m \u001b[38;5;241m==\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m__main__\u001b[39m\u001b[38;5;124m\"\u001b[39m:\n\u001b[1;32m 147\u001b[0m demo\u001b[38;5;241m.\u001b[39mlaunch()\n",
42
+ "File \u001b[0;32m~/work/meta-prompt/.venv/lib/python3.10/site-packages/gradio/component_meta.py:163\u001b[0m, in \u001b[0;36mupdateable.<locals>.wrapper\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 161\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m\n\u001b[1;32m 162\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m--> 163\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mfn\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
43
+ "\u001b[0;31mTypeError\u001b[0m: Dataframe.__init__() got an unexpected keyword argument 'format'"
44
  ]
45
  }
46
  ],
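The stored traceback above was produced by passing format="markdown" to gr.DataFrame on an installation whose Dataframe constructor rejects that keyword; the source committed further down simply omits the argument. A defensive construction that degrades to the committed call when the keyword is rejected (a sketch, not part of the commit):

import gradio as gr

try:
    examples_output = gr.DataFrame(label="Examples", headers=["Input", "Output"],
                                   format="markdown")
except TypeError:
    # Dataframe.__init__() has no `format` kwarg here, as captured in the traceback above.
    examples_output = gr.DataFrame(label="Examples", headers=["Input", "Output"])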
 
50
  "from langchain.prompts import ChatPromptTemplate\n",
51
  "from langchain.chat_models import ChatOpenAI\n",
52
  "from langchain.schema.output_parser import StrOutputParser\n",
53
+ "from langchain_core.runnables import RunnablePassthrough, RunnableParallel, RunnableLambda\n",
54
+ "from langchain_core.output_parsers import JsonOutputParser\n",
55
+ "from langchain.output_parsers import YamlOutputParser\n",
56
  "\n",
57
+ "# Define prompt strings as constants\n",
58
+ "DESCRIPTION_PROMPT = \"\"\"Given the following JSON example for a task type:\n",
59
  "{raw_example}\n",
60
  "\n",
61
  "Provide a concise description of the task type, including the format and style of the output.\n",
 
63
  "Format your response as follows:\n",
64
  "Task Description: [Your description here]\n",
65
  "\"\"\"\n",
 
66
  "\n",
67
+ "BRIEFS_PROMPT = [\n",
68
+ " (\"system\", \"\"\"Given the task type description, generate descriptions for {generating_batch_size} new \n",
69
+ "examples with detailed attributes based on this task type. But don't provide any detailed task output.\n",
70
  "\n",
71
  "Format your response as a valid YAML object with a single key\n",
72
+ "'new_example_briefs' containing a YAML array of {generating_batch_size} objects, each\n",
73
+ "with a 'example_brief' field.\n",
74
+ "\"\"\"),\n",
75
+ " (\"user\", \"\"\"Task Description:\n",
76
  "\n",
77
+ "{description}\n",
78
  "\n",
79
+ "\"\"\")\n",
80
+ "]\n",
81
+ "\n",
82
+ "EXAMPLES_FROM_BRIEFS_PROMPT = [\n",
83
+ " (\"system\", \"\"\"Given the task type description, brief descriptions for new examples, \n",
84
+ "and JSON example, generate 3 more input/output examples for this task type,\n",
85
+ "strictly based on the brief descriptions. Ensure that the new examples are\n",
86
+ "consistent with the brief descriptions and do not introduce any new information\n",
87
+ "not present in the briefs.\n",
88
+ "\n",
89
+ "Format your response as a valid JSON object with a single key 'examples' \n",
90
+ "containing a JSON array of {generating_batch_size} objects, each with 'input' and 'output' fields.\n",
91
+ "\"\"\"),\n",
92
+ " (\"user\", \"\"\"Task Description:\n",
93
+ "\n",
94
+ "{description}\n",
95
+ "\n",
96
+ "New Example Briefs: \n",
97
+ "\n",
98
+ "{new_example_briefs}\n",
99
  "\n",
100
  "Example:\n",
 
101
  "\n",
102
+ "{raw_example}\n",
 
103
  "\n",
104
+ "\"\"\")\n",
105
+ "]\n",
106
  "\n",
107
+ "EXAMPLES_PROMPT = \"\"\"Given the following task type description, and input/output example:\n",
 
108
  "Task Description: {description}\n",
109
  "Example: {raw_example}\n",
110
  "\n",
111
+ "Generate {generating_batch_size} new input/output examples for this task type.\n",
112
  "\n",
113
  "Format your response as a valid JSON object with a single key 'examples' \n",
114
+ "containing a JSON array of {generating_batch_size} objects, each with 'input' and 'output' fields.\n",
115
  "\"\"\"\n",
 
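A short sketch of how the new {generating_batch_size} placeholder is substituted when the briefs prompt is formatted; the description string is purely illustrative:

from langchain.prompts import ChatPromptTemplate

briefs_prompt = ChatPromptTemplate.from_messages(BRIEFS_PROMPT)
messages = briefs_prompt.format_messages(
    description="Short arithmetic word problems answered with a single number.",  # illustrative
    generating_batch_size=5,  # the parameter introduced by this commit
)
print(messages[0].content)  # system message with generating_batch_size substituted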
116
  "\n",
117
+ "class TaskDescriptionGenerator:\n",
118
+ " def __init__(self, model_temperature=0.7): \n",
119
+ " self.description_prompt = ChatPromptTemplate.from_template(DESCRIPTION_PROMPT)\n",
120
+ " self.briefs_prompt = ChatPromptTemplate.from_messages(BRIEFS_PROMPT)\n",
121
+ " self.examples_from_briefs_prompt = ChatPromptTemplate.from_messages(EXAMPLES_FROM_BRIEFS_PROMPT)\n",
122
+ " self.examples_prompt = ChatPromptTemplate.from_template(EXAMPLES_PROMPT)\n",
123
+ "\n",
124
+ " model = ChatOpenAI(model=config[\"model_name\"], temperature=model_temperature)\n",
125
+ " json_model = model.bind(response_format={\"type\": \"json_object\"})\n",
126
+ "\n",
127
+ " output_parser = StrOutputParser()\n",
128
+ " json_parse = JsonOutputParser()\n",
129
+ "\n",
130
+ " self.description_chain = self.description_prompt | model | output_parser\n",
131
+ " self.briefs_chain = self.briefs_prompt | model | output_parser\n",
132
+ " self.examples_from_briefs_chain = self.examples_from_briefs_prompt | json_model | json_parse\n",
133
+ " self.examples_chain = self.examples_prompt | json_model | json_parse\n",
134
+ "\n",
135
+ " self.chain = (\n",
136
+ " RunnablePassthrough.assign(raw_example = lambda x: json.dumps(x[\"example\"], ensure_ascii=False))\n",
137
+ " | RunnablePassthrough.assign(description = self.description_chain)\n",
138
+ " | {\n",
139
+ " \"description\": lambda x: x[\"description\"],\n",
140
+ " \"examples_from_briefs\": RunnablePassthrough.assign(new_example_briefs = lambda x: self.briefs_chain.invoke(x)) | self.examples_from_briefs_chain,\n",
141
+ " \"examples\": self.examples_chain\n",
142
+ " }\n",
143
+ " | RunnablePassthrough.assign(\n",
144
+ " additional_examples=lambda x: (\n",
145
+ " list(x[\"examples_from_briefs\"][\"examples\"])\n",
146
+ " + list(x[\"examples\"][\"examples\"])\n",
147
+ " )\n",
148
+ " )\n",
149
+ " )\n",
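Invoking the chain defined above directly should return the dict assembled in its last two steps; a sketch, assuming an OpenAI-compatible endpoint serving config["model_name"] is reachable and the payload below is illustrative:

generator = TaskDescriptionGenerator()
result = generator.chain.invoke({
    "example": {"input": "Translate 'hello' to French.", "output": "bonjour"},
    "generating_batch_size": 3,
})
# Keys produced by the chain:
# result["description"]          -> task description string
# result["examples_from_briefs"] -> {"examples": [...]} parsed by JsonOutputParser
# result["examples"]             -> {"examples": [...]} parsed by JsonOutputParser
# result["additional_examples"]  -> the two example lists concatenated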
150
  "\n",
151
+ " def process(self, input_str, generating_batch_size=3):\n",
152
  " try:\n",
153
+ " # Parse input string to a dictionary\n",
154
+ " example_dict = json.loads(input_str) if input_str.startswith('{') else yaml.safe_load(input_str)\n",
155
+ " if not isinstance(example_dict, dict) or 'input' not in example_dict or 'output' not in example_dict:\n",
156
+ " raise ValueError(\"Invalid input format. Expected an object with 'input' and 'output' fields.\")\n",
157
  "\n",
158
+ " # Move the original content to a key named 'example'\n",
159
+ " input_dict = {\"example\": example_dict, \"generating_batch_size\": generating_batch_size}\n",
160
  "\n",
161
+ " # Invoke the chain with the parsed input dictionary\n",
162
+ " result = self.chain.invoke(input_dict)\n",
163
  " return result\n",
164
  "\n",
165
  " except Exception as e:\n",
 
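Before the Gradio wiring below, a sketch of calling the class directly; the JSON string is illustrative and generating_batch_size is the parameter this commit adds:

generator = TaskDescriptionGenerator()
output = generator.process('{"input": "2 + 2", "output": "4"}', generating_batch_size=3)
print(output["description"])
print(output["additional_examples"])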
167
  "\n",
168
  "import gradio as gr\n",
169
  "\n",
170
+ "def process_json(input_json, generating_batch_size=3):\n",
171
  " try:\n",
172
  " generator = TaskDescriptionGenerator()\n",
173
+ " result = generator.process(input_json, generating_batch_size)\n",
174
+ " description = result[\"description\"]\n",
175
+ " examples = [[example[\"input\"], example[\"output\"]] for example in result[\"additional_examples\"]]\n",
176
+ " print(examples)\n",
177
+ " return description, examples\n",
178
  " except Exception as e:\n",
179
+ " raise gr.Error(f\"An error occurred: {str(e)}\")\n",
180
  "\n",
181
  "demo = gr.Interface(\n",
182
  " fn=process_json,\n",
183
+ " inputs=[gr.Textbox(label=\"Input JSON\"),\n",
184
+ " gr.Slider(label=\"Generating Batch Size\", value=3, minimum=1, maximum=10, step=1)],\n",
185
+ " outputs=[gr.Textbox(label=\"Description\"),\n",
186
+ " gr.DataFrame(label=\"Examples\", \n",
187
+ " headers=[\"Input\", \"Output\"])],\n",
188
  " title=\"Task Description Generator\",\n",
189
  " description=\"Enter a JSON object with 'input' and 'output' fields to generate a task description and additional examples.\"\n",
190
  ")\n",