Parallel run.

demo/sample_generator.ipynb  (CHANGED, +66 -55)
@@ -63,6 +63,7 @@
 "from langchain.prompts import ChatPromptTemplate\n",
 "from langchain.chat_models import ChatOpenAI\n",
 "from langchain.schema.output_parser import StrOutputParser\n",
+"from langchain_core.runnables import RunnablePassthrough, RunnableParallel\n",
 "\n",
 "class TaskDescriptionGenerator:\n",
 "    def __init__(self, model_temperature=0.7):\n",
@@ -71,56 +72,56 @@
 "        \n",
 "        self.description_prompt = ChatPromptTemplate.from_template(\n",
 "            \"\"\"Given the following JSON example for a task type:\n",
-
+"{raw_example}\n",
 "\n",
-
+"Provide a concise description of the task type, including the format and style of the output.\n",
 "\n",
-
-
-
+"Format your response as follows:\n",
+"Task Description: [Your description here]\n",
+"\"\"\"\n",
 "        )\n",
 "\n",
 "        self.briefs_prompt = ChatPromptTemplate.from_template(\n",
 "            \"\"\"Given the following task type description:\n",
-
+"Task Description: {description}\n",
 "\n",
-
-
+"Generate descriptions for 3 new examples with detailed attributes\n",
+"based on this task type.\n",
 "\n",
-
-
-
-
+"Format your response as a valid YAML object with a single key\n",
+"'brief_descriptions' containing a YAML array of 3 objects, each\n",
+"with a 'description' field.\n",
+"\"\"\"\n",
 "        )\n",
 "\n",
 "        self.examples_from_briefs_prompt = ChatPromptTemplate.from_template(\n",
 "            \"\"\"Given the following task type description, brief description for new examples, \n",
-
+"and JSON example:\n",
 "\n",
-
-
+"Task Description: {description}\n",
+"Brief Description: {brief_description}\n",
 "\n",
-
-
+"Example:\n",
+"{raw_example}\n",
 "\n",
-
-
+"Generate 3 more input/output examples for this task type, based on the brief\n",
+"description, in the same JSON format.\n",
 "\n",
-
-
-
+"Format your response as a valid JSON object with a single key 'examples' \n",
+"containing a JSON array of 3 objects, each with 'input' and 'output' fields.\n",
+"\"\"\"\n",
 "        )\n",
 "\n",
 "        self.examples_prompt = ChatPromptTemplate.from_template(\n",
 "            \"\"\"Given the following task type description, and input/output example:\n",
-
-
+"Task Description: {description}\n",
+"Example: {raw_example}\n",
 "\n",
-
+"Generate 3 new input/output examples for this task type.\n",
 "\n",
-
-
-
+"Format your response as a valid JSON object with a single key 'examples' \n",
+"containing a JSON array of 3 objects, each with 'input' and 'output' fields.\n",
+"\"\"\"\n",
 "        )\n",
 "\n",
 "        self.description_chain = self.description_prompt | self.model | self.output_parser\n",
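
The reworked templates above spell out the output contract explicitly. As a quick sanity check (not part of the commit; the sample raw_example value below is made up), the new description prompt can be formatted on its own to see the {raw_example} variable resolve; in the notebook the same template is piped into self.model and an output parser:

from langchain.prompts import ChatPromptTemplate

description_prompt = ChatPromptTemplate.from_template(
    """Given the following JSON example for a task type:
{raw_example}

Provide a concise description of the task type, including the format and style of the output.

Format your response as follows:
Task Description: [Your description here]
"""
)

# format_messages() resolves the template without needing a model or an API key.
messages = description_prompt.format_messages(
    raw_example='{"input": "2 + 2", "output": "4"}'
)
print(messages[0].content)
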
@@ -178,35 +179,45 @@
 "\n",
 "    def process(self, input_str):\n",
 "        try:\n",
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+"            # Parse input\n",
+"            parse_input = (\n",
+"                RunnablePassthrough()\n",
+"                | (lambda x: json.loads(x))\n",
+"                | (lambda x: x if isinstance(x, dict) and 'input' in x and 'output' in x \n",
+"                    else (_ for _ in ()).throw(ValueError(\"Invalid input format. Expected an object with 'input' and 'output' fields.\")))\n",
+"            )\n",
+"\n",
+"            # Create the chain\n",
+"            chain = (\n",
+"                parse_input\n",
+"                | (lambda x: json.dumps(x, ensure_ascii=False))\n",
+"                | {\n",
+"                    \"description\": self.description_chain,\n",
+"                    \"raw_example\": lambda x: x\n",
+"                }\n",
+"                | {\n",
+"                    \"examples_from_briefs\": {\n",
+"                        \"description\": lambda x: x[\"description\"],\n",
+"                        \"brief_description\": lambda x: self.briefs_chain.invoke({\"description\": x[\"description\"]}),\n",
+"                        \"raw_example\": lambda x: x[\"raw_example\"]\n",
+"                    } | self.examples_from_briefs_chain,\n",
+"                    \"examples\": self.examples_chain\n",
+"                }\n",
+"                | {\n",
+"                    \"additional_examples\": lambda x: (\n",
+"                        list(json.loads(x[\"examples_from_briefs\"])[\"examples\"])\n",
+"                        + list(json.loads(x[\"examples\"])[\"examples\"])\n",
+"                    )\n",
+"                }\n",
+"                | (lambda x: json.dumps(x, indent=2, ensure_ascii=False))\n",
+"            )\n",
+"\n",
+"            # Invoke the chain\n",
+"            result = chain.invoke(input_str)\n",
+"            return result\n",
+"\n",
 "        except Exception as e:\n",
 "            raise RuntimeError(f\"An error occurred during processing: {str(e)}\")\n",
-
 "\n",
 "import gradio as gr\n",
 "\n",