tboen1 committed
Commit 48e3923 · verified · 1 parent: 9e686cb

Update app.py

Files changed (1)
  1. app.py +28 -35
app.py CHANGED
@@ -5,40 +5,33 @@ import os
 
 tg.set_backward_engine("gpt-4o", override=True)
 
-# Step 1: Get an initial response from an LLM.
-engine = tg.get_engine(engine_name='gpt-4o')
-model = tg.BlackboxLLM(engine)
-
-question_string = ("If it takes 1 hour to dry 25 shirts under the sun, "
-                   "how long will it take to dry 30 shirts under the sun? "
-                   "Reason step by step")
-
-question = tg.Variable(question_string,
-                       role_description="question to the LLM",
-                       requires_grad=False)
-
-answer = model(question)
-st.write(answer)
-
-answer.set_role_description("concise and accurate answer to the question")
-
-# Step 2: Define the loss function and the optimizer, just like in PyTorch!
-# Here, we don't have SGD, but we have TGD (Textual Gradient Descent)
-# that works with "textual gradients".
-optimizer = tg.TGD(parameters=[answer])
-evaluation_instruction = (f"Here's a question: {question_string}. "
-                          "Evaluate any given answer to this question, "
-                          "be smart, logical, and very critical. "
-                          "Just provide concise feedback.")
-
-# TextLoss is a natural-language specified loss function that describes
-# how we want to evaluate the reasoning.
-loss_fn = tg.TextLoss(eval_system_prompt = evaluation_instruction)
-
-# Step 3: Do the loss computation, backward pass, and update the punchline.
-# Exact same syntax as PyTorch!
-loss = loss_fn(answer)
-st.write(loss)
+import textgrad as tg
+tg.set_backward_engine(tg.get_engine("gpt-4o"))
+
+initial_solution = """To solve the equation 3x^2 - 7x + 2 = 0, we use the quadratic formula:
+x = (-b ± √(b^2 - 4ac)) / 2a
+a = 3, b = -7, c = 2
+x = (7 ± √((-7)^2 + 4(3)(2))) / 6
+x = (7 ± √73) / 6
+The solutions are:
+x1 = (7 + √73)
+x2 = (7 - √73)"""
+
+solution = tg.Variable(initial_solution,
+                       requires_grad=True,
+                       role_description="solution to the math question")
+
+loss_system_prompt = tg.Variable("""You will evaluate a solution to a math question.
+Do not attempt to solve it yourself, do not give a solution, only identify errors. Be super concise.""",
+                                 requires_grad=False,
+                                 role_description="system prompt")
+
+loss_fn = tg.TextLoss(loss_system_prompt)
+optimizer = tg.TGD([solution])
+
+loss = loss_fn(solution)
+st.write(loss.value)
+
 loss.backward()
 optimizer.step()
-st.write(answer)
+st.write(solution.value)
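
Usage note: the evaluate / backward / step sequence in the new app.py performs a single refinement pass; it can be repeated to keep improving the solution. A minimal sketch of such a loop, reusing only the solution, loss_fn, and optimizer defined above (the round count and captions are illustrative assumptions, not part of this commit):

for round_idx in range(3):              # assumption: three refinement rounds
    loss = loss_fn(solution)            # textual evaluation of the current solution
    loss.backward()                     # propagate the textual feedback to `solution`
    optimizer.step()                    # rewrite `solution` using that feedback
    st.write(f"Refinement {round_idx + 1}:")
    st.write(solution.value)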