chore: Refactor process_input function to support local and online browsing
main.py
@@ -1,3 +1,9 @@
+import io
+import os
+import multion
+import torch
+import instructor
+
 from fastapi import FastAPI, UploadFile, File, HTTPException, Form
 from fastapi.responses import JSONResponse
 from transformers import AutoModelForCausalLM, AutoTokenizer
@@ -5,12 +11,6 @@ from PIL import Image
 from openai import AsyncOpenAI
 from pydantic import BaseModel
 from rich import print
-
-import io
-import os
-import multion
-import torch
-import instructor
 from multion.client import MultiOn
 from dotenv import load_dotenv
 
@@ -111,9 +111,9 @@ async def process_input(text: str = Form(...), file: UploadFile = File(None), on
     command = await generate_command(processed_text)
     print(f"Command generated: {command}")
 
-    if online:
+    if online and command.local:
         try:
-            print(f"Calling MultiOn API with online={online}")
+            print(f"Calling MultiOn API with online={online} and local={command.local}")
             response = multion.browse(
                 cmd=command.cmd,
                 url=command.url,