Spaces:
Runtime error
Update app.py
app.py CHANGED
@@ -29,11 +29,23 @@ model = model.to(device) # Move model to appropriate device
 model = model.eval()
 
 # Override the chat function to remove hardcoded .cuda()
-def modified_chat(
-
-
-
-
+def modified_chat(tokenizer, temp_file_path, ocr_type='ocr', *args, **kwargs):
+    # Load the image data, perform OCR and get text
+    with open(temp_file_path, 'rb') as f:
+        image_data = f.read()
+
+    # Assuming OCR process to extract text from image
+    extracted_text = "some OCR processed text"  # Placeholder, replace with actual OCR result
+
+    # Tokenize the extracted text
+    inputs = tokenizer(extracted_text, return_tensors="pt", truncation=True, padding=True)
+
+    # Move input_ids to the appropriate device
+    input_ids = inputs['input_ids'].to(device)  # Use .to(device)
+
+    # Perform any necessary processing using the model
+    # Example: res = model(input_ids)  # Uncomment and implement model processing
+
     return f"Processed input: {input_ids}, OCR Type: {ocr_type}"
 
 # Replace the model's chat method with the modified version
@@ -123,28 +135,4 @@ if image_file is not None:
     st.markdown(highlighted_text)
 
     # Clean up the temporary file after use
-    os.remove(temp_file_path)
-
-    # Override the chat function to remove hardcoded .cuda()
-    def modified_chat(tokenizer, temp_file_path, ocr_type='ocr', *args, **kwargs):
-        # Load the image data, perform OCR and get text
-        with open(temp_file_path, 'rb') as f:
-            image_data = f.read()
-
-        # Assuming OCR process to extract text from image
-        # Use the OCR process here to obtain text. Replace 'extracted_text' with actual extracted text.
-        extracted_text = "some OCR processed text"  # Placeholder, replace with actual OCR result
-
-        # Tokenize the extracted text
-        inputs = tokenizer(extracted_text, return_tensors="pt", truncation=True, padding=True)
-
-        # Move input_ids to the appropriate device
-        input_ids = inputs['input_ids'].to(device)  # Move to the appropriate device
-        # Perform any necessary processing using the model
-        # Example: res = model(input_ids)
-
-        # Return the processed result
-        return f"Processed input: {input_ids}, OCR Type: {ocr_type}"
-
-    # Replace the model's chat method with the modified version
-    model.chat = modified_chat
+    os.remove(temp_file_path)
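The first hunk restores the modified_chat override and makes it device-agnostic: the model is already moved with .to(device) and the tokenized inputs now follow it, instead of relying on hard-coded .cuda() calls, presumably so the Space can also start without a GPU. Below is a minimal sketch of that pattern, not the app's actual code; the bert-base-uncased checkpoint and the example text are illustrative stand-ins.

import torch
from transformers import AutoModel, AutoTokenizer

# Pick the GPU when one is available, otherwise fall back to CPU.
device = "cuda" if torch.cuda.is_available() else "cpu"

# Illustrative checkpoint; the app loads its own OCR model.
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
model = AutoModel.from_pretrained("bert-base-uncased").to(device)  # .to(device) instead of .cuda()
model = model.eval()

# Inputs must live on the same device as the model.
inputs = tokenizer("some OCR processed text", return_tensors="pt", truncation=True, padding=True)
input_ids = inputs["input_ids"].to(device)

with torch.no_grad():
    outputs = model(input_ids)

print(outputs.last_hidden_state.shape)

On CPU-only hardware torch.cuda.is_available() returns False, so both the model and the inputs stay on the CPU, which is exactly the situation the hard-coded .cuda() calls could not handle.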
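The second hunk drops the duplicated modified_chat block and keeps only the cleanup of the temporary image after the result is rendered. A short sketch of that save-use-delete flow in Streamlit follows; the model.chat(tokenizer, temp_file_path, ocr_type='ocr') signature comes from the diff, everything else here is an assumption for illustration.

import os
import tempfile

import streamlit as st

image_file = st.file_uploader("Upload an image", type=["png", "jpg", "jpeg"])

if image_file is not None:
    # Persist the upload to a temporary file so code that expects a path can use it.
    with tempfile.NamedTemporaryFile(delete=False, suffix=".png") as tmp:
        tmp.write(image_file.getvalue())
        temp_file_path = tmp.name

    try:
        # Stand-in for the real OCR call, e.g. model.chat(tokenizer, temp_file_path, ocr_type='ocr').
        highlighted_text = f"Processed {temp_file_path}"
        st.markdown(highlighted_text)
    finally:
        # Clean up the temporary file after use, even if the OCR step raises.
        os.remove(temp_file_path)

The try/finally wrapper is an extra safeguard in this sketch; the commit itself simply calls os.remove(temp_file_path) after rendering.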