stillerman committed
Commit 1a43738 · Parent: e40b300

error handling

src/components/game-component.tsx CHANGED
@@ -65,7 +65,7 @@ const Switch = ({
 };
 
 type Message = {
-  role: "user" | "assistant" | "game" | "result";
+  role: "user" | "assistant" | "game" | "result" | "error";
   content: string;
   metadata?: {
     page?: string;
@@ -145,7 +145,7 @@ export default function GameComponent({
   const {
     status: modelStatus,
     partialText,
-    inference,
+    inference
   } = useInference({
     apiKey:
       window.localStorage.getItem("huggingface_access_token") || undefined,
@@ -228,12 +228,22 @@ export default function GameComponent({
       content: prompt,
     });
 
-    const modelResponse = await inference({
+    const {status, result: modelResponse} = await inference({
      model: model,
      prompt,
      maxTokens: maxTokens,
    });
 
+    if (status === "error") {
+      pushConvo({
+        role: "error",
+        content: "Error during inference: " + modelResponse,
+      });
+
+      setAutoRunning(false);
+      return;
+    }
+
    pushConvo({
      role: "assistant",
      content: modelResponse,
@@ -703,6 +713,12 @@ export default function GameComponent({
        </div>
      </div>
    );
+  } else if (message.role === "error") {
+    return (
+      <div className="p-2 rounded-lg bg-red-50 border border-red-100 text-xs">
+        <p>{message.content}</p>
+      </div>
+    );
   }
 
   return null;
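The component-side hunks add "error" as a first-class message role and give it its own render branch. A minimal standalone sketch of that shape, assembled from the diff above; renderErrorMessage is an illustrative helper, not the component's actual render path:

// Sketch only: the Message fields and the error bubble markup come from the
// diff above; the helper name and its use are illustrative assumptions.
import * as React from "react";

type Message = {
  role: "user" | "assistant" | "game" | "result" | "error";
  content: string;
  metadata?: { page?: string };
};

function renderErrorMessage(message: Message): React.ReactNode {
  if (message.role === "error") {
    // Error messages render as a small red bubble instead of the normal chat styles.
    return (
      <div className="p-2 rounded-lg bg-red-50 border border-red-100 text-xs">
        <p>{message.content}</p>
      </div>
    );
  }
  return null;
}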
src/lib/inference.tsx CHANGED
@@ -98,19 +98,26 @@ export function useInference({ apiKey }) {
       prompt,
       model,
       maxTokens,
+      provider = "fireworks-ai",
     }: {
       prompt: string;
       model: string;
       maxTokens: number;
+      provider: string;
     }) => {
       setIsLoading(true);
       setPartialText("");
 
       const client = new InferenceClient(apiKey);
 
+      // fireworks doesnt support max tokens
+      if (provider === "fireworks-ai") {
+        maxTokens = undefined;
+      }
+
       try {
         const stream = client.chatCompletionStream({
-          provider: "hyperbolic",
+          // provider,
           model,
           maxTokens,
           messages: [
@@ -132,12 +139,12 @@ export function useInference({ apiKey }) {
 
         setInferenceResult(result);
 
-        return result;
+        return {status: "success", result};
       } catch (error) {
         console.error("Error in inference", error);
         setError(error.message);
         setIsLoading(false);
-        return null;
+        return {status: "error", result: error.message};
       }
     };
 
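The inference.tsx hunks replace the old return-the-text-or-null behavior with a status-tagged result, which is what lets the game component branch on failure instead of checking for null. A minimal sketch of the implied contract; the InferenceResult name and the standalone caller are illustrative, not part of the repo:

// Illustrative only: the hook does not declare this type; it describes the
// shape returned by the changed code above.
type InferenceResult =
  | { status: "success"; result: string }
  | { status: "error"; result: string }; // result carries error.message on failure

// Hypothetical caller showing why no try/catch is needed at the call site.
async function runInference(
  inference: (args: { model: string; prompt: string; maxTokens: number }) => Promise<InferenceResult>,
): Promise<string | null> {
  const { status, result } = await inference({
    model: "some/model",
    prompt: "hello",
    maxTokens: 256,
  });

  if (status === "error") {
    console.error("Error during inference: " + result);
    return null;
  }
  return result;
}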