/**
 * Copyright (c) 2023 MERCENARIES.AI PTE. LTD.
 * All rights reserved.
 */

import { LLM, BaseLLMParams } from "langchain/llms";

// `omnilog` is a logger provided globally by the host application;
// declared here so this module type-checks on its own.
declare const omnilog: { log: (...args: unknown[]) => void };

interface AlpacaInput {
  /** Sampling temperature to use */
  temperature: number;

  /**
   * Maximum number of tokens to generate in the completion.
   */
  maxTokens: number;
  /** Host application handle that exposes the Alpaca API client */
  app: any;
  /** Model to use */
  model: string;
}

export class AlpacaLLM extends LLM implements AlpacaInput {
  temperature = 0;
  maxTokens = 250;
  model: string;
  app: any;

  constructor(fields?: Partial<AlpacaInput> & BaseLLMParams) {
    super(fields ?? {});
    this.maxTokens = fields?.maxTokens ?? this.maxTokens;
    this.temperature = fields?.temperature ?? this.temperature;
    this.model = fields?.model ?? "alpaca";
    this.app = fields?.app;
  }

  _llmType() {
    return "alpaca";
  }

  async _call(prompt: string, _stop?: string[]): Promise<string> {
    // Build the completion request, honoring this instance's sampling settings.
    const payload = {
      user_id: "user",
      use_cache: true,
      do_sample: true,
      repetition_penalty: 1.1,
      temperature: this.temperature,
      message: prompt,
      max_new_tokens: this.maxTokens,
    };
    omnilog.log(payload);

    try {
      // Forward the request to the host app's Alpaca completion endpoint.
      return await this.app.api2.alpaca.createCompletion(payload);
    } catch (error) {
      omnilog.log(error);
      throw new Error("Could not get a completion from the Alpaca endpoint.");
    }
  }
}
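
/*
 * Usage sketch (hypothetical, not part of this module): `app` stands in for
 * the host application object that exposes `api2.alpaca.createCompletion`;
 * its real shape is defined by the host, not here.
 *
 *   const llm = new AlpacaLLM({ app, temperature: 0.7, maxTokens: 128 });
 *   const completion = await llm.call("Summarize what an LLM wrapper does.");
 *   omnilog.log(completion);
 */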