Source code for langchain.graphs.kuzu_graph
from typing import Any, Dict, List
[docs]class KuzuGraph:
"""Kùzu wrapper for graph operations.
*Security note*: Make sure that the database connection uses credentials
that are narrowly-scoped to only include necessary permissions.
Failure to do so may result in data corruption or loss, since the calling
code may attempt commands that would result in deletion, mutation
of data if appropriately prompted or reading sensitive data if such
data is present in the database.
The best way to guard against such negative outcomes is to (as appropriate)
limit the permissions granted to the credentials used with this tool.
See https://python.langchain.com/docs/security for more information.
"""
[docs] def __init__(self, db: Any, database: str = "kuzu") -> None:
try:
import kuzu
except ImportError:
raise ImportError(
"Could not import Kùzu python package."
"Please install Kùzu with `pip install kuzu`."
)
self.db = db
self.conn = kuzu.Connection(self.db)
self.database = database
self.refresh_schema()
@property
def get_schema(self) -> str:
"""Returns the schema of the Kùzu database"""
return self.schema
[docs] def query(self, query: str, params: dict = {}) -> List[Dict[str, Any]]:
"""Query Kùzu database"""
params_list = []
for param_name in params:
params_list.append([param_name, params[param_name]])
result = self.conn.execute(query, params_list)
column_names = result.get_column_names()
return_list = []
while result.has_next():
row = result.get_next()
return_list.append(dict(zip(column_names, row)))
return return_list
[docs] def refresh_schema(self) -> None:
"""Refreshes the Kùzu graph schema information"""
node_properties = []
node_table_names = self.conn._get_node_table_names()
for table_name in node_table_names:
current_table_schema = {"properties": [], "label": table_name}
properties = self.conn._get_node_property_names(table_name)
for property_name in properties:
property_type = properties[property_name]["type"]
list_type_flag = ""
if properties[property_name]["dimension"] > 0:
if "shape" in properties[property_name]:
for s in properties[property_name]["shape"]:
list_type_flag += "[%s]" % s
else:
for i in range(properties[property_name]["dimension"]):
list_type_flag += "[]"
property_type += list_type_flag
current_table_schema["properties"].append(
(property_name, property_type)
)
node_properties.append(current_table_schema)
relationships = []
rel_tables = self.conn._get_rel_table_names()
for table in rel_tables:
relationships.append(
"(:%s)-[:%s]->(:%s)" % (table["src"], table["name"], table["dst"])
)
rel_properties = []
for table in rel_tables:
current_table_schema = {"properties": [], "label": table["name"]}
properties_text = self.conn._connection.get_rel_property_names(
table["name"]
).split("\n")
for i, line in enumerate(properties_text):
# The first 3 lines define src, dst and name, so we skip them
if i < 3:
continue
if not line:
continue
property_name, property_type = line.strip().split(" ")
current_table_schema["properties"].append(
(property_name, property_type)
)
rel_properties.append(current_table_schema)
self.schema = (
f"Node properties: {node_properties}\n"
f"Relationships properties: {rel_properties}\n"
f"Relationships: {relationships}\n"
)
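A minimal usage sketch for the wrapper above; the on-disk database path and the Person node table are assumptions for illustration:
import kuzu
from langchain.graphs import KuzuGraph

db = kuzu.Database("./kuzu_db")  # hypothetical database path
graph = KuzuGraph(db)
print(graph.get_schema)  # property populated by refresh_schema()
rows = graph.query("MATCH (p:Person) RETURN p.name LIMIT 5")  # assumes a Person node table exists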
Source code for langchain.graphs.neptune_graph
from typing import Any, Dict, List, Optional, Tuple, Union
[docs]class NeptuneQueryException(Exception):
"""A class to handle queries that fail to execute"""
def __init__(self, exception: Union[str, Dict]):
if isinstance(exception, dict):
self.message = exception["message"] if "message" in exception else "unknown"
self.details = exception["details"] if "details" in exception else "unknown"
else:
self.message = exception
self.details = "unknown"
def get_message(self) -> str:
return self.message
def get_details(self) -> Any:
return self.details
[docs]class NeptuneGraph:
"""Neptune wrapper for graph operations.
Args:
host: endpoint for the database instance
port: port number for the database instance, default is 8182
use_https: whether to use secure connection, default is True
client: optional boto3 Neptune client
credentials_profile_name: optional AWS profile name
region_name: optional AWS region, e.g., us-west-2
service: optional service name, default is neptunedata
sign: optional, whether to sign the request payload, default is True
Example:
.. code-block:: python
graph = NeptuneGraph(
host='<my-cluster>',
port=8182
)
*Security note*: Make sure that the database connection uses credentials
that are narrowly-scoped to only include necessary permissions.
Failure to do so may result in data corruption or loss, since the calling
code may attempt commands that would result in deletion, mutation
of data if appropriately prompted or reading sensitive data if such
data is present in the database.
The best way to guard against such negative outcomes is to (as appropriate)
limit the permissions granted to the credentials used with this tool.
See https://python.langchain.com/docs/security for more information.
"""
[docs] def __init__(
self,
host: str,
port: int = 8182,
use_https: bool = True,
client: Any = None,
credentials_profile_name: Optional[str] = None,
region_name: Optional[str] = None,
service: str = "neptunedata",
sign: bool = True,
) -> None:
"""Create a new Neptune graph wrapper instance."""
try:
if client is not None:
self.client = client
else:
import boto3
if credentials_profile_name is not None:
session = boto3.Session(profile_name=credentials_profile_name)
else:
# use default credentials
session = boto3.Session()
client_params = {}
if region_name:
client_params["region_name"] = region_name
protocol = "https" if use_https else "http"
client_params["endpoint_url"] = f"{protocol}://{host}:{port}"
if sign:
self.client = session.client(service, **client_params)
else:
from botocore import UNSIGNED
from botocore.config import Config
self.client = session.client(
service,
**client_params,
config=Config(signature_version=UNSIGNED),
)
except ImportError:
raise ModuleNotFoundError(
"Could not import boto3 python package. "
"Please install it with `pip install boto3`."
)
except Exception as e:
if type(e).__name__ == "UnknownServiceError":
raise ModuleNotFoundError(
"NeptuneGraph requires a boto3 version 1.28.38 or greater."
"Please install it with `pip install -U boto3`."
) from e
else:
raise ValueError(
"Could not load credentials to authenticate with AWS client. "
"Please check that credentials in the specified "
"profile name are valid."
) from e
try:
self._refresh_schema()
except Exception as e:
raise NeptuneQueryException(
{
"message": "Could not get schema for Neptune database",
"detail": str(e),
}
)
@property
def get_schema(self) -> str:
"""Returns the schema of the Neptune database"""
return self.schema
[docs] def query(self, query: str, params: dict = {}) -> Dict[str, Any]:
"""Query Neptune database."""
try:
return self.client.execute_open_cypher_query(openCypherQuery=query)
except Exception as e:
raise NeptuneQueryException(
{
"message": "An error occurred while executing the query.",
"details": str(e),
}
)
def _get_summary(self) -> Dict:
try:
response = self.client.get_propertygraph_summary()
except Exception as e:
raise NeptuneQueryException(
{
"message": (
"Summary API is not available for this instance of Neptune,"
"ensure the engine version is >=1.2.1.0"
),
"details": str(e),
}
)
try:
summary = response["payload"]["graphSummary"]
except Exception:
raise NeptuneQueryException(
{
"message": "Summary API did not return a valid response.",
"details": response.content.decode(),
}
)
else:
return summary
def _get_labels(self) -> Tuple[List[str], List[str]]:
"""Get node and edge labels from the Neptune statistics summary"""
summary = self._get_summary()
n_labels = summary["nodeLabels"]
e_labels = summary["edgeLabels"]
return n_labels, e_labels
def _get_triples(self, e_labels: List[str]) -> List[str]:
triple_query = """
MATCH (a)-[e:`{e_label}`]->(b)
WITH a,e,b LIMIT 3000
RETURN DISTINCT labels(a) AS from, type(e) AS edge, labels(b) AS to
LIMIT 10
"""
triple_template = "(:`{a}`)-[:`{e}`]->(:`{b}`)"
triple_schema = []
for label in e_labels:
q = triple_query.format(e_label=label)
data = self.query(q)
for d in data["results"]:
triple = triple_template.format(
a=d["from"][0], e=d["edge"], b=d["to"][0]
)
triple_schema.append(triple)
return triple_schema
def _get_node_properties(self, n_labels: List[str], types: Dict) -> List:
node_properties_query = """ | lang/api.python.langchain.com/en/latest/_modules/langchain/graphs/neptune_graph.html |
a830323f4010-4 | node_properties_query = """
MATCH (a:`{n_label}`)
RETURN properties(a) AS props
LIMIT 100
"""
node_properties = []
for label in n_labels:
q = node_properties_query.format(n_label=label)
data = {"label": label, "properties": self.query(q)["results"]}
s = set({})
for p in data["properties"]:
for k, v in p["props"].items():
s.add((k, types[type(v).__name__]))
np = {
"properties": [{"property": k, "type": v} for k, v in s],
"labels": label,
}
node_properties.append(np)
return node_properties
def _get_edge_properties(self, e_labels: List[str], types: Dict[str, Any]) -> List:
edge_properties_query = """
MATCH ()-[e:`{e_label}`]->()
RETURN properties(e) AS props
LIMIT 100
"""
edge_properties = []
for label in e_labels:
q = edge_properties_query.format(e_label=label)
data = {"label": label, "properties": self.query(q)["results"]}
s = set({})
for p in data["properties"]:
for k, v in p["props"].items():
s.add((k, types[type(v).__name__]))
ep = {
"type": label,
"properties": [{"property": k, "type": v} for k, v in s],
}
edge_properties.append(ep)
return edge_properties
def _refresh_schema(self) -> None:
"""
Refreshes the Neptune graph schema information.
"""
types = {
"str": "STRING",
"float": "DOUBLE",
"int": "INTEGER",
"list": "LIST",
"dict": "MAP",
"bool": "BOOLEAN",
}
n_labels, e_labels = self._get_labels()
triple_schema = self._get_triples(e_labels)
node_properties = self._get_node_properties(n_labels, types)
edge_properties = self._get_edge_properties(e_labels, types)
self.schema = f"""
Node properties are the following:
{node_properties}
Relationship properties are the following:
{edge_properties}
The relationships are the following:
{triple_schema}
""" | lang/api.python.langchain.com/en/latest/_modules/langchain/graphs/neptune_graph.html |
Source code for langchain.graphs.networkx_graph
"""Networkx wrapper for graph operations."""
from __future__ import annotations
from typing import Any, List, NamedTuple, Optional, Tuple
KG_TRIPLE_DELIMITER = "<|>"
[docs]class KnowledgeTriple(NamedTuple):
"""A triple in the graph."""
subject: str
predicate: str
object_: str
[docs] @classmethod
def from_string(cls, triple_string: str) -> "KnowledgeTriple":
"""Create a KnowledgeTriple from a string."""
subject, predicate, object_ = triple_string.strip().split(", ")
subject = subject[1:]
object_ = object_[:-1]
return cls(subject, predicate, object_)
[docs]def parse_triples(knowledge_str: str) -> List[KnowledgeTriple]:
"""Parse knowledge triples from the knowledge string."""
knowledge_str = knowledge_str.strip()
if not knowledge_str or knowledge_str == "NONE":
return []
triple_strs = knowledge_str.split(KG_TRIPLE_DELIMITER)
results = []
for triple_str in triple_strs:
try:
kg_triple = KnowledgeTriple.from_string(triple_str)
except ValueError:
continue
results.append(kg_triple)
return results
[docs]def get_entities(entity_str: str) -> List[str]:
"""Extract entities from entity string."""
if entity_str.strip() == "NONE":
return []
else:
return [w.strip() for w in entity_str.split(",")]
[docs]class NetworkxEntityGraph:
"""Networkx wrapper for entity graph operations.
*Security note*: Make sure that the database connection uses credentials
that are narrowly-scoped to only include necessary permissions.
Failure to do so may result in data corruption or loss, since the calling
code may attempt commands that would result in deletion, mutation
of data if appropriately prompted or reading sensitive data if such
data is present in the database.
The best way to guard against such negative outcomes is to (as appropriate)
limit the permissions granted to the credentials used with this tool.
See https://python.langchain.com/docs/security for more information.
"""
[docs] def __init__(self, graph: Optional[Any] = None) -> None:
"""Create a new graph."""
try:
import networkx as nx
except ImportError:
raise ImportError(
"Could not import networkx python package. "
"Please install it with `pip install networkx`."
)
if graph is not None:
if not isinstance(graph, nx.DiGraph):
raise ValueError("Passed in graph is not of correct shape")
self._graph = graph
else:
self._graph = nx.DiGraph()
[docs] @classmethod
def from_gml(cls, gml_path: str) -> NetworkxEntityGraph:
try:
import networkx as nx
except ImportError:
raise ImportError(
"Could not import networkx python package. "
"Please install it with `pip install networkx`."
)
graph = nx.read_gml(gml_path)
return cls(graph)
[docs] def add_triple(self, knowledge_triple: KnowledgeTriple) -> None:
"""Add a triple to the graph."""
# Creates nodes if they don't exist
# Overwrites existing edges
if not self._graph.has_node(knowledge_triple.subject):
self._graph.add_node(knowledge_triple.subject)
if not self._graph.has_node(knowledge_triple.object_):
self._graph.add_node(knowledge_triple.object_)
self._graph.add_edge(
knowledge_triple.subject,
knowledge_triple.object_,
relation=knowledge_triple.predicate,
)
[docs] def delete_triple(self, knowledge_triple: KnowledgeTriple) -> None:
"""Delete a triple from the graph."""
if self._graph.has_edge(knowledge_triple.subject, knowledge_triple.object_):
self._graph.remove_edge(knowledge_triple.subject, knowledge_triple.object_)
[docs] def get_triples(self) -> List[Tuple[str, str, str]]:
"""Get all triples in the graph."""
return [(u, v, d["relation"]) for u, v, d in self._graph.edges(data=True)]
[docs] def get_entity_knowledge(self, entity: str, depth: int = 1) -> List[str]:
"""Get information about an entity."""
import networkx as nx
# TODO: Have more information-specific retrieval methods
if not self._graph.has_node(entity):
return []
results = []
for src, sink in nx.dfs_edges(self._graph, entity, depth_limit=depth):
relation = self._graph[src][sink]["relation"]
results.append(f"{src} {relation} {sink}")
return results
[docs] def write_to_gml(self, path: str) -> None:
import networkx as nx
nx.write_gml(self._graph, path)
[docs] def clear(self) -> None:
"""Clear the graph."""
self._graph.clear()
[docs] def get_topological_sort(self) -> List[str]:
"""Get a list of entity names in the graph sorted by causal dependence."""
import networkx as nx
return list(nx.topological_sort(self._graph))
[docs] def draw_graphviz(self, **kwargs: Any) -> None:
"""
Provides a better-looking drawing of the graph using Graphviz.
Usage in a jupyter notebook:
>>> from IPython.display import SVG
>>> self.draw_graphviz(prog="dot", path="web.svg")
>>> SVG('web.svg')
"""
from networkx.drawing.nx_agraph import to_agraph
try:
import pygraphviz # noqa: F401
except ImportError as e:
if e.name == "_graphviz":
"""
>>> e.msg # pygraphviz throws this error
ImportError: libcgraph.so.6: cannot open shared object file
"""
raise ImportError(
"Could not import graphviz debian package. "
"Please install it with:"
"`sudo apt-get update`"
"`sudo apt-get install graphviz graphviz-dev`"
)
else:
raise ImportError(
"Could not import pygraphviz python package. "
"Please install it with:"
"`pip install pygraphviz`."
)
graph = to_agraph(self._graph) # --> pygraphviz.agraph.AGraph
# pygraphviz.github.io/documentation/stable/tutorial.html#layout-and-drawing
graph.layout(prog=kwargs.get("prog", "dot"))
graph.draw(kwargs.get("path", "graph.svg"))
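A minimal usage sketch tying the helpers above together; the triples are invented for illustration:
graph = NetworkxEntityGraph()
for t in parse_triples("(Alice, knows, Bob)<|>(Bob, works at, Acme)"):
    graph.add_triple(t)
print(graph.get_triples())
# [('Alice', 'Bob', 'knows'), ('Bob', 'Acme', 'works at')]
print(graph.get_entity_knowledge("Alice", depth=2))
# ['Alice knows Bob', 'Bob works at Acme']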
Source code for langchain.graphs.neo4j_graph
from typing import Any, Dict, List, Optional
from langchain.graphs.graph_document import GraphDocument
from langchain.graphs.graph_store import GraphStore
from langchain.utils import get_from_env
node_properties_query = """
CALL apoc.meta.data()
YIELD label, other, elementType, type, property
WHERE NOT type = "RELATIONSHIP" AND elementType = "node"
WITH label AS nodeLabels, collect({property:property, type:type}) AS properties
RETURN {labels: nodeLabels, properties: properties} AS output
"""
rel_properties_query = """
CALL apoc.meta.data()
YIELD label, other, elementType, type, property
WHERE NOT type = "RELATIONSHIP" AND elementType = "relationship"
WITH label AS nodeLabels, collect({property:property, type:type}) AS properties
RETURN {type: nodeLabels, properties: properties} AS output
"""
rel_query = """
CALL apoc.meta.data()
YIELD label, other, elementType, type, property
WHERE type = "RELATIONSHIP" AND elementType = "node"
UNWIND other AS other_node
RETURN {start: label, type: property, end: toString(other_node)} AS output
"""
[docs]class Neo4jGraph(GraphStore):
"""Neo4j wrapper for graph operations.
*Security note*: Make sure that the database connection uses credentials
that are narrowly-scoped to only include necessary permissions.
Failure to do so may result in data corruption or loss, since the calling
code may attempt commands that would result in deletion, mutation
of data if appropriately prompted or reading sensitive data if such
data is present in the database.
The best way to guard against such negative outcomes is to (as appropriate)
limit the permissions granted to the credentials used with this tool.
See https://python.langchain.com/docs/security for more information.
"""
[docs] def __init__(
self,
url: Optional[str] = None,
username: Optional[str] = None,
password: Optional[str] = None,
database: str = "neo4j",
) -> None:
"""Create a new Neo4j graph wrapper instance."""
try:
import neo4j
except ImportError:
raise ValueError(
"Could not import neo4j python package. "
"Please install it with `pip install neo4j`."
)
url = get_from_env("url", "NEO4J_URI", url)
username = get_from_env("username", "NEO4J_USERNAME", username)
password = get_from_env("password", "NEO4J_PASSWORD", password)
database = get_from_env("database", "NEO4J_DATABASE", database)
self._driver = neo4j.GraphDatabase.driver(url, auth=(username, password))
self._database = database
self.schema: str = ""
self.structured_schema: Dict[str, Any] = {}
# Verify connection
try:
self._driver.verify_connectivity()
except neo4j.exceptions.ServiceUnavailable:
raise ValueError(
"Could not connect to Neo4j database. "
"Please ensure that the url is correct"
)
except neo4j.exceptions.AuthError:
raise ValueError(
"Could not connect to Neo4j database. "
"Please ensure that the username and password are correct" | lang/api.python.langchain.com/en/latest/_modules/langchain/graphs/neo4j_graph.html |
a7058a339ea3-2 | "Please ensure that the username and password are correct"
)
# Set schema
try:
self.refresh_schema()
except neo4j.exceptions.ClientError:
raise ValueError(
"Could not use APOC procedures. "
"Please ensure the APOC plugin is installed in Neo4j and that "
"'apoc.meta.data()' is allowed in Neo4j configuration "
)
@property
def get_schema(self) -> str:
"""Returns the schema of the Graph"""
return self.schema
@property
def get_structured_schema(self) -> Dict[str, Any]:
"""Returns the structured schema of the Graph"""
return self.structured_schema
[docs] def query(self, query: str, params: dict = {}) -> List[Dict[str, Any]]:
"""Query Neo4j database."""
from neo4j.exceptions import CypherSyntaxError
with self._driver.session(database=self._database) as session:
try:
data = session.run(query, params)
return [r.data() for r in data]
except CypherSyntaxError as e:
raise ValueError(f"Generated Cypher Statement is not valid\n{e}")
[docs] def refresh_schema(self) -> None:
"""
Refreshes the Neo4j graph schema information.
"""
node_properties = [el["output"] for el in self.query(node_properties_query)]
rel_properties = [el["output"] for el in self.query(rel_properties_query)]
relationships = [el["output"] for el in self.query(rel_query)]
self.structured_schema = {
"node_props": {el["labels"]: el["properties"] for el in node_properties}, | lang/api.python.langchain.com/en/latest/_modules/langchain/graphs/neo4j_graph.html |
a7058a339ea3-3 | "rel_props": {el["type"]: el["properties"] for el in rel_properties},
"relationships": relationships,
}
self.schema = f"""
Node properties are the following:
{node_properties}
Relationship properties are the following:
{rel_properties}
The relationships are the following:
{[f"(:{el['start']})-[:{el['type']}]->(:{el['end']})" for el in relationships]}
"""
[docs] def add_graph_documents(
self, graph_documents: List[GraphDocument], include_source: bool = False
) -> None:
"""
Takes GraphDocument as input and uses it to construct a graph.
"""
for document in graph_documents:
include_docs_query = (
"CREATE (d:Document) "
"SET d.text = $document.page_content "
"SET d += $document.metadata "
"WITH d "
)
# Import nodes
self.query(
(
f"{include_docs_query if include_source else ''}"
"UNWIND $data AS row "
"CALL apoc.merge.node([row.type], {id: row.id}, "
"row.properties, {}) YIELD node "
f"{'MERGE (d)-[:MENTIONS]->(node) ' if include_source else ''}"
"RETURN distinct 'done' AS result"
),
{
"data": [el.__dict__ for el in document.nodes],
"document": document.source.__dict__,
},
)
# Import relationships
self.query(
"UNWIND $data AS row " | lang/api.python.langchain.com/en/latest/_modules/langchain/graphs/neo4j_graph.html |
a7058a339ea3-4 | self.query(
"UNWIND $data AS row "
"CALL apoc.merge.node([row.source_label], {id: row.source},"
"{}, {}) YIELD node as source "
"CALL apoc.merge.node([row.target_label], {id: row.target},"
"{}, {}) YIELD node as target "
"CALL apoc.merge.relationship(source, row.type, "
"{}, row.properties, target) YIELD rel "
"RETURN distinct 'done'",
{
"data": [
{
"source": el.source.id,
"source_label": el.source.type,
"target": el.target.id,
"target_label": el.target.type,
"type": el.type.replace(" ", "_").upper(),
"properties": el.properties,
}
for el in document.relationships
]
},
)
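A minimal usage sketch; the URL and credentials are placeholders (they can also be supplied via the NEO4J_URI, NEO4J_USERNAME and NEO4J_PASSWORD environment variables):
graph = Neo4jGraph(url="bolt://localhost:7687", username="neo4j", password="password")
print(graph.get_schema)  # requires the APOC plugin, as noted in __init__
graph.query("MATCH (n) RETURN count(n) AS node_count")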
Source code for langchain.graphs.graph_document
from __future__ import annotations
from typing import List, Union
from langchain.load.serializable import Serializable
from langchain.pydantic_v1 import Field
from langchain.schema import Document
[docs]class Node(Serializable):
"""Represents a node in a graph with associated properties.
Attributes:
id (Union[str, int]): A unique identifier for the node.
type (str): The type or label of the node, default is "Node".
properties (dict): Additional properties and metadata associated with the node.
"""
id: Union[str, int]
type: str = "Node"
properties: dict = Field(default_factory=dict)
[docs]class Relationship(Serializable):
"""Represents a directed relationship between two nodes in a graph.
Attributes:
source (Node): The source node of the relationship.
target (Node): The target node of the relationship.
type (str): The type of the relationship.
properties (dict): Additional properties associated with the relationship.
"""
source: Node
target: Node
type: str
properties: dict = Field(default_factory=dict)
[docs]class GraphDocument(Serializable):
"""Represents a graph document consisting of nodes and relationships.
Attributes:
nodes (List[Node]): A list of nodes in the graph.
relationships (List[Relationship]): A list of relationships in the graph.
source (Document): The document from which the graph information is derived.
"""
nodes: List[Node]
relationships: List[Relationship]
source: Document
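A minimal sketch constructing the three classes above; the node ids and types are illustrative:
from langchain.schema import Document

alice = Node(id="alice", type="Person")
acme = Node(id="acme", type="Company", properties={"industry": "software"})
works_at = Relationship(source=alice, target=acme, type="WORKS_AT")
graph_doc = GraphDocument(
    nodes=[alice, acme],
    relationships=[works_at],
    source=Document(page_content="Alice works at Acme."),
)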
Source code for langchain.graphs.hugegraph
from typing import Any, Dict, List
[docs]class HugeGraph:
"""HugeGraph wrapper for graph operations.
*Security note*: Make sure that the database connection uses credentials
that are narrowly-scoped to only include necessary permissions.
Failure to do so may result in data corruption or loss, since the calling
code may attempt commands that would result in deletion, mutation
of data if appropriately prompted or reading sensitive data if such
data is present in the database.
The best way to guard against such negative outcomes is to (as appropriate)
limit the permissions granted to the credentials used with this tool.
See https://python.langchain.com/docs/security for more information.
"""
[docs] def __init__(
self,
username: str = "default",
password: str = "default",
address: str = "127.0.0.1",
port: int = 8081,
graph: str = "hugegraph",
) -> None:
"""Create a new HugeGraph wrapper instance."""
try:
from hugegraph.connection import PyHugeGraph
except ImportError:
raise ValueError(
"Please install HugeGraph Python client first: "
"`pip3 install hugegraph-python`"
)
self.username = username
self.password = password
self.address = address
self.port = port
self.graph = graph
self.client = PyHugeGraph(
address, port, user=username, pwd=password, graph=graph
)
self.schema = ""
# Set schema
try:
self.refresh_schema()
except Exception as e:
raise ValueError(f"Could not refresh schema. Error: {e}")
@property
def get_schema(self) -> str:
"""Returns the schema of the HugeGraph database"""
return self.schema
[docs] def refresh_schema(self) -> None:
"""
Refreshes the HugeGraph schema information.
"""
schema = self.client.schema()
vertex_schema = schema.getVertexLabels()
edge_schema = schema.getEdgeLabels()
relationships = schema.getRelations()
self.schema = (
f"Node properties: {vertex_schema}\n"
f"Edge properties: {edge_schema}\n"
f"Relationships: {relationships}\n"
)
[docs] def query(self, query: str) -> List[Dict[str, Any]]:
g = self.client.gremlin()
res = g.exec(query)
return res["data"] | lang/api.python.langchain.com/en/latest/_modules/langchain/graphs/hugegraph.html |
Source code for langchain.graphs.arangodb_graph
import os
from math import ceil
from typing import Any, Dict, List, Optional
[docs]class ArangoGraph:
"""ArangoDB wrapper for graph operations.
*Security note*: Make sure that the database connection uses credentials
that are narrowly-scoped to only include necessary permissions.
Failure to do so may result in data corruption or loss, since the calling
code may attempt commands that would result in deletion, mutation
of data if appropriately prompted or reading sensitive data if such
data is present in the database.
The best way to guard against such negative outcomes is to (as appropriate)
limit the permissions granted to the credentials used with this tool.
See https://python.langchain.com/docs/security for more information.
"""
[docs] def __init__(self, db: Any) -> None:
"""Create a new ArangoDB graph wrapper instance."""
self.set_db(db)
self.set_schema()
@property
def db(self) -> Any:
return self.__db
@property
def schema(self) -> Dict[str, Any]:
return self.__schema
[docs] def set_db(self, db: Any) -> None:
from arango.database import Database
if not isinstance(db, Database):
msg = "**db** parameter must inherit from arango.database.Database"
raise TypeError(msg)
self.__db: Database = db
self.set_schema()
[docs] def set_schema(self, schema: Optional[Dict[str, Any]] = None) -> None:
"""
Set the schema of the ArangoDB Database.
Auto-generates Schema if **schema** is None.
""" | lang/api.python.langchain.com/en/latest/_modules/langchain/graphs/arangodb_graph.html |
ade25e025f6a-1 | Auto-generates Schema if **schema** is None.
"""
self.__schema = self.generate_schema() if schema is None else schema
[docs] def generate_schema(
self, sample_ratio: float = 0
) -> Dict[str, List[Dict[str, Any]]]:
"""
Generates the schema of the ArangoDB Database and returns it
User can specify a **sample_ratio** (0 to 1) to determine the
ratio of documents/edges used (in relation to the Collection size)
to render each Collection Schema.
"""
if not 0 <= sample_ratio <= 1:
raise ValueError("**sample_ratio** value must be in between 0 to 1")
# Stores the Edge Relationships between each ArangoDB Document Collection
graph_schema: List[Dict[str, Any]] = [
{"graph_name": g["name"], "edge_definitions": g["edge_definitions"]}
for g in self.db.graphs()
]
# Stores the schema of every ArangoDB Document/Edge collection
collection_schema: List[Dict[str, Any]] = []
for collection in self.db.collections():
if collection["system"]:
continue
# Extract collection name, type, and size
col_name: str = collection["name"]
col_type: str = collection["type"]
col_size: int = self.db.collection(col_name).count()
# Skip collection if empty
if col_size == 0:
continue
# Set number of ArangoDB documents/edges to retrieve
limit_amount = ceil(sample_ratio * col_size) or 1
aql = f"""
FOR doc in {col_name}
LIMIT {limit_amount}
RETURN doc
""" | lang/api.python.langchain.com/en/latest/_modules/langchain/graphs/arangodb_graph.html |
ade25e025f6a-2 | LIMIT {limit_amount}
RETURN doc
"""
doc: Dict[str, Any]
properties: List[Dict[str, str]] = []
for doc in self.__db.aql.execute(aql):
for key, value in doc.items():
properties.append({"name": key, "type": type(value).__name__})
collection_schema.append(
{
"collection_name": col_name,
"collection_type": col_type,
f"{col_type}_properties": properties,
f"example_{col_type}": doc,
}
)
return {"Graph Schema": graph_schema, "Collection Schema": collection_schema}
[docs] def query(
self, query: str, top_k: Optional[int] = None, **kwargs: Any
) -> List[Dict[str, Any]]:
"""Query the ArangoDB database."""
import itertools
cursor = self.__db.aql.execute(query, **kwargs)
return [doc for doc in itertools.islice(cursor, top_k)]
[docs] @classmethod
def from_db_credentials(
cls,
url: Optional[str] = None,
dbname: Optional[str] = None,
username: Optional[str] = None,
password: Optional[str] = None,
) -> Any:
"""Convenience constructor that builds Arango DB from credentials.
Args:
url: Arango DB url. Can be passed in as named arg or set as environment
var ``ARANGODB_URL``. Defaults to "http://localhost:8529".
dbname: Arango DB name. Can be passed in as named arg or set as
environment var ``ARANGODB_DBNAME``. Defaults to "_system".
username: Can be passed in as named arg or set as environment var
``ARANGODB_USERNAME``. Defaults to "root".
password: Can be passed in as named arg or set as environment var
``ARANGODB_PASSWORD``. Defaults to "".
Returns:
An arango.database.StandardDatabase.
"""
db = get_arangodb_client(
url=url, dbname=dbname, username=username, password=password
)
return cls(db)
[docs]def get_arangodb_client(
url: Optional[str] = None,
dbname: Optional[str] = None,
username: Optional[str] = None,
password: Optional[str] = None,
) -> Any:
"""Get the Arango DB client from credentials.
Args:
url: Arango DB url. Can be passed in as named arg or set as environment
var ``ARANGODB_URL``. Defaults to "http://localhost:8529".
dbname: Arango DB name. Can be passed in as named arg or set as
environment var ``ARANGODB_DBNAME``. Defaults to "_system".
username: Can be passed in as named arg or set as environment var
``ARANGODB_USERNAME``. Defaults to "root".
password: Can be passed in as named arg or set as environment var
``ARANGODB_PASSWORD``. Defaults to "".
Returns:
An arango.database.StandardDatabase.
"""
try:
from arango import ArangoClient
except ImportError as e:
raise ImportError(
"Unable to import arango, please install with `pip install python-arango`."
) from e
_url: str = url or os.environ.get("ARANGODB_URL", "http://localhost:8529") # type: ignore[assignment] # noqa: E501
_dbname: str = dbname or os.environ.get("ARANGODB_DBNAME", "_system") # type: ignore[assignment] # noqa: E501
_username: str = username or os.environ.get("ARANGODB_USERNAME", "root") # type: ignore[assignment] # noqa: E501
_password: str = password or os.environ.get("ARANGODB_PASSWORD", "") # type: ignore[assignment] # noqa: E501
return ArangoClient(_url).db(_dbname, _username, _password, verify=True)
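A minimal usage sketch; the credentials are placeholders and the Movies collection in the AQL query is an assumption:
graph = ArangoGraph.from_db_credentials(
    url="http://localhost:8529", dbname="_system", username="root", password=""
)
print(graph.schema)
graph.query("FOR doc IN Movies LIMIT 3 RETURN doc", top_k=3)  # assumes a Movies collection exists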
Source code for langchain.graphs.nebula_graph
import logging
from string import Template
from typing import Any, Dict, Optional
logger = logging.getLogger(__name__)
rel_query = Template(
"""
MATCH ()-[e:`$edge_type`]->()
WITH e limit 1
MATCH (m)-[:`$edge_type`]->(n) WHERE id(m) == src(e) AND id(n) == dst(e)
RETURN "(:" + tags(m)[0] + ")-[:$edge_type]->(:" + tags(n)[0] + ")" AS rels
"""
)
RETRY_TIMES = 3
[docs]class NebulaGraph:
"""NebulaGraph wrapper for graph operations.
NebulaGraph mirrors the Neo4jGraph interface to keep the user experience consistent.
*Security note*: Make sure that the database connection uses credentials
that are narrowly-scoped to only include necessary permissions.
Failure to do so may result in data corruption or loss, since the calling
code may attempt commands that would result in deletion, mutation
of data if appropriately prompted or reading sensitive data if such
data is present in the database.
The best way to guard against such negative outcomes is to (as appropriate)
limit the permissions granted to the credentials used with this tool.
See https://python.langchain.com/docs/security for more information.
"""
[docs] def __init__(
self,
space: str,
username: str = "root",
password: str = "nebula",
address: str = "127.0.0.1",
port: int = 9669,
session_pool_size: int = 30,
) -> None:
"""Create a new NebulaGraph wrapper instance.""" | lang/api.python.langchain.com/en/latest/_modules/langchain/graphs/nebula_graph.html |
b23bed57542e-1 | ) -> None:
"""Create a new NebulaGraph wrapper instance."""
try:
import nebula3 # noqa: F401
import pandas # noqa: F401
except ImportError:
raise ValueError(
"Please install NebulaGraph Python client and pandas first: "
"`pip install nebula3-python pandas`"
)
self.username = username
self.password = password
self.address = address
self.port = port
self.space = space
self.session_pool_size = session_pool_size
self.session_pool = self._get_session_pool()
self.schema = ""
# Set schema
try:
self.refresh_schema()
except Exception as e:
raise ValueError(f"Could not refresh schema. Error: {e}")
def _get_session_pool(self) -> Any:
assert all(
[self.username, self.password, self.address, self.port, self.space]
), (
"Please provide all of the following parameters: "
"username, password, address, port, space"
)
from nebula3.Config import SessionPoolConfig
from nebula3.Exception import AuthFailedException, InValidHostname
from nebula3.gclient.net.SessionPool import SessionPool
config = SessionPoolConfig()
config.max_size = self.session_pool_size
try:
session_pool = SessionPool(
self.username,
self.password,
self.space,
[(self.address, self.port)],
)
except InValidHostname:
raise ValueError(
"Could not connect to NebulaGraph database. "
"Please ensure that the address and port are correct"
)
try:
session_pool.init(config)
except AuthFailedException:
raise ValueError(
"Could not connect to NebulaGraph database. "
"Please ensure that the username and password are correct"
)
except RuntimeError as e:
raise ValueError(f"Error initializing session pool. Error: {e}")
return session_pool
def __del__(self) -> None:
try:
self.session_pool.close()
except Exception as e:
logger.warning(f"Could not close session pool. Error: {e}")
@property
def get_schema(self) -> str:
"""Returns the schema of the NebulaGraph database"""
return self.schema
[docs] def execute(self, query: str, params: Optional[dict] = None, retry: int = 0) -> Any:
"""Query NebulaGraph database."""
from nebula3.Exception import IOErrorException, NoValidSessionException
from nebula3.fbthrift.transport.TTransport import TTransportException
params = params or {}
try:
result = self.session_pool.execute_parameter(query, params)
if not result.is_succeeded():
logger.warning(
f"Error executing query to NebulaGraph. "
f"Error: {result.error_msg()}\n"
f"Query: {query} \n"
)
return result
except NoValidSessionException:
logger.warning(
f"No valid session found in session pool. "
f"Please consider increasing the session pool size. "
f"Current size: {self.session_pool_size}"
)
raise ValueError(
f"No valid session found in session pool. " | lang/api.python.langchain.com/en/latest/_modules/langchain/graphs/nebula_graph.html |
b23bed57542e-3 | )
raise ValueError(
f"No valid session found in session pool. "
f"Please consider increasing the session pool size. "
f"Current size: {self.session_pool_size}"
)
except RuntimeError as e:
if retry < RETRY_TIMES:
retry += 1
logger.warning(
f"Error executing query to NebulaGraph. "
f"Retrying ({retry}/{RETRY_TIMES})...\n"
f"query: {query} \n"
f"Error: {e}"
)
return self.execute(query, params, retry)
else:
raise ValueError(f"Error executing query to NebulaGraph. Error: {e}")
except (TTransportException, IOErrorException):
# connection issue, try to recreate session pool
if retry < RETRY_TIMES:
retry += 1
logger.warning(
f"Connection issue with NebulaGraph. "
f"Retrying ({retry}/{RETRY_TIMES})...\n to recreate session pool"
)
self.session_pool = self._get_session_pool()
return self.execute(query, params, retry)
[docs] def refresh_schema(self) -> None:
"""
Refreshes the NebulaGraph schema information.
"""
tags_schema, edge_types_schema, relationships = [], [], []
for tag in self.execute("SHOW TAGS").column_values("Name"):
tag_name = tag.cast()
tag_schema = {"tag": tag_name, "properties": []}
r = self.execute(f"DESCRIBE TAG `{tag_name}`")
props, types = r.column_values("Field"), r.column_values("Type")
for i in range(r.row_size()):
tag_schema["properties"].append((props[i].cast(), types[i].cast()))
tags_schema.append(tag_schema)
for edge_type in self.execute("SHOW EDGES").column_values("Name"):
edge_type_name = edge_type.cast()
edge_schema = {"edge": edge_type_name, "properties": []}
r = self.execute(f"DESCRIBE EDGE `{edge_type_name}`")
props, types = r.column_values("Field"), r.column_values("Type")
for i in range(r.row_size()):
edge_schema["properties"].append((props[i].cast(), types[i].cast()))
edge_types_schema.append(edge_schema)
# build relationships types
r = self.execute(
rel_query.substitute(edge_type=edge_type_name)
).column_values("rels")
if len(r) > 0:
relationships.append(r[0].cast())
self.schema = (
f"Node properties: {tags_schema}\n"
f"Edge properties: {edge_types_schema}\n"
f"Relationships: {relationships}\n"
)
[docs] def query(self, query: str, retry: int = 0) -> Dict[str, Any]:
result = self.execute(query, retry=retry)
columns = result.keys()
d: Dict[str, list] = {}
for col_num in range(result.col_size()):
col_name = columns[col_num]
col_list = result.column_values(col_name)
d[col_name] = [x.cast() for x in col_list]
return d
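A minimal usage sketch; the space and tag names below follow NebulaGraph's basketballplayer demo dataset and are assumptions:
graph = NebulaGraph(space="basketballplayer", username="root", password="nebula")
print(graph.get_schema)
graph.query("MATCH (v:player) RETURN v.player.name AS name LIMIT 3")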
Source code for langchain.graphs.memgraph_graph
from langchain.graphs.neo4j_graph import Neo4jGraph
SCHEMA_QUERY = """
CALL llm_util.schema("prompt_ready")
YIELD *
RETURN *
"""
RAW_SCHEMA_QUERY = """
CALL llm_util.schema("raw")
YIELD *
RETURN *
"""
[docs]class MemgraphGraph(Neo4jGraph):
"""Memgraph wrapper for graph operations.
*Security note*: Make sure that the database connection uses credentials
that are narrowly-scoped to only include necessary permissions.
Failure to do so may result in data corruption or loss, since the calling
code may attempt commands that would result in deletion, mutation
of data if appropriately prompted or reading sensitive data if such
data is present in the database.
The best way to guard against such negative outcomes is to (as appropriate)
limit the permissions granted to the credentials used with this tool.
See https://python.langchain.com/docs/security for more information.
"""
[docs] def __init__(
self, url: str, username: str, password: str, *, database: str = "memgraph"
) -> None:
"""Create a new Memgraph graph wrapper instance."""
super().__init__(url, username, password, database=database)
[docs] def refresh_schema(self) -> None:
"""
Refreshes the Memgraph graph schema information.
"""
db_schema = self.query(SCHEMA_QUERY)[0].get("schema")
assert db_schema is not None
self.schema = db_schema
db_structured_schema = self.query(RAW_SCHEMA_QUERY)[0].get("schema")
assert db_structured_schema is not None
self.structured_schema = db_structured_schema
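A minimal usage sketch; the connection details are placeholders, and the server must expose the llm_util query module that SCHEMA_QUERY and RAW_SCHEMA_QUERY call:
graph = MemgraphGraph(url="bolt://localhost:7687", username="memgraph", password="memgraph")
print(graph.get_schema)  # the prompt-ready schema string from llm_util.schema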
Source code for langchain.graphs.falkordb_graph
from typing import Any, Dict, List, Optional
from langchain.graphs.graph_document import GraphDocument
from langchain.graphs.graph_store import GraphStore
node_properties_query = """
MATCH (n)
WITH keys(n) as keys, labels(n) AS labels
WITH CASE WHEN keys = [] THEN [NULL] ELSE keys END AS keys, labels
UNWIND labels AS label
UNWIND keys AS key
WITH label, collect(DISTINCT key) AS keys
RETURN {label:label, keys:keys} AS output
"""
rel_properties_query = """
MATCH ()-[r]->()
WITH keys(r) as keys, type(r) AS types
WITH CASE WHEN keys = [] THEN [NULL] ELSE keys END AS keys, types
UNWIND types AS type
UNWIND keys AS key
WITH type, collect(DISTINCT key) AS keys
RETURN {types:type, keys:keys} AS output
"""
rel_query = """
MATCH (n)-[r]->(m)
UNWIND labels(n) as src_label
UNWIND labels(m) as dst_label
UNWIND type(r) as rel_type
RETURN DISTINCT {start: src_label, type: rel_type, end: dst_label} AS output
"""
[docs]class FalkorDBGraph(GraphStore):
"""FalkorDB wrapper for graph operations.
*Security note*: Make sure that the database connection uses credentials
that are narrowly-scoped to only include necessary permissions.
Failure to do so may result in data corruption or loss, since the calling
code may attempt commands that would result in deletion, mutation
of data if appropriately prompted or reading sensitive data if such
data is present in the database.
The best way to guard against such negative outcomes is to (as appropriate)
limit the permissions granted to the credentials used with this tool.
See https://python.langchain.com/docs/security for more information.
"""
[docs] def __init__(
self,
database: str,
host: str = "localhost",
port: int = 6379,
username: Optional[str] = None,
password: Optional[str] = None,
ssl: bool = False,
) -> None:
"""Create a new FalkorDB graph wrapper instance."""
try:
import redis
from redis.commands.graph import Graph
except ImportError:
raise ImportError(
"Could not import redis python package. "
"Please install it with `pip install redis`."
)
self._driver = redis.Redis(
host=host, port=port, username=username, password=password, ssl=ssl
)
self._graph = Graph(self._driver, database)
self.schema: str = ""
self.structured_schema: Dict[str, Any] = {}
try:
self.refresh_schema()
except Exception as e:
raise ValueError(f"Could not refresh schema. Error: {e}")
@property
def get_schema(self) -> str:
"""Returns the schema of the FalkorDB database"""
return self.schema
@property
def get_structured_schema(self) -> Dict[str, Any]:
"""Returns the structured schema of the Graph"""
return self.structured_schema
[docs] def refresh_schema(self) -> None:
"""Refreshes the schema of the FalkorDB database""" | lang/api.python.langchain.com/en/latest/_modules/langchain/graphs/falkordb_graph.html |
447fc2513621-2 | """Refreshes the schema of the FalkorDB database"""
node_properties: List[Any] = self.query(node_properties_query)
rel_properties: List[Any] = self.query(rel_properties_query)
relationships: List[Any] = self.query(rel_query)
self.structured_schema = {
"node_props": {el[0]["label"]: el[0]["keys"] for el in node_properties},
"rel_props": {el[0]["types"]: el[0]["keys"] for el in rel_properties},
"relationships": [el[0] for el in relationships],
}
self.schema = (
f"Node properties: {node_properties}\n"
f"Relationships properties: {rel_properties}\n"
f"Relationships: {relationships}\n"
)
[docs] def query(self, query: str, params: dict = {}) -> List[Dict[str, Any]]:
"""Query FalkorDB database."""
try:
data = self._graph.query(query, params)
return data.result_set
except Exception as e:
raise ValueError("Generated Cypher Statement is not valid\n" f"{e}")
[docs] def add_graph_documents(
self, graph_documents: List[GraphDocument], include_source: bool = False
) -> None:
"""
Takes GraphDocument as input and uses it to construct a graph.
"""
for document in graph_documents:
# Import nodes
for node in document.nodes:
self.query(
(
f"MERGE (n:{node.type} {{id:'{node.id}'}}) "
"SET n += $properties "
"RETURN distinct 'done' AS result"
),
{"properties": node.properties},
)
# Import relationships
for rel in document.relationships:
self.query(
(
f"MATCH (a:{rel.source.type} {{id:'{rel.source.id}'}}), "
f"(b:{rel.target.type} {{id:'{rel.target.id}'}}) "
f"MERGE (a)-[r:{(rel.type.replace(' ', '_').upper())}]->(b) "
"SET r += $properties "
"RETURN distinct 'done' AS result"
),
{"properties": rel.properties},
)
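A minimal usage sketch; the database name is a placeholder:
graph = FalkorDBGraph(database="movies", host="localhost", port=6379)
print(graph.get_schema)
graph.query("MATCH (n) RETURN n LIMIT 5")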
Source code for langchain.graphs.graph_store
from abc import abstractmethod
from typing import Any, Dict, List
from langchain.graphs.graph_document import GraphDocument
[docs]class GraphStore:
"""An abstract class wrapper for graph operations."""
@property
@abstractmethod
def get_schema(self) -> str:
"""Returns the schema of the Graph database"""
pass
@property
@abstractmethod
def get_structured_schema(self) -> Dict[str, Any]:
"""Returns the schema of the Graph database"""
pass
[docs] @abstractmethod
def query(self, query: str, params: dict = {}) -> List[Dict[str, Any]]:
"""Query the graph."""
pass
[docs] @abstractmethod
def refresh_schema(self) -> None:
"""Refreshes the graph schema information."""
pass
[docs] @abstractmethod
def add_graph_documents(
self, graph_documents: List[GraphDocument], include_source: bool = False
) -> None:
"""Take GraphDocument as input as uses it to construct a graph."""
pass
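A minimal sketch of a concrete subclass, showing the contract the abstract class defines; the in-memory storage is an assumption for illustration only:
from typing import Any, Dict, List
from langchain.graphs.graph_document import GraphDocument

class InMemoryGraphStore(GraphStore):
    def __init__(self) -> None:
        self._docs: List[GraphDocument] = []
        self._schema = ""
    @property
    def get_schema(self) -> str:
        return self._schema
    @property
    def get_structured_schema(self) -> Dict[str, Any]:
        return {"documents": len(self._docs)}
    def query(self, query: str, params: dict = {}) -> List[Dict[str, Any]]:
        return []  # a real store would translate and execute the query here
    def refresh_schema(self) -> None:
        self._schema = f"{len(self._docs)} graph documents stored"
    def add_graph_documents(
        self, graph_documents: List[GraphDocument], include_source: bool = False
    ) -> None:
        self._docs.extend(graph_documents)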
Source code for langchain.llms.cerebriumai
import logging
from typing import Any, Dict, List, Mapping, Optional
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.pydantic_v1 import Extra, Field, root_validator
from langchain.utils import get_from_dict_or_env
logger = logging.getLogger(__name__)
[docs]class CerebriumAI(LLM):
"""CerebriumAI large language models.
To use, you should have the ``cerebrium`` python package installed, and the
environment variable ``CEREBRIUMAI_API_KEY`` set with your API key.
Any parameters that are valid to be passed to the call can be passed
in, even if not explicitly saved on this class.
Example:
.. code-block:: python
from langchain.llms import CerebriumAI
cerebrium = CerebriumAI(endpoint_url="")
"""
endpoint_url: str = ""
"""model endpoint to use"""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Holds any model parameters valid for `create` call not
explicitly specified."""
cerebriumai_api_key: Optional[str] = None
class Config:
"""Configuration for this pydantic config."""
extra = Extra.forbid
@root_validator(pre=True)
def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = {field.alias for field in cls.__fields__.values()}
extra = values.get("model_kwargs", {})
for field_name in list(values):
if field_name not in all_required_field_names:
if field_name in extra:
raise ValueError(f"Found {field_name} supplied twice.")
logger.warning(
f"""{field_name} was transferred to model_kwargs.
Please confirm that {field_name} is what you intended."""
)
extra[field_name] = values.pop(field_name)
values["model_kwargs"] = extra
return values
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
cerebriumai_api_key = get_from_dict_or_env(
values, "cerebriumai_api_key", "CEREBRIUMAI_API_KEY"
)
values["cerebriumai_api_key"] = cerebriumai_api_key
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
**{"endpoint_url": self.endpoint_url},
**{"model_kwargs": self.model_kwargs},
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "cerebriumai"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call to CerebriumAI endpoint."""
try:
from cerebrium import model_api_request
except ImportError:
raise ValueError(
"Could not import cerebrium python package. "
"Please install it with `pip install cerebrium`."
)
params = self.model_kwargs or {}
response = model_api_request(
self.endpoint_url,
{"prompt": prompt, **params, **kwargs},
self.cerebriumai_api_key,
)
text = response["data"]["result"]
if stop is not None:
# I believe this is required since the stop tokens
# are not enforced by the model parameters
text = enforce_stop_tokens(text, stop)
return text
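A minimal usage sketch; the endpoint URL and API key are placeholders. Note that any extra keyword argument (max_length below) is moved into model_kwargs by the build_extra validator:
llm = CerebriumAI(
    endpoint_url="https://run.cerebrium.ai/my-model/predict",  # placeholder endpoint
    cerebriumai_api_key="my-api-key",  # or set CEREBRIUMAI_API_KEY
    max_length=100,
)
print(llm("What is the capital of France?"))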
Source code for langchain.llms.chatglm
import logging
from typing import Any, List, Mapping, Optional
import requests
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
logger = logging.getLogger(__name__)
[docs]class ChatGLM(LLM):
"""ChatGLM LLM service.
Example:
.. code-block:: python
from langchain.llms import ChatGLM
endpoint_url = (
"http://127.0.0.1:8000"
)
ChatGLM_llm = ChatGLM(
endpoint_url=endpoint_url
)
"""
endpoint_url: str = "http://127.0.0.1:8000/"
"""Endpoint URL to use."""
model_kwargs: Optional[dict] = None
"""Keyword arguments to pass to the model."""
max_token: int = 20000
"""Max token allowed to pass to the model."""
temperature: float = 0.1
"""LLM model temperature from 0 to 10."""
history: List[List] = []
"""History of the conversation"""
top_p: float = 0.7
"""Top P for nucleus sampling from 0 to 1"""
with_history: bool = False
"""Whether to use history or not"""
@property
def _llm_type(self) -> str:
return "chat_glm"
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
_model_kwargs = self.model_kwargs or {}
return {
**{"endpoint_url": self.endpoint_url}, | lang/api.python.langchain.com/en/latest/_modules/langchain/llms/chatglm.html |
d4ba7116d9a9-1 | return {
**{"endpoint_url": self.endpoint_url},
**{"model_kwargs": _model_kwargs},
}
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to a ChatGLM LLM inference endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = chatglm_llm("Who are you?")
"""
_model_kwargs = self.model_kwargs or {}
# HTTP headers for authorization
headers = {"Content-Type": "application/json"}
payload = {
"prompt": prompt,
"temperature": self.temperature,
"history": self.history,
"max_length": self.max_token,
"top_p": self.top_p,
}
payload.update(_model_kwargs)
payload.update(kwargs)
logger.debug(f"ChatGLM payload: {payload}")
# call api
try:
response = requests.post(self.endpoint_url, headers=headers, json=payload)
except requests.exceptions.RequestException as e:
raise ValueError(f"Error raised by inference endpoint: {e}")
logger.debug(f"ChatGLM response: {response}")
if response.status_code != 200:
raise ValueError(f"Failed with response: {response}")
try:
parsed_response = response.json()
# Check if the response content exists
if isinstance(parsed_response, dict):
content_keys = "response"
if content_keys in parsed_response:
text = parsed_response[content_keys]
else:
raise ValueError(f"No content in response : {parsed_response}")
else:
raise ValueError(f"Unexpected response type: {parsed_response}")
except requests.exceptions.JSONDecodeError as e:
raise ValueError(
f"Error raised during decoding response from inference endpoint: {e}."
f"\nResponse: {response.text}"
)
if stop is not None:
text = enforce_stop_tokens(text, stop)
if self.with_history:
self.history = self.history + [[None, parsed_response["response"]]]
return text
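A minimal usage sketch against a locally served ChatGLM endpoint; the URL matches the class default and is a placeholder:
llm = ChatGLM(endpoint_url="http://127.0.0.1:8000", with_history=True)
print(llm("Who are you?"))
print(llm("What did I just ask?"))  # with_history=True carries prior responses in self.history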
Source code for langchain.llms.huggingface_hub
from typing import Any, Dict, List, Mapping, Optional
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.pydantic_v1 import Extra, root_validator
from langchain.utils import get_from_dict_or_env
DEFAULT_REPO_ID = "gpt2"
VALID_TASKS = ("text2text-generation", "text-generation", "summarization")
[docs]class HuggingFaceHub(LLM):
"""HuggingFaceHub models.
To use, you should have the ``huggingface_hub`` python package installed, and the
environment variable ``HUGGINGFACEHUB_API_TOKEN`` set with your API token, or pass
it as a named parameter to the constructor.
Only supports `text-generation`, `text2text-generation` and `summarization` for now.
Example:
.. code-block:: python
from langchain.llms import HuggingFaceHub
hf = HuggingFaceHub(repo_id="gpt2", huggingfacehub_api_token="my-api-key")
"""
client: Any #: :meta private:
repo_id: str = DEFAULT_REPO_ID
"""Model name to use."""
task: Optional[str] = None
"""Task to call the model with.
Should be a task that returns `generated_text` or `summary_text`."""
model_kwargs: Optional[dict] = None
"""Keyword arguments to pass to the model."""
huggingfacehub_api_token: Optional[str] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
huggingfacehub_api_token = get_from_dict_or_env(
values, "huggingfacehub_api_token", "HUGGINGFACEHUB_API_TOKEN"
)
try:
from huggingface_hub.inference_api import InferenceApi
repo_id = values["repo_id"]
client = InferenceApi(
repo_id=repo_id,
token=huggingfacehub_api_token,
task=values.get("task"),
)
if client.task not in VALID_TASKS:
raise ValueError(
f"Got invalid task {client.task}, "
f"currently only {VALID_TASKS} are supported"
)
values["client"] = client
except ImportError:
raise ValueError(
"Could not import huggingface_hub python package. "
"Please install it with `pip install huggingface_hub`."
)
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
_model_kwargs = self.model_kwargs or {}
return {
**{"repo_id": self.repo_id, "task": self.task},
**{"model_kwargs": _model_kwargs},
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "huggingface_hub"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to HuggingFace Hub's inference endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = hf("Tell me a joke.")
"""
_model_kwargs = self.model_kwargs or {}
params = {**_model_kwargs, **kwargs}
response = self.client(inputs=prompt, params=params)
if "error" in response:
raise ValueError(f"Error raised by inference API: {response['error']}")
if self.client.task == "text-generation":
# Text generation return includes the starter text.
text = response[0]["generated_text"][len(prompt) :]
elif self.client.task == "text2text-generation":
text = response[0]["generated_text"]
elif self.client.task == "summarization":
text = response[0]["summary_text"]
else:
raise ValueError(
f"Got invalid task {self.client.task}, "
f"currently only {VALID_TASKS} are supported"
)
if stop is not None:
# This is a bit hacky, but I can't figure out a better way to enforce
# stop tokens when making calls to huggingface_hub.
text = enforce_stop_tokens(text, stop)
return text
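A usage sketch for the class above (assumes ``HUGGINGFACEHUB_API_TOKEN`` is set in the environment; the repo/task pairing is illustrative):
.. code-block:: python
    from langchain.llms import HuggingFaceHub

    # flan-t5-xl is a text2text-generation model, so the output is returned as-is.
    hf = HuggingFaceHub(repo_id="google/flan-t5-xl", task="text2text-generation", model_kwargs={"temperature": 0.5, "max_length": 64})
    print(hf("Translate English to German: Hello, how are you?"))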
Source code for langchain.llms.amazon_api_gateway
from typing import Any, Dict, List, Mapping, Optional
import requests
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.pydantic_v1 import Extra
[docs]class ContentHandlerAmazonAPIGateway:
"""Adapter to prepare the inputs from Langchain to a format
that LLM model expects.
It also provides helper function to extract
the generated text from the model response."""
[docs] @classmethod
def transform_input(
cls, prompt: str, model_kwargs: Dict[str, Any]
) -> Dict[str, Any]:
return {"inputs": prompt, "parameters": model_kwargs}
[docs] @classmethod
def transform_output(cls, response: Any) -> str:
return response.json()[0]["generated_text"]
[docs]class AmazonAPIGateway(LLM):
"""Amazon API Gateway to access LLM models hosted on AWS."""
api_url: str
"""API Gateway URL"""
headers: Optional[Dict] = None
"""API Gateway HTTP Headers to send, e.g. for authentication"""
model_kwargs: Optional[Dict] = None
"""Keyword arguments to pass to the model."""
content_handler: ContentHandlerAmazonAPIGateway = ContentHandlerAmazonAPIGateway()
"""The content handler class that provides an input and
output transform functions to handle formats between LLM
and the endpoint.
"""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters.""" | lang/api.python.langchain.com/en/latest/_modules/langchain/llms/amazon_api_gateway.html |
39a0014d469a-1 | """Get the identifying parameters."""
_model_kwargs = self.model_kwargs or {}
return {
**{"api_url": self.api_url, "headers": self.headers},
**{"model_kwargs": _model_kwargs},
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "amazon_api_gateway"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to Amazon API Gateway model.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = llm("Tell me a joke.")
"""
_model_kwargs = self.model_kwargs or {}
payload = self.content_handler.transform_input(prompt, _model_kwargs)
try:
response = requests.post(
self.api_url,
headers=self.headers,
json=payload,
)
text = self.content_handler.transform_output(response)
except Exception as error:
raise ValueError(f"Error raised by the service: {error}")
if stop is not None:
text = enforce_stop_tokens(text, stop)
return text
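A usage sketch for the class above (the API Gateway URL is a placeholder; the default content handler expects the endpoint to return a list like [{"generated_text": ...}]):
.. code-block:: python
    from langchain.llms import AmazonAPIGateway

    # Placeholder URL; pass an auth header via `headers` if your gateway requires one.
    llm = AmazonAPIGateway(api_url="https://<api-id>.execute-api.us-east-1.amazonaws.com/prod", model_kwargs={"max_new_tokens": 100})
    print(llm("Tell me a joke."))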
Source code for langchain.llms.azureml_endpoint
import json
import urllib.request
import warnings
from abc import abstractmethod
from typing import Any, Dict, List, Mapping, Optional
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.pydantic_v1 import BaseModel, validator
from langchain.utils import get_from_dict_or_env
[docs]class AzureMLEndpointClient(object):
"""AzureML Managed Endpoint client."""
[docs] def __init__(
self, endpoint_url: str, endpoint_api_key: str, deployment_name: str = ""
) -> None:
"""Initialize the class."""
if not endpoint_api_key or not endpoint_url:
raise ValueError(
"""A key/token and REST endpoint should
be provided to invoke the endpoint"""
)
self.endpoint_url = endpoint_url
self.endpoint_api_key = endpoint_api_key
self.deployment_name = deployment_name
[docs] def call(self, body: bytes, **kwargs: Any) -> bytes:
"""call."""
# The azureml-model-deployment header will force the request to go to a
# specific deployment. Remove this header to have the request observe the
# endpoint traffic rules.
headers = {
"Content-Type": "application/json",
"Authorization": ("Bearer " + self.endpoint_api_key),
}
if self.deployment_name != "":
headers["azureml-model-deployment"] = self.deployment_name
req = urllib.request.Request(self.endpoint_url, body, headers)
response = urllib.request.urlopen(req, timeout=kwargs.get("timeout", 50))
result = response.read()
return result
[docs]class ContentFormatterBase:
"""Transform request and response of AzureML endpoint to match with
required schema.
"""
"""
Example:
.. code-block:: python
class ContentFormatter(ContentFormatterBase):
content_type = "application/json"
accepts = "application/json"
def format_request_payload(
self,
prompt: str,
model_kwargs: Dict
) -> bytes:
input_str = json.dumps(
{
"inputs": {"input_string": [prompt]},
"parameters": model_kwargs,
}
)
return str.encode(input_str)
def format_response_payload(self, output: str) -> str:
response_json = json.loads(output)
return response_json[0]["0"]
"""
content_type: Optional[str] = "application/json"
"""The MIME type of the input data passed to the endpoint"""
accepts: Optional[str] = "application/json"
"""The MIME type of the response data returned from the endpoint"""
[docs] @staticmethod
def escape_special_characters(prompt: str) -> str:
"""Escapes any special characters in `prompt`"""
escape_map = {
"\\": "\\\\",
'"': '\\"',
"\b": "\\b",
"\f": "\\f",
"\n": "\\n",
"\r": "\\r",
"\t": "\\t",
}
# Replace each occurrence of the specified characters with escaped versions
for escape_sequence, escaped_sequence in escape_map.items():
prompt = prompt.replace(escape_sequence, escaped_sequence)
return prompt
[docs] @abstractmethod
def format_request_payload(self, prompt: str, model_kwargs: Dict) -> bytes:
"""Formats the request body according to the input schema of
the model. Returns bytes or seekable file like object in the
format specified in the content_type request header.
"""
[docs] @abstractmethod
def format_response_payload(self, output: bytes) -> str:
"""Formats the response body according to the output
schema of the model. Returns the data type that is
received from the response.
"""
[docs]class GPT2ContentFormatter(ContentFormatterBase):
"""Content handler for GPT2"""
[docs] def format_request_payload(self, prompt: str, model_kwargs: Dict) -> bytes:
prompt = ContentFormatterBase.escape_special_characters(prompt)
request_payload = json.dumps(
{"inputs": {"input_string": [f'"{prompt}"']}, "parameters": model_kwargs}
)
return str.encode(request_payload)
[docs] def format_response_payload(self, output: bytes) -> str:
return json.loads(output)[0]["0"]
[docs]class OSSContentFormatter(GPT2ContentFormatter):
"""Deprecated: Kept for backwards compatibility
Content handler for LLMs from the OSS catalog."""
content_formatter: Any = None
[docs] def __init__(self) -> None:
super().__init__()
warnings.warn(
"""`OSSContentFormatter` will be deprecated in the future.
Please use `GPT2ContentFormatter` instead.
"""
)
[docs]class HFContentFormatter(ContentFormatterBase):
"""Content handler for LLMs from the HuggingFace catalog."""
[docs] def format_request_payload(self, prompt: str, model_kwargs: Dict) -> bytes:
ContentFormatterBase.escape_special_characters(prompt)
request_payload = json.dumps(
{"inputs": [f'"{prompt}"'], "parameters": model_kwargs}
)
return str.encode(request_payload)
[docs] def format_response_payload(self, output: bytes) -> str:
return json.loads(output)[0]["generated_text"]
[docs]class DollyContentFormatter(ContentFormatterBase):
"""Content handler for the Dolly-v2-12b model"""
[docs] def format_request_payload(self, prompt: str, model_kwargs: Dict) -> bytes:
prompt = ContentFormatterBase.escape_special_characters(prompt)
request_payload = json.dumps(
{
"input_data": {"input_string": [f'"{prompt}"']},
"parameters": model_kwargs,
}
)
return str.encode(request_payload)
[docs] def format_response_payload(self, output: bytes) -> str:
return json.loads(output)[0]
[docs]class LlamaContentFormatter(ContentFormatterBase):
"""Content formatter for LLaMa"""
[docs] def format_request_payload(self, prompt: str, model_kwargs: Dict) -> bytes:
"""Formats the request according to the chosen api"""
prompt = ContentFormatterBase.escape_special_characters(prompt)
request_payload = json.dumps(
{
"input_data": {
"input_string": [f'"{prompt}"'],
"parameters": model_kwargs,
}
}
)
return str.encode(request_payload)
[docs] def format_response_payload(self, output: bytes) -> str:
"""Formats response"""
return json.loads(output)[0]["0"]
[docs]class AzureMLOnlineEndpoint(LLM, BaseModel):
"""Azure ML Online Endpoint models.
Example:
.. code-block:: python
azure_llm = AzureMLOnlineEndpoint(
endpoint_url="https://<your-endpoint>.<your_region>.inference.ml.azure.com/score",
endpoint_api_key="my-api-key",
content_formatter=content_formatter,
)
""" # noqa: E501
endpoint_url: str = ""
"""URL of pre-existing Endpoint. Should be passed to constructor or specified as
env var `AZUREML_ENDPOINT_URL`."""
endpoint_api_key: str = ""
"""Authentication Key for Endpoint. Should be passed to constructor or specified as
env var `AZUREML_ENDPOINT_API_KEY`."""
deployment_name: str = ""
"""Deployment Name for Endpoint. NOT REQUIRED to call endpoint. Should be passed
to constructor or specified as env var `AZUREML_DEPLOYMENT_NAME`."""
http_client: Any = None #: :meta private:
content_formatter: Any = None
"""The content formatter that provides an input and output
transform function to handle formats between the LLM and
the endpoint"""
model_kwargs: Optional[dict] = None
"""Keyword arguments to pass to the model."""
@validator("http_client", always=True, allow_reuse=True)
@classmethod
def validate_client(cls, field_value: Any, values: Dict) -> AzureMLEndpointClient:
"""Validate that api key and python package exists in environment."""
endpoint_key = get_from_dict_or_env(
values, "endpoint_api_key", "AZUREML_ENDPOINT_API_KEY"
)
endpoint_url = get_from_dict_or_env(
values, "endpoint_url", "AZUREML_ENDPOINT_URL"
)
deployment_name = get_from_dict_or_env(
values, "deployment_name", "AZUREML_DEPLOYMENT_NAME", ""
)
http_client = AzureMLEndpointClient(endpoint_url, endpoint_key, deployment_name)
return http_client
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
_model_kwargs = self.model_kwargs or {}
return {
**{"deployment_name": self.deployment_name},
**{"model_kwargs": _model_kwargs},
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "azureml_endpoint"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to an AzureML Managed Online endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = azureml_model("Tell me a joke.")
"""
_model_kwargs = self.model_kwargs or {}
request_payload = self.content_formatter.format_request_payload(
prompt, _model_kwargs
)
response_payload = self.http_client.call(request_payload, **kwargs)
generated_text = self.content_formatter.format_response_payload(
response_payload
)
return generated_text
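A usage sketch tying the classes above together (endpoint URL and key are placeholders; the content formatter must match the deployed model, here Dolly as an example):
.. code-block:: python
    from langchain.llms.azureml_endpoint import AzureMLOnlineEndpoint, DollyContentFormatter

    llm = AzureMLOnlineEndpoint(
        endpoint_url="https://<your-endpoint>.<your_region>.inference.ml.azure.com/score",
        endpoint_api_key="my-api-key",
        content_formatter=DollyContentFormatter(),
        model_kwargs={"max_tokens": 100},
    )
    print(llm("Tell me a joke."))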
Source code for langchain.llms.minimax
"""Wrapper around Minimax APIs."""
from __future__ import annotations
import logging
from typing import (
Any,
Dict,
List,
Optional,
)
import requests
from langchain.callbacks.manager import (
CallbackManagerForLLMRun,
)
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.pydantic_v1 import BaseModel, Field, root_validator
from langchain.utils import get_from_dict_or_env
logger = logging.getLogger(__name__)
class _MinimaxEndpointClient(BaseModel):
"""An API client that talks to a Minimax llm endpoint."""
host: str
group_id: str
api_key: str
api_url: str
@root_validator(pre=True, allow_reuse=True)
def set_api_url(cls, values: Dict[str, Any]) -> Dict[str, Any]:
if "api_url" not in values:
host = values["host"]
group_id = values["group_id"]
api_url = f"{host}/v1/text/chatcompletion?GroupId={group_id}"
values["api_url"] = api_url
return values
def post(self, request: Any) -> Any:
headers = {"Authorization": f"Bearer {self.api_key}"}
response = requests.post(self.api_url, headers=headers, json=request)
# TODO: error handling and automatic retries
if not response.ok:
raise ValueError(f"HTTP {response.status_code} error: {response.text}")
if response.json()["base_resp"]["status_code"] > 0:
raise ValueError(
f"API {response.json()['base_resp']['status_code']}" | lang/api.python.langchain.com/en/latest/_modules/langchain/llms/minimax.html |
961c0eb8f5f8-1 | f"API {response.json()['base_resp']['status_code']}"
f" error: {response.json()['base_resp']['status_msg']}"
)
return response.json()["reply"]
[docs]class MinimaxCommon(BaseModel):
"""Common parameters for Minimax large language models."""
_client: Any = None
model: str = "abab5.5-chat"
"""Model name to use."""
max_tokens: int = 256
"""Denotes the number of tokens to predict per generation."""
temperature: float = 0.7
"""A non-negative float that tunes the degree of randomness in generation."""
top_p: float = 0.95
"""Total probability mass of tokens to consider at each step."""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Holds any model parameters valid for `create` call not explicitly specified."""
minimax_api_host: Optional[str] = None
minimax_group_id: Optional[str] = None
minimax_api_key: Optional[str] = None
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
values["minimax_api_key"] = get_from_dict_or_env(
values, "minimax_api_key", "MINIMAX_API_KEY"
)
values["minimax_group_id"] = get_from_dict_or_env(
values, "minimax_group_id", "MINIMAX_GROUP_ID"
)
# Get custom api url from environment.
values["minimax_api_host"] = get_from_dict_or_env(
values,
"minimax_api_host",
"MINIMAX_API_HOST", | lang/api.python.langchain.com/en/latest/_modules/langchain/llms/minimax.html |
961c0eb8f5f8-2 | "minimax_api_host",
"MINIMAX_API_HOST",
default="https://api.minimax.chat",
)
return values
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling OpenAI API."""
return {
"model": self.model,
"tokens_to_generate": self.max_tokens,
"temperature": self.temperature,
"top_p": self.top_p,
**self.model_kwargs,
}
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Get the identifying parameters."""
return {**{"model": self.model}, **self._default_params}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "minimax"
def __init__(self, **data: Any):
super().__init__(**data)
self._client = _MinimaxEndpointClient(
host=self.minimax_api_host,
api_key=self.minimax_api_key,
group_id=self.minimax_group_id,
)
[docs]class Minimax(MinimaxCommon, LLM):
"""Wrapper around Minimax large language models.
To use, you should have the environment variable
``MINIMAX_API_KEY`` and ``MINIMAX_GROUP_ID`` set with your API key,
or pass them as a named parameter to the constructor.
Example:
.. code-block:: python
from langchain.llms.minimax import Minimax
minimax = Minimax(model="<model_name>", minimax_api_key="my-api-key",
minimax_group_id="my-group-id")
"""
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
r"""Call out to Minimax's completion endpoint to chat
Args:
prompt: The prompt to pass into the model.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = minimax("Tell me a joke.")
"""
request = self._default_params
request["messages"] = [{"sender_type": "USER", "text": prompt}]
request.update(kwargs)
text = self._client.post(request)
if stop is not None:
# This is required since the stop tokens
# are not enforced by the model parameters
text = enforce_stop_tokens(text, stop)
return text
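A usage sketch for the class above (assumes ``MINIMAX_API_KEY`` and ``MINIMAX_GROUP_ID`` are set in the environment):
.. code-block:: python
    from langchain.llms.minimax import Minimax

    llm = Minimax(model="abab5.5-chat", temperature=0.5, max_tokens=128)
    print(llm("Tell me a joke."))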
Source code for langchain.llms.xinference
from typing import TYPE_CHECKING, Any, Dict, Generator, List, Mapping, Optional, Union
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
if TYPE_CHECKING:
from xinference.client import RESTfulChatModelHandle, RESTfulGenerateModelHandle
from xinference.model.llm.core import LlamaCppGenerateConfig
[docs]class Xinference(LLM):
"""Wrapper for accessing Xinference's large-scale model inference service.
To use, you should have the xinference library installed:
.. code-block:: bash
pip install "xinference[all]"
Check out: https://github.com/xorbitsai/inference
To run, you need to start a Xinference supervisor on one server and Xinference workers on the other servers
Example:
To start a local instance of Xinference, run
.. code-block:: bash
$ xinference
You can also deploy Xinference in a distributed cluster. Here are the steps:
Starting the supervisor:
.. code-block:: bash
$ xinference-supervisor
Starting the worker:
.. code-block:: bash
$ xinference-worker
Then, launch a model using command line interface (CLI).
Example:
.. code-block:: bash
$ xinference launch -n orca -s 3 -q q4_0
It will return a model UID. Then, you can use Xinference with LangChain.
Example:
.. code-block:: python
from langchain.llms import Xinference
llm = Xinference(
server_url="http://0.0.0.0:9997", | lang/api.python.langchain.com/en/latest/_modules/langchain/llms/xinference.html |
8d42d338d2a0-1 | server_url="http://0.0.0.0:9997",
model_uid = {model_uid} # replace model_uid with the model UID returned from launching the model
)
llm(
prompt="Q: where can we visit in the capital of France? A:",
generate_config={"max_tokens": 1024, "stream": True},
)
To view all the supported builtin models, run:
.. code-block:: bash
$ xinference list --all
""" # noqa: E501
client: Any
server_url: Optional[str]
"""URL of the xinference server"""
model_uid: Optional[str]
"""UID of the launched model"""
model_kwargs: Dict[str, Any]
"""Keyword arguments to be passed to xinference.LLM"""
def __init__(
self,
server_url: Optional[str] = None,
model_uid: Optional[str] = None,
**model_kwargs: Any,
):
try:
from xinference.client import RESTfulClient
except ImportError as e:
raise ImportError(
"Could not import RESTfulClient from xinference. Please install it"
" with `pip install xinference`."
) from e
model_kwargs = model_kwargs or {}
super().__init__(
**{
"server_url": server_url,
"model_uid": model_uid,
"model_kwargs": model_kwargs,
}
)
if self.server_url is None:
raise ValueError("Please provide server URL")
if self.model_uid is None:
raise ValueError("Please provide the model UID")
self.client = RESTfulClient(server_url)
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "xinference"
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
**{"server_url": self.server_url},
**{"model_uid": self.model_uid},
**{"model_kwargs": self.model_kwargs},
}
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call the xinference model and return the output.
Args:
prompt: The prompt to use for generation.
stop: Optional list of stop words to use when generating.
generate_config: Optional dictionary for the configuration used for
generation.
Returns:
The generated string by the model.
"""
model = self.client.get_model(self.model_uid)
generate_config: "LlamaCppGenerateConfig" = kwargs.get("generate_config", {})
generate_config = {**self.model_kwargs, **generate_config}
if stop:
generate_config["stop"] = stop
if generate_config and generate_config.get("stream"):
combined_text_output = ""
for token in self._stream_generate(
model=model,
prompt=prompt,
run_manager=run_manager,
generate_config=generate_config,
):
combined_text_output += token
return combined_text_output
else:
completion = model.generate(prompt=prompt, generate_config=generate_config)
return completion["choices"][0]["text"]
def _stream_generate(
self,
model: Union["RESTfulGenerateModelHandle", "RESTfulChatModelHandle"],
prompt: str,
run_manager: Optional[CallbackManagerForLLMRun] = None,
generate_config: Optional["LlamaCppGenerateConfig"] = None,
) -> Generator[str, None, None]:
"""
Args:
prompt: The prompt to use for generation.
model: The model used for generation.
stop: Optional list of stop words to use when generating.
generate_config: Optional dictionary for the configuration used for
generation.
Yields:
A string token.
"""
streaming_response = model.generate(
prompt=prompt, generate_config=generate_config
)
for chunk in streaming_response:
if isinstance(chunk, dict):
choices = chunk.get("choices", [])
if choices:
choice = choices[0]
if isinstance(choice, dict):
token = choice.get("text", "")
log_probs = choice.get("logprobs")
if run_manager:
run_manager.on_llm_new_token(
token=token, verbose=self.verbose, log_probs=log_probs
)
yield token
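A usage sketch for the class above (server URL and model UID are placeholders for a running Xinference deployment):
.. code-block:: python
    from langchain.llms import Xinference

    # The model UID comes from `xinference launch`; shown here as a placeholder.
    llm = Xinference(server_url="http://127.0.0.1:9997", model_uid="<model_uid>")
    print(llm("Q: What is the capital of France? A:", generate_config={"max_tokens": 64}))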
Source code for langchain.llms.together
"""Wrapper around Together AI's Completion API."""
import logging
from typing import Any, Dict, List, Optional
from aiohttp import ClientSession
from langchain.callbacks.manager import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain.llms.base import LLM
from langchain.pydantic_v1 import Extra, root_validator
from langchain.utilities.requests import Requests
from langchain.utils import get_from_dict_or_env
logger = logging.getLogger(__name__)
[docs]class Together(LLM):
"""Wrapper around Together AI models.
To use, you'll need an API key which you can find here:
https://api.together.xyz/settings/api-keys. This can be passed in as init param
``together_api_key`` or set as environment variable ``TOGETHER_API_KEY``.
Together AI API reference: https://docs.together.ai/reference/inference
"""
base_url: str = "https://api.together.xyz/inference"
"""Base inference API URL."""
together_api_key: str
"""Together AI API key. Get it here: https://api.together.xyz/settings/api-keys"""
model: str
"""Model name. Available models listed here:
https://docs.together.ai/docs/inference-models
"""
temperature: Optional[float] = None
"""Model temperature."""
top_p: Optional[float] = None
"""Used to dynamically adjust the number of choices for each predicted token based
on the cumulative probabilities. A value of 1 will always yield the same
output. A temperature less than 1 favors more correctness and is appropriate
for question answering or summarization. A value greater than 1 introduces more
randomness in the output.
"""
top_k: Optional[int] = None
"""Used to limit the number of choices for the next predicted word or token. It
specifies the maximum number of tokens to consider at each step, based on their
probability of occurrence. This technique helps to speed up the generation
process and can improve the quality of the generated text by focusing on the
most likely options.
"""
max_tokens: Optional[int] = None
"""The maximum number of tokens to generate."""
repetition_penalty: Optional[float] = None
"""A number that controls the diversity of generated text by reducing the
likelihood of repeated sequences. Higher values decrease repetition.
"""
logprobs: Optional[int] = None
"""An integer that specifies how many top token log probabilities are included in
the response for each token generation step.
"""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator(pre=True)
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key exists in environment."""
values["together_api_key"] = get_from_dict_or_env(
values, "together_api_key", "TOGETHER_API_KEY"
)
return values
@property
def _llm_type(self) -> str:
"""Return type of model."""
return "together"
def _format_output(self, output: dict) -> str:
return output["output"]["choices"][0]["text"]
[docs] @staticmethod
def get_user_agent() -> str:
from langchain import __version__
return f"langchain/{__version__}"
@property
def default_params(self) -> Dict[str, Any]:
return {
"model": self.model,
"temperature": self.temperature,
"top_p": self.top_p,
"top_k": self.top_k,
"max_tokens": self.max_tokens,
"repetition_penalty": self.repetition_penalty,
}
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to Together's text generation endpoint.
Args:
prompt: The prompt to pass into the model.
Returns:
The string generated by the model.
"""
headers = {
"Authorization": f"Bearer {self.together_api_key}",
"Content-Type": "application/json",
}
stop_to_use = stop[0] if stop and len(stop) == 1 else stop
payload: Dict[str, Any] = {
**self.default_params,
"prompt": prompt,
"stop": stop_to_use,
**kwargs,
}
# filter None values to not pass them to the http payload
payload = {k: v for k, v in payload.items() if v is not None}
request = Requests(headers=headers)
response = request.post(url=self.base_url, data=payload)
if response.status_code >= 500:
raise Exception(f"Together Server: Error {response.status_code}")
elif response.status_code >= 400:
raise ValueError(f"Together received an invalid payload: {response.text}")
elif response.status_code != 200:
raise Exception(
f"Together returned an unexpected response with status "
f"{response.status_code}: {response.text}"
)
data = response.json()
if data.get("status") != "finished":
err_msg = data.get("error", "Undefined Error")
raise Exception(err_msg)
output = self._format_output(data)
return output
async def _acall(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call Together model to get predictions based on the prompt.
Args:
prompt: The prompt to pass into the model.
Returns:
The string generated by the model.
"""
headers = {
"Authorization": f"Bearer {self.together_api_key}",
"Content-Type": "application/json",
}
stop_to_use = stop[0] if stop and len(stop) == 1 else stop
payload: Dict[str, Any] = {
**self.default_params,
"prompt": prompt,
"stop": stop_to_use,
**kwargs,
}
# filter None values to not pass them to the http payload
payload = {k: v for k, v in payload.items() if v is not None}
async with ClientSession() as session:
async with session.post(
self.base_url, json=payload, headers=headers
) as response:
if response.status >= 500:
raise Exception(f"Together Server: Error {response.status}")
elif response.status >= 400:
raise ValueError(
f"Together received an invalid payload: {response.text}"
)
elif response.status != 200:
raise Exception(
f"Together returned an unexpected response with status "
f"{response.status}: {response.text}"
)
response_json = await response.json()
if response_json.get("status") != "finished":
err_msg = response_json.get("error", "Undefined Error")
raise Exception(err_msg)
output = self._format_output(response_json)
return output
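A usage sketch for the class above (assumes ``TOGETHER_API_KEY`` is set; the model name is illustrative):
.. code-block:: python
    from langchain.llms import Together

    llm = Together(model="togethercomputer/RedPajama-INCITE-7B-Base", temperature=0.7, max_tokens=128)
    print(llm("Tell me a joke."))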
Source code for langchain.llms.bittensor
import http.client
import json
import ssl
from typing import Any, List, Mapping, Optional
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
[docs]class NIBittensorLLM(LLM):
"""NIBittensor LLMs
NIBittensorLLM is created by Neural Internet (https://neuralinternet.ai/),
powered by Bittensor, a decentralized network full of different AI models.
To analyze API_KEYS and logs of your usage visit
https://api.neuralinternet.ai/api-keys
https://api.neuralinternet.ai/logs
Example:
.. code-block:: python
from langchain.llms import NIBittensorLLM
llm = NIBittensorLLM()
"""
system_prompt: Optional[str]
"""Provide system prompt that you want to supply it to model before every prompt"""
top_responses: Optional[int] = 0
"""Provide top_responses to get Top N miner responses on one request.May get delayed
Don't use in Production"""
@property
def _llm_type(self) -> str:
return "NIBittensorLLM"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""
Wrapper around the Bittensor top miner models. It's built by Neural Internet.
Call the Neural Internet's BTVEP Server and return the output.
Parameters (optional):
system_prompt(str): A system prompt defining how your model should respond.
top_responses(int): Total top miner responses to retrieve from Bittensor
protocol.
Return:
The generated response(s).
Example:
.. code-block:: python
from langchain.llms import NIBittensorLLM
llm = NIBittensorLLM(system_prompt="Act like you are programmer with \
5+ years of experience.")
"""
# Creating HTTPS connection with SSL
context = ssl.create_default_context()
context.check_hostname = True
conn = http.client.HTTPSConnection("test.neuralinternet.ai", context=context)
# Sanitizing User Input before passing to API.
if isinstance(self.top_responses, int):
top_n = min(100, self.top_responses)
else:
top_n = 0
default_prompt = "You are an assistant which is created by Neural Internet(NI) \
in decentralized network named as a Bittensor."
if self.system_prompt is None:
system_prompt = (
default_prompt
+ " Your task is to provide accurate response based on user prompt"
)
else:
system_prompt = default_prompt + str(self.system_prompt)
# Retrieving API KEY to pass into header of each request
conn.request("GET", "/admin/api-keys/")
api_key_response = conn.getresponse()
api_keys_data = (
api_key_response.read().decode("utf-8").replace("\n", "").replace("\t", "")
)
api_keys_json = json.loads(api_keys_data)
api_key = api_keys_json[0]["api_key"]
# Creating Header and getting top benchmark miner uids
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {api_key}",
"Endpoint-Version": "2023-05-19",
}
conn.request("GET", "/top_miner_uids", headers=headers)
miner_response = conn.getresponse()
miner_data = (
miner_response.read().decode("utf-8").replace("\n", "").replace("\t", "")
)
uids = json.loads(miner_data)
# Condition for benchmark miner response
if isinstance(uids, list) and uids and not top_n:
for uid in uids:
try:
payload = json.dumps(
{
"uids": [uid],
"messages": [
{"role": "system", "content": system_prompt},
{"role": "user", "content": prompt},
],
}
)
conn.request("POST", "/chat", payload, headers)
init_response = conn.getresponse()
init_data = (
init_response.read()
.decode("utf-8")
.replace("\n", "")
.replace("\t", "")
)
init_json = json.loads(init_data)
if "choices" not in init_json:
continue
reply = init_json["choices"][0]["message"]["content"]
conn.close()
return reply
except Exception:
continue
# For top miner based on bittensor response
try:
payload = json.dumps(
{
"top_n": top_n,
"messages": [
{"role": "system", "content": system_prompt}, | lang/api.python.langchain.com/en/latest/_modules/langchain/llms/bittensor.html |
1e22e36802e2-3 | "messages": [
{"role": "system", "content": system_prompt},
{"role": "user", "content": prompt},
],
}
)
conn.request("POST", "/chat", payload, headers)
response = conn.getresponse()
utf_string = (
response.read().decode("utf-8").replace("\n", "").replace("\t", "")
)
if top_n:
conn.close()
return utf_string
json_resp = json.loads(utf_string)
reply = json_resp["choices"][0]["message"]["content"]
conn.close()
return reply
except Exception as e:
conn.request("GET", f"/error_msg?e={e}&p={prompt}", headers=headers)
return "Sorry I am unable to provide response now, Please try again later."
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
"system_prompt": self.system_prompt,
"top_responses": self.top_responses,
}
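A usage sketch for the class above (no key setup is needed here, since `_call` fetches an API key from the service itself):
.. code-block:: python
    from langchain.llms import NIBittensorLLM

    # Leaving top_responses at 0 returns a single best miner response.
    llm = NIBittensorLLM(system_prompt="You are a helpful assistant.")
    print(llm("Tell me a joke."))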
Source code for langchain.llms.deepinfra
import json
from typing import Any, AsyncIterator, Dict, Iterator, List, Mapping, Optional
import aiohttp
from langchain.callbacks.manager import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain.llms.base import LLM, GenerationChunk
from langchain.pydantic_v1 import Extra, root_validator
from langchain.utilities.requests import Requests
from langchain.utils import get_from_dict_or_env
DEFAULT_MODEL_ID = "google/flan-t5-xl"
[docs]class DeepInfra(LLM):
"""DeepInfra models.
To use, you should have the environment variable ``DEEPINFRA_API_TOKEN``
set with your API token, or pass it as a named parameter to the
constructor.
Only supports `text-generation` and `text2text-generation` for now.
Example:
.. code-block:: python
from langchain.llms import DeepInfra
di = DeepInfra(model_id="google/flan-t5-xl",
deepinfra_api_token="my-api-key")
"""
model_id: str = DEFAULT_MODEL_ID
model_kwargs: Optional[Dict] = None
deepinfra_api_token: Optional[str] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
deepinfra_api_token = get_from_dict_or_env(
values, "deepinfra_api_token", "DEEPINFRA_API_TOKEN"
)
values["deepinfra_api_token"] = deepinfra_api_token
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
**{"model_id": self.model_id},
**{"model_kwargs": self.model_kwargs},
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "deepinfra"
def _url(self) -> str:
return f"https://api.deepinfra.com/v1/inference/{self.model_id}"
def _headers(self) -> Dict:
return {
"Authorization": f"bearer {self.deepinfra_api_token}",
"Content-Type": "application/json",
}
def _body(self, prompt: str, kwargs: Any) -> Dict:
model_kwargs = self.model_kwargs or {}
model_kwargs = {**model_kwargs, **kwargs}
return {
"input": prompt,
**model_kwargs,
}
def _handle_status(self, code: int, text: Any) -> None:
if code >= 500:
raise Exception(f"DeepInfra Server: Error {code}")
elif code >= 400:
raise ValueError(f"DeepInfra received an invalid payload: {text}")
elif code != 200:
raise Exception(
f"DeepInfra returned an unexpected response with status "
f"{code}: {text}"
)
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to DeepInfra's inference API endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = di("Tell me a joke.")
"""
request = Requests(headers=self._headers())
response = request.post(url=self._url(), data=self._body(prompt, kwargs))
self._handle_status(response.status_code, response.text)
data = response.json()
return data["results"][0]["generated_text"]
async def _acall(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
request = Requests(headers=self._headers())
async with request.apost(
url=self._url(), data=self._body(prompt, kwargs)
) as response:
self._handle_status(response.status, response.text)
data = await response.json()
return data["results"][0]["generated_text"]
def _stream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[GenerationChunk]:
request = Requests(headers=self._headers())
response = request.post(
url=self._url(), data=self._body(prompt, {**kwargs, "stream": True})
)
self._handle_status(response.status_code, response.text)
for line in _parse_stream(response.iter_lines()):
chunk = _handle_sse_line(line)
if chunk:
yield chunk
if run_manager:
run_manager.on_llm_new_token(chunk.text)
async def _astream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> AsyncIterator[GenerationChunk]:
request = Requests(headers=self._headers())
async with request.apost(
url=self._url(), data=self._body(prompt, {**kwargs, "stream": True})
) as response:
self._handle_status(response.status, response.text)
async for line in _parse_stream_async(response.content):
chunk = _handle_sse_line(line)
if chunk:
yield chunk
if run_manager:
await run_manager.on_llm_new_token(chunk.text)
def _parse_stream(rbody: Iterator[bytes]) -> Iterator[str]:
for line in rbody:
_line = _parse_stream_helper(line)
if _line is not None:
yield _line
async def _parse_stream_async(rbody: aiohttp.StreamReader) -> AsyncIterator[str]:
async for line in rbody:
_line = _parse_stream_helper(line)
if _line is not None:
yield _line
def _parse_stream_helper(line: bytes) -> Optional[str]:
if line and line.startswith(b"data:"):
if line.startswith(b"data: "):
# SSE event may be valid when it contains whitespace
line = line[len(b"data: ") :]
else:
line = line[len(b"data:") :]
if line.strip() == b"[DONE]":
# return here will cause GeneratorExit exception in urllib3
# and it will close http connection with TCP Reset
return None
else:
return line.decode("utf-8")
return None
def _handle_sse_line(line: str) -> Optional[GenerationChunk]:
try:
obj = json.loads(line)
return GenerationChunk(
text=obj.get("token", {}).get("text"),
)
except Exception:
return None
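A usage sketch for the class above (assumes ``DEEPINFRA_API_TOKEN`` is set; the model_id and kwargs are illustrative):
.. code-block:: python
    from langchain.llms import DeepInfra

    llm = DeepInfra(model_id="google/flan-t5-xl", model_kwargs={"temperature": 0.7, "max_new_tokens": 100})
    print(llm("Tell me a joke."))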
Source code for langchain.llms.titan_takeoff_pro
from typing import Any, Iterator, List, Mapping, Optional
import requests
from requests.exceptions import ConnectionError
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.schema.output import GenerationChunk
[docs]class TitanTakeoffPro(LLM):
base_url: Optional[str] = "http://localhost:3000"
"""Specifies the baseURL to use for the Titan Takeoff Pro API.
Default = http://localhost:3000.
"""
max_new_tokens: Optional[int] = None
"""Maximum tokens generated."""
min_new_tokens: Optional[int] = None
"""Minimum tokens generated."""
sampling_topk: Optional[int] = None
"""Sample predictions from the top K most probable candidates."""
sampling_topp: Optional[float] = None
"""Sample from predictions whose cumulative probability exceeds this value.
"""
sampling_temperature: Optional[float] = None
"""Sample with randomness. Bigger temperatures are associated with
more randomness and 'creativity'.
"""
repetition_penalty: Optional[float] = None
"""Penalise the generation of tokens that have been generated before.
Set to > 1 to penalize.
"""
regex_string: Optional[str] = None
"""A regex string for constrained generation."""
no_repeat_ngram_size: Optional[int] = None
"""Prevent repetitions of ngrams of this size. Default = 0 (turned off)."""
streaming: bool = False
"""Whether to stream the output. Default = False."""
@property
def _default_params(self) -> Mapping[str, Any]:
"""Get the default parameters for calling Titan Takeoff Server (Pro)."""
return {
**(
{"regex_string": self.regex_string}
if self.regex_string is not None
else {}
),
**(
{"sampling_temperature": self.sampling_temperature}
if self.sampling_temperature is not None
else {}
),
**(
{"sampling_topp": self.sampling_topp}
if self.sampling_topp is not None
else {}
),
**(
{"repetition_penalty": self.repetition_penalty}
if self.repetition_penalty is not None
else {}
),
**(
{"max_new_tokens": self.max_new_tokens}
if self.max_new_tokens is not None
else {}
),
**(
{"min_new_tokens": self.min_new_tokens}
if self.min_new_tokens is not None
else {}
),
**(
{"sampling_topk": self.sampling_topk}
if self.sampling_topk is not None
else {}
),
**(
{"no_repeat_ngram_size": self.no_repeat_ngram_size}
if self.no_repeat_ngram_size is not None
else {}
),
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "titan_takeoff_pro"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to Titan Takeoff (Pro) generate endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
prompt = "What is the capital of the United Kingdom?"
response = model(prompt)
"""
try:
if self.streaming:
text_output = ""
for chunk in self._stream(
prompt=prompt,
stop=stop,
run_manager=run_manager,
):
text_output += chunk.text
return text_output
url = f"{self.base_url}/generate"
params = {"text": prompt, **self._default_params}
response = requests.post(url, json=params)
response.raise_for_status()
response.encoding = "utf-8"
text = ""
if "text" in response.json():
text = response.json()["text"]
text = text.replace("</s>", "")
else:
raise ValueError("Something went wrong.")
if stop is not None:
text = enforce_stop_tokens(text, stop)
return text
except ConnectionError:
raise ConnectionError(
"Could not connect to Titan Takeoff (Pro) server. \
Please make sure that the server is running."
)
def _stream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[GenerationChunk]:
"""Call out to Titan Takeoff (Pro) stream endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Yields:
A dictionary like object containing a string token.
Example:
.. code-block:: python
prompt = "What is the capital of the United Kingdom?"
response = model(prompt)
"""
url = f"{self.base_url}/generate_stream"
params = {"text": prompt, **self._default_params}
response = requests.post(url, json=params, stream=True)
response.encoding = "utf-8"
buffer = ""
for text in response.iter_content(chunk_size=1, decode_unicode=True):
buffer += text
if "data:" in buffer:
# Remove the first instance of "data:" from the buffer.
if buffer.startswith("data:"):
buffer = ""
if len(buffer.split("data:", 1)) == 2:
content, _ = buffer.split("data:", 1)
buffer = content.rstrip("\n")
# Trim the buffer to only have content after the "data:" part.
if buffer: # Ensure that there's content to process.
chunk = GenerationChunk(text=buffer)
buffer = "" # Reset buffer for the next set of data.
yield chunk
if run_manager:
run_manager.on_llm_new_token(token=chunk.text)
# Yield any remaining content in the buffer.
if buffer:
chunk = GenerationChunk(text=buffer.replace("</s>", ""))
yield chunk
if run_manager:
run_manager.on_llm_new_token(token=chunk.text)
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {"base_url": self.base_url, **{}, **self._default_params} | lang/api.python.langchain.com/en/latest/_modules/langchain/llms/titan_takeoff_pro.html |
Source code for langchain.llms.yandex
from typing import Any, Dict, List, Mapping, Optional
from langchain.callbacks.manager import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.load.serializable import Serializable
from langchain.pydantic_v1 import root_validator
from langchain.utils import get_from_dict_or_env
class _BaseYandexGPT(Serializable):
iam_token: str = ""
"""Yandex Cloud IAM token for service account
with the `ai.languageModels.user` role"""
api_key: str = ""
"""Yandex Cloud Api Key for service account
with the `ai.languageModels.user` role"""
model_name: str = "general"
"""Model name to use."""
temperature: float = 0.6
"""What sampling temperature to use.
Should be a double number between 0 (inclusive) and 1 (inclusive)."""
max_tokens: int = 7400
"""Sets the maximum limit on the total number of tokens
used for both the input prompt and the generated response.
Must be greater than zero and not exceed 7400 tokens."""
stop: Optional[List[str]] = None
"""Sequences when completion generation will stop."""
url: str = "llm.api.cloud.yandex.net:443"
"""The url of the API."""
@property
def _llm_type(self) -> str:
return "yandex_gpt"
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that iam token exists in environment.""" | lang/api.python.langchain.com/en/latest/_modules/langchain/llms/yandex.html |
e6b77633a886-1 | """Validate that iam token exists in environment."""
iam_token = get_from_dict_or_env(values, "iam_token", "YC_IAM_TOKEN", "")
values["iam_token"] = iam_token
api_key = get_from_dict_or_env(values, "api_key", "YC_API_KEY", "")
values["api_key"] = api_key
if api_key == "" and iam_token == "":
raise ValueError("Either 'YC_API_KEY' or 'YC_IAM_TOKEN' must be provided.")
return values
[docs]class YandexGPT(_BaseYandexGPT, LLM):
"""Yandex large language models.
To use, you should have the ``yandexcloud`` python package installed.
There are two authentication options for the service account
with the ``ai.languageModels.user`` role:
- You can specify the token in a constructor parameter `iam_token`
or in an environment variable `YC_IAM_TOKEN`.
- You can specify the key in a constructor parameter `api_key`
or in an environment variable `YC_API_KEY`.
Example:
.. code-block:: python
from langchain.llms import YandexGPT
yandex_gpt = YandexGPT(iam_token="t1.9eu...")
"""
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
"model_name": self.model_name,
"temperature": self.temperature,
"max_tokens": self.max_tokens,
"stop": self.stop,
}
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call the Yandex GPT model and return the output.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = YandexGPT("Tell me a joke.")
"""
try:
import grpc
from google.protobuf.wrappers_pb2 import DoubleValue, Int64Value
from yandex.cloud.ai.llm.v1alpha.llm_pb2 import GenerationOptions
from yandex.cloud.ai.llm.v1alpha.llm_service_pb2 import InstructRequest
from yandex.cloud.ai.llm.v1alpha.llm_service_pb2_grpc import (
TextGenerationServiceStub,
)
except ImportError as e:
raise ImportError(
"Please install YandexCloud SDK" " with `pip install yandexcloud`."
) from e
channel_credentials = grpc.ssl_channel_credentials()
channel = grpc.secure_channel(self.url, channel_credentials)
request = InstructRequest(
model=self.model_name,
request_text=prompt,
generation_options=GenerationOptions(
temperature=DoubleValue(value=self.temperature),
max_tokens=Int64Value(value=self.max_tokens),
),
)
stub = TextGenerationServiceStub(channel)
if self.iam_token:
metadata = (("authorization", f"Bearer {self.iam_token}"),)
else:
metadata = (("authorization", f"Api-Key {self.api_key}"),)
res = stub.Instruct(request, metadata=metadata)
text = list(res)[0].alternatives[0].text
if stop is not None:
text = enforce_stop_tokens(text, stop)
return text
async def _acall(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Async call the Yandex GPT model and return the output.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
"""
try:
import asyncio
import grpc
from google.protobuf.wrappers_pb2 import DoubleValue, Int64Value
from yandex.cloud.ai.llm.v1alpha.llm_pb2 import GenerationOptions
from yandex.cloud.ai.llm.v1alpha.llm_service_pb2 import (
InstructRequest,
InstructResponse,
)
from yandex.cloud.ai.llm.v1alpha.llm_service_pb2_grpc import (
TextGenerationAsyncServiceStub,
)
from yandex.cloud.operation.operation_service_pb2 import GetOperationRequest
from yandex.cloud.operation.operation_service_pb2_grpc import (
OperationServiceStub,
)
except ImportError as e:
raise ImportError(
"Please install YandexCloud SDK" " with `pip install yandexcloud`."
) from e
operation_api_url = "operation.api.cloud.yandex.net:443"
channel_credentials = grpc.ssl_channel_credentials()
async with grpc.aio.secure_channel(self.url, channel_credentials) as channel:
request = InstructRequest(
model=self.model_name,
request_text=prompt,
generation_options=GenerationOptions(
temperature=DoubleValue(value=self.temperature),
max_tokens=Int64Value(value=self.max_tokens),
),
)
stub = TextGenerationAsyncServiceStub(channel)
if self.iam_token:
metadata = (("authorization", f"Bearer {self.iam_token}"),)
else:
metadata = (("authorization", f"Api-Key {self.api_key}"),)
operation = await stub.Instruct(request, metadata=metadata)
async with grpc.aio.secure_channel(
operation_api_url, channel_credentials
) as operation_channel:
operation_stub = OperationServiceStub(operation_channel)
while not operation.done:
await asyncio.sleep(1)
operation_request = GetOperationRequest(operation_id=operation.id)
operation = await operation_stub.Get(
operation_request, metadata=metadata
)
instruct_response = InstructResponse()
operation.response.Unpack(instruct_response)
text = instruct_response.alternatives[0].text
if stop is not None:
text = enforce_stop_tokens(text, stop)
return text
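A usage sketch for the class above (assumes ``YC_API_KEY`` or ``YC_IAM_TOKEN`` is set in the environment and the ``yandexcloud`` package is installed):
.. code-block:: python
    from langchain.llms import YandexGPT

    llm = YandexGPT(model_name="general", temperature=0.6)
    print(llm("Tell me a joke."))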
Source code for langchain.llms.deepsparse
# flake8: noqa
from typing import Any, AsyncIterator, Dict, Iterator, List, Optional, Union
from langchain.pydantic_v1 import root_validator
from langchain.callbacks.manager import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.schema.output import GenerationChunk
[docs]class DeepSparse(LLM):
"""Neural Magic DeepSparse LLM interface.
To use, you should have the ``deepsparse`` or ``deepsparse-nightly``
python package installed. See https://github.com/neuralmagic/deepsparse
This interface lets you deploy optimized LLMs straight from the
[SparseZoo](https://sparsezoo.neuralmagic.com/?useCase=text_generation)
Example:
.. code-block:: python
from langchain.llms import DeepSparse
llm = DeepSparse(model="zoo:nlg/text_generation/codegen_mono-350m/pytorch/huggingface/bigpython_bigquery_thepile/base_quant-none")
""" # noqa: E501
pipeline: Any #: :meta private:
model: str
"""The path to a model file or directory or the name of a SparseZoo model stub."""
model_config: Optional[Dict[str, Any]] = None
"""Keyword arguments passed to the pipeline construction.
Common parameters are sequence_length and prompt_sequence_length."""
generation_config: Union[None, str, Dict] = None
"""GenerationConfig dictionary consisting of parameters used to control
sequences generated for each prompt. Common parameters are:
max_length, max_new_tokens, num_return_sequences, output_scores,
top_p, top_k, repetition_penalty."""
streaming: bool = False
"""Whether to stream the results, token by token."""
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Get the identifying parameters."""
return {
"model": self.model,
"model_config": self.model_config,
"generation_config": self.generation_config,
"streaming": self.streaming,
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "deepsparse"
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that ``deepsparse`` package is installed."""
try:
from deepsparse import Pipeline
except ImportError:
raise ImportError(
"Could not import `deepsparse` package. "
"Please install it with `pip install deepsparse`"
)
model_config = values["model_config"] or {}
values["pipeline"] = Pipeline.create(
task="text_generation",
model_path=values["model"],
**model_config,
)
return values
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Generate text from a prompt.
Args:
prompt: The prompt to generate text from.
stop: A list of strings to stop generation when encountered.
Returns:
The generated text.
Example:
.. code-block:: python
from langchain.llms import DeepSparse
llm = DeepSparse(model="zoo:nlg/text_generation/codegen_mono-350m/pytorch/huggingface/bigpython_bigquery_thepile/base_quant-none")
llm("Tell me a joke.")
"""
if self.streaming:
combined_output = ""
for chunk in self._stream(
prompt=prompt, stop=stop, run_manager=run_manager, **kwargs
):
combined_output += chunk.text
text = combined_output
else:
text = (
self.pipeline(
sequences=prompt, generation_config=self.generation_config
)
.generations[0]
.text
)
if stop is not None:
text = enforce_stop_tokens(text, stop)
return text
async def _acall(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Generate text from a prompt.
Args:
prompt: The prompt to generate text from.
stop: A list of strings to stop generation when encountered.
Returns:
The generated text.
Example:
.. code-block:: python
from langchain.llms import DeepSparse
llm = DeepSparse(model="zoo:nlg/text_generation/codegen_mono-350m/pytorch/huggingface/bigpython_bigquery_thepile/base_quant-none")
llm("Tell me a joke.")
"""
if self.streaming:
combined_output = "" | lang/api.python.langchain.com/en/latest/_modules/langchain/llms/deepsparse.html |
c642eaca5eef-3 | """
if self.streaming:
combined_output = ""
async for chunk in self._astream(
prompt=prompt, stop=stop, run_manager=run_manager, **kwargs
):
combined_output += chunk.text
text = combined_output
else:
text = (
self.pipeline(
sequences=prompt, generation_config=self.generation_config
)
.generations[0]
.text
)
if stop is not None:
text = enforce_stop_tokens(text, stop)
return text
def _stream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[GenerationChunk]:
"""Yields results objects as they are generated in real time.
It also calls the callback manager's on_llm_new_token event with
similar parameters to the OpenAI LLM class method of the same name.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
A generator representing the stream of tokens being generated.
Yields:
A dictionary-like object containing a string token.
Example:
.. code-block:: python
from langchain.llms import DeepSparse
llm = DeepSparse(
model="zoo:nlg/text_generation/codegen_mono-350m/pytorch/huggingface/bigpython_bigquery_thepile/base_quant-none",
streaming=True
)
for chunk in llm.stream("Tell me a joke",
stop=["'","\n"]):
print(chunk, end='', flush=True)
"""
inference = self.pipeline(
sequences=prompt, generation_config=self.generation_config, streaming=True
)
for token in inference:
chunk = GenerationChunk(text=token.generations[0].text)
yield chunk
if run_manager:
run_manager.on_llm_new_token(token=chunk.text)
async def _astream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> AsyncIterator[GenerationChunk]:
"""Yields results objects as they are generated in real time.
It also calls the callback manager's on_llm_new_token event with
similar parameters to the OpenAI LLM class method of the same name.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
A generator representing the stream of tokens being generated.
Yields:
A dictionary-like object containing a string token.
Example:
.. code-block:: python
from langchain.llms import DeepSparse
llm = DeepSparse(
model="zoo:nlg/text_generation/codegen_mono-350m/pytorch/huggingface/bigpython_bigquery_thepile/base_quant-none",
streaming=True
)
for chunk in llm.stream("Tell me a joke",
stop=["'","\n"]):
print(chunk, end='', flush=True)
"""
inference = self.pipeline(
sequences=prompt, generation_config=self.generation_config, streaming=True
)
for token in inference:
chunk = GenerationChunk(text=token.generations[0].text)
yield chunk
if run_manager:
await run_manager.on_llm_new_token(token=chunk.text)
Source code for langchain.llms.baseten
import logging
from typing import Any, Dict, List, Mapping, Optional
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.pydantic_v1 import Field
logger = logging.getLogger(__name__)
[docs]class Baseten(LLM):
"""Baseten models.
To use, you should have the ``baseten`` python package installed,
and run ``baseten.login()`` with your Baseten API key.
The required ``model`` param can be either a model ID or a model
version ID. Using a model version ID results in
slightly faster invocation.
Any other model parameters can also
be passed in with the format input={model_param: value, ...}
The Baseten model must accept a dictionary of input with the key
"prompt" and return a dictionary with a key "data" which maps
to a list of response strings.
Example:
.. code-block:: python
from langchain.llms import Baseten
my_model = Baseten(model="MODEL_ID")
output = my_model("prompt")
"""
model: str
input: Dict[str, Any] = Field(default_factory=dict)
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
**{"model_kwargs": self.model_kwargs},
}
@property
def _llm_type(self) -> str:
"""Return type of model."""
return "baseten"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call to Baseten deployed model endpoint."""
try:
import baseten
except ImportError as exc:
raise ImportError(
"Could not import Baseten Python package. "
"Please install it with `pip install baseten`."
) from exc
# Resolve the ID as a model version ID first; fall back to a model ID on API error.
try:
model = baseten.deployed_model_version_id(self.model)
response = model.predict({"prompt": prompt, **kwargs})
except baseten.common.core.ApiError:
model = baseten.deployed_model_id(self.model)
response = model.predict({"prompt": prompt, **kwargs})
return "".join(response) | lang/api.python.langchain.com/en/latest/_modules/langchain/llms/baseten.html |
Source code for langchain.llms.vertexai
from __future__ import annotations
from concurrent.futures import Executor, ThreadPoolExecutor
from typing import (
TYPE_CHECKING,
Any,
Callable,
ClassVar,
Dict,
Iterator,
List,
Optional,
Union,
)
from langchain.callbacks.manager import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain.llms.base import BaseLLM, create_base_retry_decorator
from langchain.pydantic_v1 import BaseModel, Field, root_validator
from langchain.schema import (
Generation,
LLMResult,
)
from langchain.schema.output import GenerationChunk
from langchain.utilities.vertexai import (
get_client_info,
init_vertexai,
raise_vertex_import_error,
)
if TYPE_CHECKING:
from google.cloud.aiplatform.gapic import (
PredictionServiceAsyncClient,
PredictionServiceClient,
)
from vertexai.language_models._language_models import (
TextGenerationResponse,
_LanguageModel,
)
def _response_to_generation(
response: TextGenerationResponse,
) -> GenerationChunk:
"""Convert a stream response to a generation chunk."""
try:
generation_info = {
"is_blocked": response.is_blocked,
"safety_attributes": response.safety_attributes,
}
except Exception:
generation_info = None
return GenerationChunk(text=response.text, generation_info=generation_info)
[docs]def is_codey_model(model_name: str) -> bool:
"""Returns True if the model name is a Codey model.
Args:
model_name: The model name to check.
Returns: True if the model name is a Codey model.
"""
return "code" in model_name
def _create_retry_decorator(
llm: VertexAI,
*,
run_manager: Optional[
Union[AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun]
] = None,
) -> Callable[[Any], Any]:
import google.api_core
errors = [
google.api_core.exceptions.ResourceExhausted,
google.api_core.exceptions.ServiceUnavailable,
google.api_core.exceptions.Aborted,
google.api_core.exceptions.DeadlineExceeded,
]
decorator = create_base_retry_decorator(
error_types=errors, max_retries=llm.max_retries, run_manager=run_manager
)
return decorator
[docs]def completion_with_retry(
llm: VertexAI,
*args: Any,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Any:
"""Use tenacity to retry the completion call."""
retry_decorator = _create_retry_decorator(llm, run_manager=run_manager)
@retry_decorator
def _completion_with_retry(*args: Any, **kwargs: Any) -> Any:
return llm.client.predict(*args, **kwargs)
return _completion_with_retry(*args, **kwargs)
[docs]def stream_completion_with_retry(
llm: VertexAI,
*args: Any,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Any:
"""Use tenacity to retry the completion call.""" | lang/api.python.langchain.com/en/latest/_modules/langchain/llms/vertexai.html |
463f176f76a2-2 | ) -> Any:
"""Use tenacity to retry the completion call."""
retry_decorator = _create_retry_decorator(llm, run_manager=run_manager)
@retry_decorator
def _completion_with_retry(*args: Any, **kwargs: Any) -> Any:
return llm.client.predict_streaming(*args, **kwargs)
return _completion_with_retry(*args, **kwargs)
[docs]async def acompletion_with_retry(
llm: VertexAI,
*args: Any,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Any:
"""Use tenacity to retry the completion call."""
retry_decorator = _create_retry_decorator(llm, run_manager=run_manager)
@retry_decorator
async def _acompletion_with_retry(*args: Any, **kwargs: Any) -> Any:
return await llm.client.predict_async(*args, **kwargs)
return await _acompletion_with_retry(*args, **kwargs)
class _VertexAIBase(BaseModel):
project: Optional[str] = None
"The default GCP project to use when making Vertex API calls."
location: str = "us-central1"
"The default location to use when making API calls."
request_parallelism: int = 5
"The amount of parallelism allowed for requests issued to VertexAI models. "
"Default is 5."
max_retries: int = 6
"""The maximum number of retries to make when generating."""
task_executor: ClassVar[Optional[Executor]] = Field(default=None, exclude=True)
stop: Optional[List[str]] = None
"Optional list of stop words to use when generating." | lang/api.python.langchain.com/en/latest/_modules/langchain/llms/vertexai.html |
463f176f76a2-3 | "Optional list of stop words to use when generating."
model_name: Optional[str] = None
"Underlying model name."
@classmethod
def _get_task_executor(cls, request_parallelism: int = 5) -> Executor:
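# Lazily create a single shared thread pool (ClassVar), reused across instances.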
if cls.task_executor is None:
cls.task_executor = ThreadPoolExecutor(max_workers=request_parallelism)
return cls.task_executor
class _VertexAICommon(_VertexAIBase):
client: "_LanguageModel" = None #: :meta private:
model_name: str
"Underlying model name."
temperature: float = 0.0
"Sampling temperature, it controls the degree of randomness in token selection."
max_output_tokens: int = 128
"Token limit determines the maximum amount of text output from one prompt."
top_p: float = 0.95
"Tokens are selected from most probable to least until the sum of their "
"probabilities equals the top-p value. Top-p is ignored for Codey models."
top_k: int = 40
"How the model selects tokens for output, the next token is selected from "
"among the top-k most probable tokens. Top-k is ignored for Codey models."
credentials: Any = Field(default=None, exclude=True)
"The default custom credentials (google.auth.credentials.Credentials) to use "
"when making API calls. If not provided, credentials will be ascertained from "
"the environment."
n: int = 1
"""How many completions to generate for each prompt."""
streaming: bool = False
"""Whether to stream the results or not."""
@property
def _llm_type(self) -> str:
return "vertexai"
@property