id (int64, 0-458k) | file_name (string, length 4-119) | file_path (string, length 14-227) | content (string, length 24-9.96M) | size (int64, 24-9.96M) | language (string, 1 class) | extension (string, 14 classes) | total_lines (int64, 1-219k) | avg_line_length (float64, 2.52-4.63M) | max_line_length (int64, 5-9.91M) | alphanum_fraction (float64, 0-1) | repo_name (string, length 7-101) | repo_stars (int64, 100-139k) | repo_forks (int64, 0-26.4k) | repo_open_issues (int64, 0-2.27k) | repo_license (string, 12 classes) | repo_extraction_date (string, 433 classes)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
2,287,700 | __init__.py | Zipstack_unstract-sdk/src/unstract/sdk/adapters/embedding/qdrant_fast_embed/src/__init__.py | from .qdrant_fast_embed import QdrantFastEmbedM
metadata = {
"name": QdrantFastEmbedM.__name__,
"version": "1.0.0",
"adapter": QdrantFastEmbedM,
"description": "QdrantFastEmbed embedding adapter",
"is_active": False,
}
| 240 | Python | .py | 8 | 26.375 | 55 | 0.69697 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,701 | qdrant_fast_embed.py | Zipstack_unstract-sdk/src/unstract/sdk/adapters/embedding/qdrant_fast_embed/src/qdrant_fast_embed.py | import os
from typing import Any
from llama_index.core.embeddings import BaseEmbedding
from llama_index.embeddings.fastembed import FastEmbedEmbedding
from unstract.sdk.adapters.embedding.embedding_adapter import EmbeddingAdapter
from unstract.sdk.adapters.embedding.helper import EmbeddingHelper
from unstract.sdk.adapters.exceptions import AdapterError
class Constants:
MODEL = "model_name"
ADAPTER_NAME = "adapter_name"
class QdrantFastEmbedM(EmbeddingAdapter):
def __init__(self, settings: dict[str, Any]):
super().__init__("QdrantFastEmbedM")
self.config = settings
@staticmethod
def get_id() -> str:
return "qdrantfastembed|31e83eee-a416-4c07-9c9c-02392d5bcf7f"
@staticmethod
def get_name() -> str:
return "QdrantFastEmbedM"
@staticmethod
def get_description() -> str:
return "QdrantFastEmbedM LLM"
@staticmethod
def get_icon() -> str:
return "/icons/adapter-icons/qdrant.png"
@staticmethod
def get_json_schema() -> str:
f = open(f"{os.path.dirname(__file__)}/static/json_schema.json")
schema = f.read()
f.close()
return schema
def get_embedding_instance(self) -> BaseEmbedding:
try:
embedding: BaseEmbedding = FastEmbedEmbedding(
model_name=str(self.config.get(Constants.MODEL))
)
return embedding
except Exception as e:
raise AdapterError(str(e))
def test_connection(self) -> bool:
embedding = self.get_embedding_instance()
test_result: bool = EmbeddingHelper.test_embedding_instance(embedding)
return test_result
| 1,679 | Python | .py | 44 | 31.318182 | 78 | 0.689889 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
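A minimal usage sketch for the adapter above, assuming the unstract-sdk package and the relevant FastEmbed dependencies are installed; the model name is only an illustrative placeholder for the "model_name" setting read via Constants.MODEL.

from unstract.sdk.adapters.embedding.qdrant_fast_embed.src import QdrantFastEmbedM

# Instantiate the adapter with a settings dict; "model_name" mirrors Constants.MODEL.
adapter = QdrantFastEmbedM(settings={"model_name": "BAAI/bge-small-en-v1.5"})
embedding = adapter.get_embedding_instance()
# BaseEmbedding exposes get_text_embedding(); the vector length depends on the model.
vector = embedding.get_text_embedding("hello world")
print(len(vector))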
2,287,702 | palm.py | Zipstack_unstract-sdk/src/unstract/sdk/adapters/embedding/palm/src/palm.py | import os
from typing import Any
from llama_index.core.embeddings import BaseEmbedding
from llama_index.embeddings.google import GooglePaLMEmbedding
from unstract.sdk.adapters.embedding.embedding_adapter import EmbeddingAdapter
from unstract.sdk.adapters.embedding.helper import EmbeddingHelper
from unstract.sdk.adapters.exceptions import AdapterError
class Constants:
MODEL = "model_name"
API_KEY = "api_key"
ADAPTER_NAME = "adapter_name"
class PaLM(EmbeddingAdapter):
def __init__(self, settings: dict[str, Any]):
super().__init__("Palm")
self.config = settings
@staticmethod
def get_id() -> str:
return "palm|a3fc9fda-f02f-405f-bb26-8bd2ace4317e"
@staticmethod
def get_name() -> str:
return "Palm"
@staticmethod
def get_description() -> str:
return "PaLM Embedding"
@staticmethod
def get_provider() -> str:
return "palm"
@staticmethod
def get_icon() -> str:
return "/icons/adapter-icons/PaLM.png"
@staticmethod
def get_json_schema() -> str:
f = open(f"{os.path.dirname(__file__)}/static/json_schema.json")
schema = f.read()
f.close()
return schema
def get_embedding_instance(self) -> BaseEmbedding:
try:
embedding_batch_size = EmbeddingHelper.get_embedding_batch_size(
config=self.config
)
embedding: BaseEmbedding = GooglePaLMEmbedding(
model_name=str(self.config.get(Constants.MODEL)),
api_key=str(self.config.get(Constants.API_KEY)),
embed_batch_size=embedding_batch_size,
)
return embedding
except Exception as e:
raise AdapterError(str(e))
def test_connection(self) -> bool:
embedding = self.get_embedding_instance()
test_result: bool = EmbeddingHelper.test_embedding_instance(embedding)
return test_result
| 1,966 | Python | .py | 53 | 29.490566 | 78 | 0.659821 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,703 | __init__.py | Zipstack_unstract-sdk/src/unstract/sdk/adapters/embedding/palm/src/__init__.py | from .palm import PaLM
metadata = {
"name": PaLM.__name__,
"version": "1.0.0",
"adapter": PaLM,
"description": "PaLM embedding adapter",
"is_active": True,
}
| 179 | Python | .py | 8 | 18.75 | 44 | 0.6 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,704 | __init__.py | Zipstack_unstract-sdk/src/unstract/sdk/adapters/embedding/open_ai/src/__init__.py | from .open_ai import OpenAI
metadata = {
"name": OpenAI.__name__,
"version": "1.0.0",
"adapter": OpenAI,
"description": "OpenAI embedding adapter",
"is_active": True,
}
| 190 | Python | .py | 8 | 20.125 | 46 | 0.618785 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,705 | open_ai.py | Zipstack_unstract-sdk/src/unstract/sdk/adapters/embedding/open_ai/src/open_ai.py | import os
from typing import Any
import httpx
from llama_index.core.embeddings import BaseEmbedding
from llama_index.embeddings.openai import OpenAIEmbedding
from unstract.sdk.adapters.embedding.embedding_adapter import EmbeddingAdapter
from unstract.sdk.adapters.embedding.helper import EmbeddingHelper
from unstract.sdk.adapters.exceptions import AdapterError
class Constants:
API_KEY = "api_key"
MODEL = "model"
API_BASE_VALUE = "https://api.openai.com/v1/"
API_BASE_KEY = "api_base"
ADAPTER_NAME = "adapter_name"
API_TYPE = "openai"
TIMEOUT = "timeout"
DEFAULT_TIMEOUT = 240
class OpenAI(EmbeddingAdapter):
def __init__(self, settings: dict[str, Any]):
super().__init__("OpenAI")
self.config = settings
@staticmethod
def get_id() -> str:
return "openai|717a0b0e-3bbc-41dc-9f0c-5689437a1151"
@staticmethod
def get_name() -> str:
return "OpenAI"
@staticmethod
def get_description() -> str:
return "OpenAI LLM"
@staticmethod
def get_provider() -> str:
return "openai"
@staticmethod
def get_icon() -> str:
return "/icons/adapter-icons/OpenAI.png"
@staticmethod
def get_json_schema() -> str:
f = open(f"{os.path.dirname(__file__)}/static/json_schema.json")
schema = f.read()
f.close()
return schema
def get_embedding_instance(self) -> BaseEmbedding:
try:
timeout = int(self.config.get(Constants.TIMEOUT, Constants.DEFAULT_TIMEOUT))
httpx_timeout = httpx.Timeout(10.0, connect=60.0)
httpx_client = httpx.Client(timeout=httpx_timeout)
embedding: BaseEmbedding = OpenAIEmbedding(
api_key=str(self.config.get(Constants.API_KEY)),
api_base=str(
self.config.get(Constants.API_BASE_KEY, Constants.API_BASE_VALUE)
),
model=str(self.config.get(Constants.MODEL)),
api_type=Constants.API_TYPE,
timeout=timeout,
http_client=httpx_client,
)
return embedding
except Exception as e:
raise AdapterError(str(e))
def test_connection(self) -> bool:
embedding = self.get_embedding_instance()
test_result: bool = EmbeddingHelper.test_embedding_instance(embedding)
return test_result
| 2,411 | Python | .py | 64 | 29.640625 | 88 | 0.645949 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
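For reference, a hedged sketch of the settings dict the adapter above consumes; the keys mirror its Constants class and every value is a placeholder.

# Illustrative settings for the OpenAI embedding adapter; values are placeholders.
openai_settings = {
    "api_key": "<openai-api-key>",              # Constants.API_KEY
    "model": "text-embedding-3-small",          # Constants.MODEL (example model)
    "api_base": "https://api.openai.com/v1/",   # Constants.API_BASE_KEY (default shown)
    "timeout": 240,                             # Constants.TIMEOUT (defaults to 240)
}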
2,287,706 | hugging_face.py | Zipstack_unstract-sdk/src/unstract/sdk/adapters/embedding/hugging_face/src/hugging_face.py | import os
from typing import Any, Optional
from llama_index.core.embeddings import BaseEmbedding
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from unstract.sdk.adapters.embedding.embedding_adapter import EmbeddingAdapter
from unstract.sdk.adapters.embedding.helper import EmbeddingHelper
from unstract.sdk.adapters.exceptions import AdapterError
class Constants:
ADAPTER_NAME = "adapter_name"
MODEL = "model_name"
TOKENIZER_NAME = "tokenizer_name"
MAX_LENGTH = "max_length"
NORMALIZE = "normalize"
class HuggingFace(EmbeddingAdapter):
def __init__(self, settings: dict[str, Any]):
super().__init__("HuggingFace")
self.config = settings
@staticmethod
def get_id() -> str:
return "huggingface|90ec9ec2-1768-4d69-8fb1-c88b95de5e5a"
@staticmethod
def get_name() -> str:
return "HuggingFace"
@staticmethod
def get_description() -> str:
return "HuggingFace Embedding"
@staticmethod
def get_icon() -> str:
return "/icons/adapter-icons/huggingface.png"
@staticmethod
def get_json_schema() -> str:
f = open(f"{os.path.dirname(__file__)}/static/json_schema.json")
schema = f.read()
f.close()
return schema
def get_embedding_instance(self) -> BaseEmbedding:
try:
embedding_batch_size = EmbeddingHelper.get_embedding_batch_size(
config=self.config
)
max_length: Optional[int] = (
int(self.config.get(Constants.MAX_LENGTH, 0))
if self.config.get(Constants.MAX_LENGTH)
else None
)
embedding: BaseEmbedding = HuggingFaceEmbedding(
model_name=str(self.config.get(Constants.MODEL)),
tokenizer_name=str(self.config.get(Constants.TOKENIZER_NAME)),
normalize=bool(self.config.get(Constants.NORMALIZE)),
embed_batch_size=embedding_batch_size,
max_length=max_length,
)
return embedding
except Exception as e:
raise AdapterError(str(e))
def test_connection(self) -> bool:
embedding = self.get_embedding_instance()
test_result: bool = EmbeddingHelper.test_embedding_instance(embedding)
return test_result
| 2,350 | Python | .py | 59 | 31.271186 | 78 | 0.655687 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,707 | __init__.py | Zipstack_unstract-sdk/src/unstract/sdk/adapters/embedding/hugging_face/src/__init__.py | from .hugging_face import HuggingFace
metadata = {
"name": HuggingFace.__name__,
"version": "1.0.0",
"adapter": HuggingFace,
"description": "HuggingFace embedding adapter",
"is_active": False,
}
| 216 | Python | .py | 8 | 23.375 | 51 | 0.666667 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,708 | __init__.py | Zipstack_unstract-sdk/src/unstract/sdk/adapters/embedding/ollama/src/__init__.py | from .ollama import Ollama
metadata = {
"name": Ollama.__name__,
"version": "1.0.0",
"adapter": Ollama,
"description": "Ollama embedding adapter",
"is_active": True,
}
| 189 | Python | .py | 8 | 20 | 46 | 0.622222 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,709 | ollama.py | Zipstack_unstract-sdk/src/unstract/sdk/adapters/embedding/ollama/src/ollama.py | import os
from typing import Any
from llama_index.core.embeddings import BaseEmbedding
from llama_index.embeddings.ollama import OllamaEmbedding
from unstract.sdk.adapters.embedding.embedding_adapter import EmbeddingAdapter
from unstract.sdk.adapters.embedding.helper import EmbeddingHelper
from unstract.sdk.adapters.exceptions import AdapterError
class Constants:
MODEL = "model_name"
ADAPTER_NAME = "adapter_name"
BASE_URL = "base_url"
class Ollama(EmbeddingAdapter):
def __init__(self, settings: dict[str, Any]):
super().__init__("Ollama")
self.config = settings
@staticmethod
def get_id() -> str:
return "ollama|d58d7080-55a9-4542-becd-8433528e127b"
@staticmethod
def get_name() -> str:
return "Ollama"
@staticmethod
def get_description() -> str:
return "Ollama Embedding"
@staticmethod
def get_provider() -> str:
return "ollama"
@staticmethod
def get_icon() -> str:
return "/icons/adapter-icons/ollama.png"
@staticmethod
def get_json_schema() -> str:
f = open(f"{os.path.dirname(__file__)}/static/json_schema.json")
schema = f.read()
f.close()
return schema
def get_embedding_instance(self) -> BaseEmbedding:
try:
embedding_batch_size = EmbeddingHelper.get_embedding_batch_size(
config=self.config
)
embedding: BaseEmbedding = OllamaEmbedding(
model_name=str(self.config.get(Constants.MODEL)),
base_url=str(self.config.get(Constants.BASE_URL)),
embed_batch_size=embedding_batch_size,
)
return embedding
except Exception as e:
raise AdapterError(str(e))
def test_connection(self) -> bool:
embedding = self.get_embedding_instance()
test_result: bool = EmbeddingHelper.test_embedding_instance(embedding)
return test_result
| 1,976 | Python | .py | 53 | 29.679245 | 78 | 0.661603 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,710 | azure_open_ai.py | Zipstack_unstract-sdk/src/unstract/sdk/adapters/embedding/azure_open_ai/src/azure_open_ai.py | import os
from typing import Any
import httpx
from llama_index.core.embeddings import BaseEmbedding
from llama_index.embeddings.azure_openai import AzureOpenAIEmbedding
from unstract.sdk.adapters.embedding.embedding_adapter import EmbeddingAdapter
from unstract.sdk.adapters.embedding.helper import EmbeddingHelper
from unstract.sdk.adapters.exceptions import AdapterError
class Constants:
ADAPTER_NAME = "adapter_name"
MODEL = "model"
API_KEY = "api_key"
API_VERSION = "api_version"
AZURE_ENDPOINT = "azure_endpoint"
DEPLOYMENT_NAME = "deployment_name"
API_TYPE = "azure"
TIMEOUT = "timeout"
DEFAULT_TIMEOUT = 240
class AzureOpenAI(EmbeddingAdapter):
def __init__(self, settings: dict[str, Any]):
super().__init__("AzureOpenAIEmbedding")
self.config = settings
@staticmethod
def get_id() -> str:
return "azureopenai|9770f3f6-f8ba-4fa0-bb3a-bef48a00e66f"
@staticmethod
def get_name() -> str:
return "AzureOpenAIEmbedding"
@staticmethod
def get_description() -> str:
return "AzureOpenAI Embedding"
@staticmethod
def get_provider() -> str:
return "azure"
@staticmethod
def get_icon() -> str:
return "/icons/adapter-icons/AzureopenAI.png"
@staticmethod
def get_json_schema() -> str:
f = open(f"{os.path.dirname(__file__)}/static/json_schema.json")
schema = f.read()
f.close()
return schema
def get_embedding_instance(self) -> BaseEmbedding:
try:
embedding_batch_size = EmbeddingHelper.get_embedding_batch_size(
config=self.config
)
timeout = int(self.config.get(Constants.TIMEOUT, Constants.DEFAULT_TIMEOUT))
httpx_timeout = httpx.Timeout(timeout, connect=60.0)
httpx_client = httpx.Client(timeout=httpx_timeout)
embedding: BaseEmbedding = AzureOpenAIEmbedding(
model=str(self.config.get(Constants.MODEL)),
deployment_name=str(self.config.get(Constants.DEPLOYMENT_NAME)),
api_key=str(self.config.get(Constants.API_KEY)),
api_version=str(self.config.get(Constants.API_VERSION)),
azure_endpoint=str(self.config.get(Constants.AZURE_ENDPOINT)),
embed_batch_size=embedding_batch_size,
api_type=Constants.API_TYPE,
timeout=timeout,
http_client=httpx_client,
)
return embedding
except Exception as e:
raise AdapterError(str(e))
def test_connection(self) -> bool:
embedding = self.get_embedding_instance()
test_result: bool = EmbeddingHelper.test_embedding_instance(embedding)
return test_result
| 2,791 | Python | .py | 69 | 32.115942 | 88 | 0.661743 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,711 | __init__.py | Zipstack_unstract-sdk/src/unstract/sdk/adapters/embedding/azure_open_ai/src/__init__.py | from .azure_open_ai import AzureOpenAI
metadata = {
"name": AzureOpenAI.__name__,
"version": "1.0.0",
"adapter": AzureOpenAI,
"description": "AzureOpenAI embedding adapter",
"is_active": True,
}
| 216 | Python | .py | 8 | 23.375 | 51 | 0.661836 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
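A hedged sketch of the settings expected by the Azure adapter above; the keys come from its Constants class and the values are placeholders for a real Azure OpenAI deployment.

# Illustrative settings for the AzureOpenAI embedding adapter; values are placeholders.
azure_settings = {
    "model": "text-embedding-ada-002",           # Constants.MODEL (example model)
    "deployment_name": "<deployment-name>",      # Constants.DEPLOYMENT_NAME
    "api_key": "<azure-api-key>",                # Constants.API_KEY
    "api_version": "2024-02-01",                 # Constants.API_VERSION (example value)
    "azure_endpoint": "https://<resource>.openai.azure.com/",  # Constants.AZURE_ENDPOINT
    "timeout": 240,                              # Constants.TIMEOUT (defaults to 240)
}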
2,287,712 | no_op_embedding.py | Zipstack_unstract-sdk/src/unstract/sdk/adapters/embedding/no_op/src/no_op_embedding.py | import os
import time
from typing import Any
from llama_index.core.embeddings import BaseEmbedding
from unstract.sdk.adapters.embedding.embedding_adapter import EmbeddingAdapter
from unstract.sdk.adapters.embedding.no_op.src.no_op_custom_embedding import (
NoOpCustomEmbedding,
)
class NoOpEmbedding(EmbeddingAdapter):
def __init__(self, settings: dict[str, Any]):
super().__init__("NoOpCustomEmbedding")
self.config = settings
@staticmethod
def get_id() -> str:
return "noOpEmbedding|ff223003-fee8-4079-b288-e86215e6b39a"
@staticmethod
def get_name() -> str:
return "No Op Embedding"
@staticmethod
def get_description() -> str:
return "No Op Embedding"
@staticmethod
def get_icon() -> str:
return "/icons/adapter-icons/noOpEmbedding.png"
@staticmethod
def get_provider() -> str:
return "NoOp"
@staticmethod
def get_json_schema() -> str:
f = open(f"{os.path.dirname(__file__)}/static/json_schema.json")
schema = f.read()
f.close()
return schema
def get_embedding_instance(self) -> BaseEmbedding:
embedding: BaseEmbedding = NoOpCustomEmbedding(
embed_dim=1, wait_time=self.config.get("wait_time")
)
return embedding
def test_connection(self) -> bool:
time.sleep(self.config.get("wait_time"))
return True
| 1,419 | Python | .py | 41 | 28.341463 | 78 | 0.673499 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,713 | __init__.py | Zipstack_unstract-sdk/src/unstract/sdk/adapters/embedding/no_op/src/__init__.py | from .no_op_embedding import NoOpEmbedding
metadata = {
"name": NoOpEmbedding.__name__,
"version": "1.0.0",
"adapter": NoOpEmbedding,
"description": "NoOp embedding adapter",
"is_active": True,
}
| 217 | Python | .py | 8 | 23.5 | 44 | 0.663462 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,714 | no_op_custom_embedding.py | Zipstack_unstract-sdk/src/unstract/sdk/adapters/embedding/no_op/src/no_op_custom_embedding.py | from typing import Any
from llama_index.core import MockEmbedding
class NoOpCustomEmbedding(MockEmbedding):
embed_dim: int
def __init__(self, embed_dim: int, wait_time: float, **kwargs: Any) -> None:
"""Init params."""
super().__init__(embed_dim=embed_dim, **kwargs, wait_time=wait_time)
| 317 | Python | .py | 7 | 40.142857 | 80 | 0.685246 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
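A hedged sketch of how the no-op adapter above might be used in a test, assuming the package layout shown; "wait_time" (in seconds) is the only setting it reads.

from unstract.sdk.adapters.embedding.no_op.src import NoOpEmbedding

# The adapter simply sleeps for the configured wait_time and reports success,
# which makes it handy for exercising pipelines without a real embedding backend.
noop = NoOpEmbedding(settings={"wait_time": 0.1})
assert noop.test_connection() is True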
2,287,715 | constants.py | Zipstack_unstract-sdk/src/unstract/sdk/adapters/x2text/constants.py | class X2TextConstants:
PLATFORM_SERVICE_API_KEY = "PLATFORM_SERVICE_API_KEY"
X2TEXT_HOST = "X2TEXT_HOST"
X2TEXT_PORT = "X2TEXT_PORT"
ENABLE_HIGHLIGHT = "enable_highlight"
EXTRACTED_TEXT = "extracted_text"
WHISPER_HASH = "whisper-hash"
WHISPER_HASH_V2 = "whisper_hash"
| 296 | Python | .py | 8 | 32.5 | 57 | 0.708333 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,716 | __init__.py | Zipstack_unstract-sdk/src/unstract/sdk/adapters/x2text/__init__.py | from unstract.sdk.adapters import AdapterDict
from unstract.sdk.adapters.x2text.register import X2TextRegistry
adapters: AdapterDict = {}
X2TextRegistry.register_adapters(adapters)
| 182 | Python | .py | 4 | 44.25 | 64 | 0.870056 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,717 | helper.py | Zipstack_unstract-sdk/src/unstract/sdk/adapters/x2text/helper.py | import logging
from typing import Any, Optional
import requests
from requests import Response
from requests.exceptions import ConnectionError, HTTPError, Timeout
from unstract.sdk.adapters.exceptions import AdapterError
from unstract.sdk.adapters.utils import AdapterUtils
from unstract.sdk.adapters.x2text.constants import X2TextConstants
logger = logging.getLogger(__name__)
class X2TextHelper:
"""Helpers meant for x2text adapters."""
@staticmethod
def parse_response(
response: Response, out_file_path: Optional[str] = None
) -> tuple[str, bool]:
"""Parses the response from a request.
Optionally it can write the output to a file
Args:
response (Response): Response to parse
out_file_path (Optional[str], optional): Output file path to write
                to, skipped if None or empty. Defaults to None.
Returns:
tuple[str, bool]: Response's content and status of parsing
"""
if not response.ok and not response.content:
return "", False
if isinstance(response.content, bytes):
output = response.content.decode("utf-8")
if out_file_path:
with open(out_file_path, "w", encoding="utf-8") as f:
f.write(output)
return output, True
class UnstructuredHelper:
"""Helpers meant for unstructured-community and unstructured-enterprise."""
URL = "url"
API_KEY = "api_key"
TEST_CONNECTION = "test-connection"
PROCESS = "process"
@staticmethod
def test_server_connection(
unstructured_adapter_config: dict[str, Any]
) -> bool:
UnstructuredHelper.make_request(
unstructured_adapter_config, UnstructuredHelper.TEST_CONNECTION
)
return True
@staticmethod
def process_document(
unstructured_adapter_config: dict[str, Any],
input_file_path: str,
output_file_path: Optional[str] = None,
) -> str:
try:
response: Response
with open(input_file_path, "rb") as input_f:
mime_type = AdapterUtils.get_file_mime_type(
input_file=input_file_path
)
files = {"file": (input_file_path, input_f, mime_type)}
response = UnstructuredHelper.make_request(
unstructured_adapter_config=unstructured_adapter_config,
request_type=UnstructuredHelper.PROCESS,
files=files,
)
output, is_success = X2TextHelper.parse_response(
response=response, out_file_path=output_file_path
)
if not is_success:
raise AdapterError("Couldn't extract text from file")
return output
except OSError as e:
msg = f"OS error while reading {input_file_path} "
if output_file_path:
msg += f"and writing {output_file_path}"
msg += f": {str(e)}"
logger.error(msg)
raise AdapterError(str(e))
@staticmethod
def make_request(
unstructured_adapter_config: dict[str, Any],
request_type: str,
**kwargs: dict[Any, Any],
) -> Response:
unstructured_url = unstructured_adapter_config.get(
UnstructuredHelper.URL
)
x2text_service_url = unstructured_adapter_config.get(
X2TextConstants.X2TEXT_HOST
)
x2text_service_port = unstructured_adapter_config.get(
X2TextConstants.X2TEXT_PORT
)
platform_service_api_key = unstructured_adapter_config.get(
X2TextConstants.PLATFORM_SERVICE_API_KEY
)
headers = {
"accept": "application/json",
"Authorization": f"Bearer {platform_service_api_key}",
}
body = {
"unstructured-url": unstructured_url,
}
# Add api key only if present
api_key = unstructured_adapter_config.get(UnstructuredHelper.API_KEY)
if api_key:
body["unstructured-api-key"] = api_key
x2text_url = (
f"{x2text_service_url}:{x2text_service_port}"
f"/api/v1/x2text/{request_type}"
)
# Add files only if the request is for process
files = None
if "files" in kwargs:
files = kwargs["files"] if kwargs["files"] is not None else None
try:
response = requests.post(
x2text_url, headers=headers, data=body, files=files
)
response.raise_for_status()
except ConnectionError as e:
logger.error(f"Adapter error: {e}")
raise AdapterError(
"Unable to connect to unstructured-io's service, "
"please check the URL"
)
except Timeout as e:
msg = "Request to unstructured-io's service has timed out"
logger.error(f"{msg}: {e}")
raise AdapterError(msg)
except HTTPError as e:
logger.error(f"Adapter error: {e}")
default_err = "Error while calling the unstructured-io service"
msg = AdapterUtils.get_msg_from_request_exc(
err=e, message_key="detail", default_err=default_err
)
raise AdapterError("unstructured-io: " + msg)
return response
| 5,419 | Python | .py | 137 | 29.175182 | 79 | 0.601405 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
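A hedged sketch of the configuration the helpers above expect; the first two keys come from UnstructuredHelper, the rest from X2TextConstants, and every value is a placeholder.

# Illustrative config for UnstructuredHelper.process_document; values are placeholders.
unstructured_config = {
    "url": "https://unstructured.example.com",         # UnstructuredHelper.URL
    "api_key": "<unstructured-api-key>",               # UnstructuredHelper.API_KEY (optional)
    "X2TEXT_HOST": "http://x2text-service",            # X2TextConstants.X2TEXT_HOST
    "X2TEXT_PORT": "3004",                             # X2TextConstants.X2TEXT_PORT
    "PLATFORM_SERVICE_API_KEY": "<platform-api-key>",  # X2TextConstants.PLATFORM_SERVICE_API_KEY
}

# extracted = UnstructuredHelper.process_document(unstructured_config, "input.pdf", "out.txt")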
2,287,718 | dto.py | Zipstack_unstract-sdk/src/unstract/sdk/adapters/x2text/dto.py | from dataclasses import dataclass
from typing import Optional
@dataclass
class TextExtractionMetadata:
whisper_hash: str
@dataclass
class TextExtractionResult:
extracted_text: str
extraction_metadata: Optional[TextExtractionMetadata] = None
| 257 | Python | .py | 9 | 25.777778 | 64 | 0.844262 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,719 | register.py | Zipstack_unstract-sdk/src/unstract/sdk/adapters/x2text/register.py | import logging
import os
from importlib import import_module
from typing import Any
from unstract.sdk.adapters.constants import Common
from unstract.sdk.adapters.registry import AdapterRegistry
from unstract.sdk.adapters.x2text.x2text_adapter import X2TextAdapter
logger = logging.getLogger(__name__)
class X2TextRegistry(AdapterRegistry):
@staticmethod
def register_adapters(adapters: dict[str, Any]) -> None:
current_directory = os.path.dirname(os.path.abspath(__file__))
package = "unstract.sdk.adapters.x2text"
for adapter in os.listdir(current_directory):
adapter_path = os.path.join(current_directory, adapter, Common.SRC_FOLDER)
# Check if the item is a directory and not a
# special directory like __pycache__
if os.path.isdir(adapter_path) and not adapter.startswith("__"):
X2TextRegistry._build_adapter_list(adapter, package, adapters)
if len(adapters) == 0:
logger.warning("No X2Text adapter found.")
@staticmethod
def _build_adapter_list(
adapter: str, package: str, adapters: dict[str, Any]
) -> None:
try:
full_module_path = f"{package}.{adapter}.{Common.SRC_FOLDER}"
module = import_module(full_module_path)
metadata = getattr(module, Common.METADATA, {})
if metadata.get("is_active", False):
adapter_class: X2TextAdapter = metadata[Common.ADAPTER]
adapter_id = adapter_class.get_id()
if not adapter_id or (adapter_id in adapters):
logger.warning(f"Duplicate Id : {adapter_id}")
else:
adapters[adapter_id] = {
Common.MODULE: module,
Common.METADATA: metadata,
}
except ModuleNotFoundError as exception:
logger.warning(f"Unable to import X2Text adapters : {exception}")
| 1,974 | Python | .py | 41 | 37.634146 | 86 | 0.634665 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
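A hedged sketch of how the registry above is typically consumed; after register_adapters populates the dict (see the x2text __init__ earlier), each entry keys an adapter id to its module and metadata via the Common constants.

from unstract.sdk.adapters.constants import Common
from unstract.sdk.adapters.x2text import adapters  # populated by X2TextRegistry at import time

# List every discovered X2Text adapter along with its implementing class.
for adapter_id, entry in adapters.items():
    adapter_cls = entry[Common.METADATA][Common.ADAPTER]
    print(adapter_id, adapter_cls.get_name())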
2,287,720 | x2text_adapter.py | Zipstack_unstract-sdk/src/unstract/sdk/adapters/x2text/x2text_adapter.py | from abc import ABC
from typing import Any, Optional
from unstract.sdk.adapters.base import Adapter
from unstract.sdk.adapters.enums import AdapterTypes
from unstract.sdk.adapters.x2text.dto import TextExtractionResult
class X2TextAdapter(Adapter, ABC):
def __init__(self, name: str):
super().__init__(name)
self.name = name
@staticmethod
def get_id() -> str:
return ""
@staticmethod
def get_name() -> str:
return ""
@staticmethod
def get_description() -> str:
return ""
@staticmethod
def get_icon() -> str:
return ""
@staticmethod
def get_json_schema() -> str:
return ""
@staticmethod
def get_adapter_type() -> AdapterTypes:
return AdapterTypes.X2TEXT
def test_connection(self) -> bool:
return False
def process(
self,
input_file_path: str,
output_file_path: Optional[str] = None,
**kwargs: dict[Any, Any],
) -> TextExtractionResult:
return TextExtractionResult(
extracted_text="extracted text", extraction_metadata=None
)
| 1,132 | Python | .py | 38 | 23.315789 | 69 | 0.641405 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,721 | constants.py | Zipstack_unstract-sdk/src/unstract/sdk/adapters/x2text/llm_whisperer/src/constants.py | import os
from enum import Enum
class ProcessingModes(Enum):
OCR = "ocr"
TEXT = "text"
class Modes(Enum):
NATIVE_TEXT = "native_text"
LOW_COST = "low_cost"
HIGH_QUALITY = "high_quality"
FORM = "form"
class OutputModes(Enum):
LINE_PRINTER = "line-printer"
DUMP_TEXT = "dump-text"
TEXT = "text"
class HTTPMethod(Enum):
GET = "GET"
POST = "POST"
class WhispererHeader:
UNSTRACT_KEY = "unstract-key"
class WhispererEndpoint:
"""Endpoints available at LLMWhisperer service."""
TEST_CONNECTION = "test-connection"
WHISPER = "whisper"
STATUS = "whisper-status"
RETRIEVE = "whisper-retrieve"
class WhispererEnv:
"""Env variables for LLM whisperer.
Can be used to alter behaviour at runtime.
Attributes:
POLL_INTERVAL: Time in seconds to wait before polling
LLMWhisperer's status API. Defaults to 30s
        MAX_POLLS: Maximum number of times to poll the status API
            before giving up. Defaults to 30
"""
POLL_INTERVAL = "ADAPTER_LLMW_POLL_INTERVAL"
MAX_POLLS = "ADAPTER_LLMW_MAX_POLLS"
class WhispererConfig:
"""Dictionary keys used to configure LLMWhisperer service."""
URL = "url"
PROCESSING_MODE = "processing_mode"
MODE = "mode"
OUTPUT_MODE = "output_mode"
UNSTRACT_KEY = "unstract_key"
MEDIAN_FILTER_SIZE = "median_filter_size"
GAUSSIAN_BLUR_RADIUS = "gaussian_blur_radius"
FORCE_TEXT_PROCESSING = "force_text_processing"
LINE_SPLITTER_TOLERANCE = "line_splitter_tolerance"
HORIZONTAL_STRETCH_FACTOR = "horizontal_stretch_factor"
PAGES_TO_EXTRACT = "pages_to_extract"
STORE_METADATA_FOR_HIGHLIGHTING = "store_metadata_for_highlighting"
ADD_LINE_NOS = "add_line_nos"
OUTPUT_JSON = "output_json"
PAGE_SEPARATOR = "page_seperator"
MARK_VERTICAL_LINES = "mark_vertical_lines"
MARK_HORIZONTAL_LINES = "mark_horizontal_lines"
class WhisperStatus:
"""Values returned / used by /whisper-status endpoint."""
PROCESSING = "processing"
PROCESSED = "processed"
DELIVERED = "delivered"
UNKNOWN = "unknown"
# Used for async processing
WHISPER_HASH = "whisper-hash"
STATUS = "status"
class WhispererDefaults:
"""Defaults meant for LLM whisperer."""
MEDIAN_FILTER_SIZE = 0
GAUSSIAN_BLUR_RADIUS = 0.0
FORCE_TEXT_PROCESSING = False
LINE_SPLITTER_TOLERANCE = 0.75
HORIZONTAL_STRETCH_FACTOR = 1.0
POLL_INTERVAL = int(os.getenv(WhispererEnv.POLL_INTERVAL, 30))
MAX_POLLS = int(os.getenv(WhispererEnv.MAX_POLLS, 30))
PAGES_TO_EXTRACT = ""
ADD_LINE_NOS = True
OUTPUT_JSON = True
PAGE_SEPARATOR = "<<< >>>"
MARK_VERTICAL_LINES = False
MARK_HORIZONTAL_LINES = False
| 2,762 | Python | .py | 79 | 29.911392 | 71 | 0.693032 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
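The polling defaults above are read from the environment at import time; a minimal sketch of overriding them (values are illustrative):

import os

# Must be set before the constants module above is imported, since the defaults
# are resolved via os.getenv() at module import time.
os.environ["ADAPTER_LLMW_POLL_INTERVAL"] = "10"  # poll the status API every 10 seconds
os.environ["ADAPTER_LLMW_MAX_POLLS"] = "60"      # give up after 60 status checks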
2,287,722 | llm_whisperer.py | Zipstack_unstract-sdk/src/unstract/sdk/adapters/x2text/llm_whisperer/src/llm_whisperer.py | import json
import logging
import os
import time
from pathlib import Path
from typing import Any, Optional
import requests
from requests import Response
from requests.exceptions import ConnectionError, HTTPError, Timeout
from unstract.sdk.adapters.exceptions import ExtractorError
from unstract.sdk.adapters.utils import AdapterUtils
from unstract.sdk.adapters.x2text.constants import X2TextConstants
from unstract.sdk.adapters.x2text.dto import (
TextExtractionMetadata,
TextExtractionResult,
)
from unstract.sdk.adapters.x2text.llm_whisperer.src.constants import (
HTTPMethod,
OutputModes,
ProcessingModes,
WhispererConfig,
WhispererDefaults,
WhispererEndpoint,
WhispererHeader,
WhisperStatus,
)
from unstract.sdk.adapters.x2text.x2text_adapter import X2TextAdapter
logger = logging.getLogger(__name__)
class LLMWhisperer(X2TextAdapter):
def __init__(self, settings: dict[str, Any]):
super().__init__("LLMWhisperer")
self.config = settings
@staticmethod
def get_id() -> str:
return "llmwhisperer|0a1647f0-f65f-410d-843b-3d979c78350e"
@staticmethod
def get_name() -> str:
return "LLMWhisperer"
@staticmethod
def get_description() -> str:
return "LLMWhisperer X2Text"
@staticmethod
def get_icon() -> str:
return "/icons/adapter-icons/LLMWhisperer.png"
@staticmethod
def get_json_schema() -> str:
f = open(f"{os.path.dirname(__file__)}/static/json_schema.json")
schema = f.read()
f.close()
return schema
def _get_request_headers(self) -> dict[str, Any]:
"""Obtains the request headers to authenticate with LLM Whisperer.
Returns:
            dict[str, Any]: Request headers
"""
return {
"accept": "application/json",
WhispererHeader.UNSTRACT_KEY: self.config.get(WhispererConfig.UNSTRACT_KEY),
}
def _make_request(
self,
request_method: HTTPMethod,
request_endpoint: str,
headers: Optional[dict[str, Any]] = None,
params: Optional[dict[str, Any]] = None,
data: Optional[Any] = None,
) -> Response:
"""Makes a request to LLM whisperer service.
Args:
request_method (HTTPMethod): HTTPMethod to call. Can be GET or POST
request_endpoint (str): LLM whisperer endpoint to hit
headers (Optional[dict[str, Any]], optional): Headers to pass.
Defaults to None.
params (Optional[dict[str, Any]], optional): Query params to pass.
Defaults to None.
data (Optional[Any], optional): Data to pass in case of POST.
Defaults to None.
Returns:
Response: Response from the request
"""
llm_whisperer_svc_url = (
f"{self.config.get(WhispererConfig.URL)}" f"/v1/{request_endpoint}"
)
if not headers:
headers = self._get_request_headers()
try:
response: Response
if request_method == HTTPMethod.GET:
response = requests.get(
url=llm_whisperer_svc_url, headers=headers, params=params
)
elif request_method == HTTPMethod.POST:
response = requests.post(
url=llm_whisperer_svc_url,
headers=headers,
params=params,
data=data,
)
else:
raise ExtractorError(f"Unsupported request method: {request_method}")
response.raise_for_status()
except ConnectionError as e:
logger.error(f"Adapter error: {e}")
raise ExtractorError(
"Unable to connect to LLM Whisperer service, please check the URL"
)
except Timeout as e:
msg = "Request to LLM whisperer has timed out"
logger.error(f"{msg}: {e}")
raise ExtractorError(msg)
except HTTPError as e:
logger.error(f"Adapter error: {e}")
default_err = "Error while calling the LLM Whisperer service"
msg = AdapterUtils.get_msg_from_request_exc(
err=e, message_key="message", default_err=default_err
)
raise ExtractorError(msg)
return response
def _get_whisper_params(self, enable_highlight: bool = False) -> dict[str, Any]:
"""Gets query params meant for /whisper endpoint.
        The params are filled based on the configuration passed.
Returns:
dict[str, Any]: Query params
"""
params = {
WhispererConfig.PROCESSING_MODE: self.config.get(
WhispererConfig.PROCESSING_MODE, ProcessingModes.TEXT.value
),
            # Not providing a default value to maintain legacy compatibility.
            # Providing a default would override the params
            # processing_mode and force_text_processing.
WhispererConfig.MODE: self.config.get(WhispererConfig.MODE),
WhispererConfig.OUTPUT_MODE: self.config.get(
WhispererConfig.OUTPUT_MODE, OutputModes.LINE_PRINTER.value
),
WhispererConfig.FORCE_TEXT_PROCESSING: self.config.get(
WhispererConfig.FORCE_TEXT_PROCESSING,
WhispererDefaults.FORCE_TEXT_PROCESSING,
),
WhispererConfig.LINE_SPLITTER_TOLERANCE: self.config.get(
WhispererConfig.LINE_SPLITTER_TOLERANCE,
WhispererDefaults.LINE_SPLITTER_TOLERANCE,
),
WhispererConfig.HORIZONTAL_STRETCH_FACTOR: self.config.get(
WhispererConfig.HORIZONTAL_STRETCH_FACTOR,
WhispererDefaults.HORIZONTAL_STRETCH_FACTOR,
),
WhispererConfig.PAGES_TO_EXTRACT: self.config.get(
WhispererConfig.PAGES_TO_EXTRACT,
WhispererDefaults.PAGES_TO_EXTRACT,
),
WhispererConfig.ADD_LINE_NOS: WhispererDefaults.ADD_LINE_NOS,
WhispererConfig.OUTPUT_JSON: WhispererDefaults.OUTPUT_JSON,
WhispererConfig.PAGE_SEPARATOR: self.config.get(
WhispererConfig.PAGE_SEPARATOR,
WhispererDefaults.PAGE_SEPARATOR,
),
WhispererConfig.MARK_VERTICAL_LINES: self.config.get(
WhispererConfig.MARK_VERTICAL_LINES,
WhispererDefaults.MARK_VERTICAL_LINES,
),
WhispererConfig.MARK_HORIZONTAL_LINES: self.config.get(
WhispererConfig.MARK_HORIZONTAL_LINES,
WhispererDefaults.MARK_HORIZONTAL_LINES,
),
}
if not params[WhispererConfig.FORCE_TEXT_PROCESSING]:
params.update(
{
WhispererConfig.MEDIAN_FILTER_SIZE: self.config.get(
WhispererConfig.MEDIAN_FILTER_SIZE,
WhispererDefaults.MEDIAN_FILTER_SIZE,
),
WhispererConfig.GAUSSIAN_BLUR_RADIUS: self.config.get(
WhispererConfig.GAUSSIAN_BLUR_RADIUS,
WhispererDefaults.GAUSSIAN_BLUR_RADIUS,
),
}
)
if enable_highlight:
params.update(
{WhispererConfig.STORE_METADATA_FOR_HIGHLIGHTING: enable_highlight}
)
return params
def test_connection(self) -> bool:
self._make_request(
request_method=HTTPMethod.GET,
request_endpoint=WhispererEndpoint.TEST_CONNECTION,
)
return True
def _check_status_until_ready(
self, whisper_hash: str, headers: dict[str, Any], params: dict[str, Any]
) -> WhisperStatus:
"""Checks the extraction status by polling.
Polls the /whisper-status endpoint in fixed intervals of
env: ADAPTER_LLMW_POLL_INTERVAL for a certain number of times
controlled by env: ADAPTER_LLMW_MAX_POLLS.
Args:
whisper_hash (str): Identifier for the extraction,
returned by LLMWhisperer
headers (dict[str, Any]): Headers to pass for the status check
params (dict[str, Any]): Params to pass for the status check
Returns:
WhisperStatus: Status of the extraction
"""
POLL_INTERVAL = WhispererDefaults.POLL_INTERVAL
MAX_POLLS = WhispererDefaults.MAX_POLLS
request_count = 0
        # Check status in fixed intervals up to the max poll count.
while True:
request_count += 1
logger.info(
f"Checking status with interval: {POLL_INTERVAL}s"
f", request count: {request_count} [max: {MAX_POLLS}]"
)
status_response = self._make_request(
request_method=HTTPMethod.GET,
request_endpoint=WhispererEndpoint.STATUS,
headers=headers,
params=params,
)
if status_response.status_code == 200:
status_data = status_response.json()
status = status_data.get(WhisperStatus.STATUS, WhisperStatus.UNKNOWN)
logger.info(f"Whisper status for {whisper_hash}: {status}")
if status in [WhisperStatus.PROCESSED, WhisperStatus.DELIVERED]:
break
else:
raise ExtractorError(
"Error checking LLMWhisperer status: "
f"{status_response.status_code} - {status_response.text}"
)
# Exit with error if max poll count is reached
if request_count >= MAX_POLLS:
raise ExtractorError(
"Unable to extract text after attempting" f" {request_count} times"
)
time.sleep(POLL_INTERVAL)
return status
def _extract_async(self, whisper_hash: str) -> str:
"""Makes an async extraction with LLMWhisperer.
Polls and checks the status first before proceeding to retrieve once.
Args:
whisper_hash (str): Identifier of the extraction
Returns:
str: Extracted contents from the file
"""
logger.info(f"Extracting async for whisper hash: {whisper_hash}")
headers: dict[str, Any] = self._get_request_headers()
params = {
WhisperStatus.WHISPER_HASH: whisper_hash,
WhispererConfig.OUTPUT_JSON: WhispererDefaults.OUTPUT_JSON,
}
# Polls in fixed intervals and checks status
self._check_status_until_ready(
whisper_hash=whisper_hash, headers=headers, params=params
)
retrieve_response = self._make_request(
request_method=HTTPMethod.GET,
request_endpoint=WhispererEndpoint.RETRIEVE,
headers=headers,
params=params,
)
if retrieve_response.status_code == 200:
return retrieve_response.json()
else:
raise ExtractorError(
"Error retrieving from LLMWhisperer: "
f"{retrieve_response.status_code} - {retrieve_response.text}"
)
def _send_whisper_request(
self, input_file_path: str, enable_highlight: bool = False
) -> requests.Response:
headers = self._get_request_headers()
headers["Content-Type"] = "application/octet-stream"
params = self._get_whisper_params(enable_highlight)
response: requests.Response
try:
with open(input_file_path, "rb") as input_f:
response = self._make_request(
request_method=HTTPMethod.POST,
request_endpoint=WhispererEndpoint.WHISPER,
headers=headers,
params=params,
data=input_f.read(),
)
except OSError as e:
logger.error(f"OS error while reading {input_file_path}: {e}")
raise ExtractorError(str(e))
return response
def _extract_text_from_response(
self, output_file_path: Optional[str], response: requests.Response
) -> str:
output_json = {}
if response.status_code == 200:
output_json = response.json()
elif response.status_code == 202:
whisper_hash = response.json().get(WhisperStatus.WHISPER_HASH)
output_json = self._extract_async(whisper_hash=whisper_hash)
else:
raise ExtractorError("Couldn't extract text from file")
if output_file_path:
self._write_output_to_file(
output_json=output_json,
output_file_path=Path(output_file_path),
)
return output_json.get("text", "")
def _write_output_to_file(self, output_json: dict, output_file_path: Path) -> None:
"""Writes the extracted text and metadata to the specified output file
and metadata file.
Args:
output_json (dict): The dictionary containing the extracted data,
with "text" as the key for the main content.
output_file_path (Path): The file path where the extracted text
should be written.
Raises:
ExtractorError: If there is an error while writing the output file.
"""
try:
text_output = output_json.get("text", "")
logger.info(f"Writing output to {output_file_path}")
output_file_path.write_text(text_output, encoding="utf-8")
try:
# Define the directory of the output file and metadata paths
output_dir = output_file_path.parent
metadata_dir = output_dir / "metadata"
metadata_file_name = output_file_path.with_suffix(".json").name
metadata_file_path = metadata_dir / metadata_file_name
# Ensure the metadata directory exists
metadata_dir.mkdir(parents=True, exist_ok=True)
# Remove the "text" key from the metadata
metadata = {
key: value for key, value in output_json.items() if key != "text"
}
metadata_json = json.dumps(metadata, ensure_ascii=False, indent=4)
logger.info(f"Writing metadata to {metadata_file_path}")
metadata_file_path.write_text(metadata_json, encoding="utf-8")
except Exception as e:
logger.error(
f"Error while writing metadata to {metadata_file_path}: {e}"
)
except Exception as e:
logger.error(f"Error while writing {output_file_path}: {e}")
raise ExtractorError(str(e))
def process(
self,
input_file_path: str,
output_file_path: Optional[str] = None,
**kwargs: dict[Any, Any],
) -> TextExtractionResult:
"""Used to extract text from documents.
Args:
input_file_path (str): Path to file that needs to be extracted
output_file_path (Optional[str], optional): File path to write
extracted text into, if None doesn't write to a file.
Defaults to None.
Returns:
str: Extracted text
"""
response: requests.Response = self._send_whisper_request(
input_file_path,
bool(kwargs.get(X2TextConstants.ENABLE_HIGHLIGHT, False)),
)
metadata = TextExtractionMetadata(
whisper_hash=response.headers.get(X2TextConstants.WHISPER_HASH, "")
)
return TextExtractionResult(
extracted_text=self._extract_text_from_response(output_file_path, response),
extraction_metadata=metadata,
)
| 15,965 | Python | .py | 370 | 31.175676 | 88 | 0.596964 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
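A hedged end-to-end sketch for the adapter above; the URL and key are placeholders, the settings keys follow WhispererConfig, and a reachable LLMWhisperer deployment is assumed.

from unstract.sdk.adapters.x2text.llm_whisperer.src import LLMWhisperer

whisperer = LLMWhisperer(
    settings={
        "url": "https://llmwhisperer.example.com",  # WhispererConfig.URL
        "unstract_key": "<api-key>",                # WhispererConfig.UNSTRACT_KEY
        "processing_mode": "text",                  # ProcessingModes.TEXT
        "output_mode": "line-printer",              # OutputModes.LINE_PRINTER
    }
)
# process() blocks, polling asynchronously submitted jobs until they complete.
result = whisperer.process(input_file_path="sample.pdf", output_file_path="sample.txt")
print(result.extracted_text[:200])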
2,287,723 | __init__.py | Zipstack_unstract-sdk/src/unstract/sdk/adapters/x2text/llm_whisperer/src/__init__.py | from .llm_whisperer import LLMWhisperer
metadata = {
"name": LLMWhisperer.__name__,
"version": "1.0.0",
"adapter": LLMWhisperer,
"description": "LLMWhisperer X2Text adapter",
"is_active": True,
}
| 217 | Python | .py | 8 | 23.5 | 49 | 0.668269 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,724 | unstructured_enterprise.py | Zipstack_unstract-sdk/src/unstract/sdk/adapters/x2text/unstructured_enterprise/src/unstructured_enterprise.py | import logging
import os
from typing import Any, Optional
from unstract.sdk.adapters.x2text.dto import TextExtractionResult
from unstract.sdk.adapters.x2text.helper import UnstructuredHelper
from unstract.sdk.adapters.x2text.x2text_adapter import X2TextAdapter
logger = logging.getLogger(__name__)
class UnstructuredEnterprise(X2TextAdapter):
def __init__(self, settings: dict[str, Any]):
super().__init__("UnstructuredIOEnterprise")
self.config = settings
@staticmethod
def get_id() -> str:
return "unstructuredenterprise|eb1b6c58-221f-4db0-a4a5-e5f9cdca44e1"
@staticmethod
def get_name() -> str:
return "Unstructured IO Enterprise"
@staticmethod
def get_description() -> str:
return "Unstructured IO Enterprise X2Text"
@staticmethod
def get_icon() -> str:
return "/icons/adapter-icons/UnstructuredIO.png"
@staticmethod
def get_json_schema() -> str:
f = open(f"{os.path.dirname(__file__)}/static/json_schema.json")
schema = f.read()
f.close()
return schema
def process(
self,
input_file_path: str,
output_file_path: Optional[str] = None,
**kwargs: dict[str, Any],
) -> TextExtractionResult:
extracted_text: str = UnstructuredHelper.process_document(
self.config, input_file_path, output_file_path
)
return TextExtractionResult(extracted_text=extracted_text)
def test_connection(self) -> bool:
result: bool = UnstructuredHelper.test_server_connection(self.config)
return result
| 1,610 | Python | .py | 42 | 31.809524 | 77 | 0.690231 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,725 | __init__.py | Zipstack_unstract-sdk/src/unstract/sdk/adapters/x2text/unstructured_enterprise/src/__init__.py | from .unstructured_enterprise import UnstructuredEnterprise
metadata = {
"name": UnstructuredEnterprise.__name__,
"version": "1.0.0",
"adapter": UnstructuredEnterprise,
"description": "UnstructuredIO X2Text adapter",
"is_active": True,
}
| 259 | Python | .py | 8 | 28.75 | 59 | 0.724 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,726 | constants.py | Zipstack_unstract-sdk/src/unstract/sdk/adapters/x2text/llm_whisperer_v2/src/constants.py | import os
from enum import Enum
class Modes(Enum):
NATIVE_TEXT = "native_text"
LOW_COST = "low_cost"
HIGH_QUALITY = "high_quality"
FORM = "form"
class OutputModes(Enum):
LAYOUT_PRESERVING = "layout_preserving"
TEXT = "text"
class HTTPMethod(Enum):
GET = "GET"
POST = "POST"
class WhispererHeader:
UNSTRACT_KEY = "unstract-key"
class WhispererEndpoint:
"""Endpoints available at LLMWhisperer service."""
TEST_CONNECTION = "test-connection"
WHISPER = "whisper"
STATUS = "whisper-status"
RETRIEVE = "whisper-retrieve"
class WhispererEnv:
"""Env variables for LLM whisperer.
Can be used to alter behaviour at runtime.
Attributes:
POLL_INTERVAL: Time in seconds to wait before polling
LLMWhisperer's status API. Defaults to 30s
        MAX_POLLS: Maximum number of times to poll the status API
            before giving up. Defaults to 30
"""
POLL_INTERVAL = "ADAPTER_LLMW_POLL_INTERVAL"
MAX_POLLS = "ADAPTER_LLMW_MAX_POLLS"
class WhispererConfig:
"""Dictionary keys used to configure LLMWhisperer service."""
URL = "url"
MODE = "mode"
OUTPUT_MODE = "output_mode"
UNSTRACT_KEY = "unstract_key"
MEDIAN_FILTER_SIZE = "median_filter_size"
GAUSSIAN_BLUR_RADIUS = "gaussian_blur_radius"
LINE_SPLITTER_TOLERANCE = "line_splitter_tolerance"
LINE_SPLITTER_STRATEGY = "line_splitter_strategy"
HORIZONTAL_STRETCH_FACTOR = "horizontal_stretch_factor"
PAGES_TO_EXTRACT = "pages_to_extract"
MARK_VERTICAL_LINES = "mark_vertical_lines"
MARK_HORIZONTAL_LINES = "mark_horizontal_lines"
PAGE_SEPARATOR = "page_seperator"
URL_IN_POST = "url_in_post"
TAG = "tag"
USE_WEBHOOK = "use_webhook"
WEBHOOK_METADATA = "webhook_metadata"
TEXT_ONLY = "text_only"
class WhisperStatus:
"""Values returned / used by /whisper-status endpoint."""
PROCESSING = "processing"
PROCESSED = "processed"
DELIVERED = "delivered"
UNKNOWN = "unknown"
# Used for async processing
WHISPER_HASH = "whisper_hash"
STATUS = "status"
class WhispererDefaults:
"""Defaults meant for LLM whisperer."""
MEDIAN_FILTER_SIZE = 0
GAUSSIAN_BLUR_RADIUS = 0.0
FORCE_TEXT_PROCESSING = False
LINE_SPLITTER_TOLERANCE = 0.75
LINE_SPLITTER_STRATEGY = "left-priority"
HORIZONTAL_STRETCH_FACTOR = 1.0
POLL_INTERVAL = int(os.getenv(WhispererEnv.POLL_INTERVAL, 30))
MAX_POLLS = int(os.getenv(WhispererEnv.MAX_POLLS, 30))
PAGES_TO_EXTRACT = ""
PAGE_SEPARATOR = "<<<"
MARK_VERTICAL_LINES = False
MARK_HORIZONTAL_LINES = False
URL_IN_POST = False
TAG = "default"
TEXT_ONLY = False
| 2,712 | Python | .py | 78 | 29.705128 | 66 | 0.691836 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,727 | llm_whisperer_v2.py | Zipstack_unstract-sdk/src/unstract/sdk/adapters/x2text/llm_whisperer_v2/src/llm_whisperer_v2.py | import json
import logging
import os
from typing import Any, Optional
import requests
from unstract.sdk.adapters.x2text.constants import X2TextConstants
from unstract.sdk.adapters.x2text.dto import (
TextExtractionMetadata,
TextExtractionResult,
)
from unstract.sdk.adapters.x2text.llm_whisperer_v2.src.constants import (
HTTPMethod,
WhispererEndpoint,
)
from unstract.sdk.adapters.x2text.llm_whisperer_v2.src.helper import LLMWhispererHelper
from unstract.sdk.adapters.x2text.x2text_adapter import X2TextAdapter
logger = logging.getLogger(__name__)
class LLMWhispererV2(X2TextAdapter):
def __init__(self, settings: dict[str, Any]):
super().__init__("LLMWhispererV2")
self.config = settings
@staticmethod
def get_id() -> str:
return "llmwhisperer|a5e6b8af-3e1f-4a80-b006-d017e8e67f93"
@staticmethod
def get_name() -> str:
return "LLMWhisperer V2"
@staticmethod
def get_description() -> str:
return "LLMWhisperer V2 X2Text"
@staticmethod
def get_icon() -> str:
return "/icons/adapter-icons/LLMWhispererV2.png"
@staticmethod
def get_json_schema() -> str:
f = open(f"{os.path.dirname(__file__)}/static/json_schema.json")
schema = f.read()
f.close()
return schema
def test_connection(self) -> bool:
LLMWhispererHelper.make_request(
config=self.config,
request_method=HTTPMethod.GET,
request_endpoint=WhispererEndpoint.TEST_CONNECTION,
)
return True
def process(
self,
input_file_path: str,
output_file_path: Optional[str] = None,
**kwargs: dict[Any, Any],
) -> TextExtractionResult:
"""Used to extract text from documents.
Args:
input_file_path (str): Path to file that needs to be extracted
output_file_path (Optional[str], optional): File path to write
extracted text into, if None doesn't write to a file.
Defaults to None.
Returns:
str: Extracted text
"""
response: requests.Response = LLMWhispererHelper.send_whisper_request(
input_file_path, self.config
)
response_text = response.text
        response_dict = json.loads(response_text)
        metadata = TextExtractionMetadata(
            whisper_hash=response_dict.get(X2TextConstants.WHISPER_HASH_V2, "")
        )
        return TextExtractionResult(
            extracted_text=LLMWhispererHelper.extract_text_from_response(
                self.config, output_file_path, response_dict, response
),
extraction_metadata=metadata,
)
| 2,713 | Python | .py | 75 | 28.506667 | 87 | 0.661327 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
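The V2 adapter above is driven the same way, but with the V2 parameter names; another hedged sketch with placeholder values:

from unstract.sdk.adapters.x2text.llm_whisperer_v2.src import LLMWhispererV2

whisperer_v2 = LLMWhispererV2(
    settings={
        "url": "https://llmwhisperer-v2.example.com",  # WhispererConfig.URL
        "unstract_key": "<api-key>",                   # WhispererConfig.UNSTRACT_KEY
        "mode": "form",                                # Modes.FORM (the default)
        "output_mode": "layout_preserving",            # OutputModes.LAYOUT_PRESERVING
    }
)
result = whisperer_v2.process(input_file_path="invoice.pdf")
print(result.extraction_metadata.whisper_hash)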
2,287,728 | __init__.py | Zipstack_unstract-sdk/src/unstract/sdk/adapters/x2text/llm_whisperer_v2/src/__init__.py | from .llm_whisperer_v2 import LLMWhispererV2
metadata = {
"name": LLMWhispererV2.__name__,
"version": "1.0.0",
"adapter": LLMWhispererV2,
"description": "LLMWhispererV2 X2Text adapter",
"is_active": True,
}
| 228 | Python | .py | 8 | 24.875 | 51 | 0.680365 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,729 | helper.py | Zipstack_unstract-sdk/src/unstract/sdk/adapters/x2text/llm_whisperer_v2/src/helper.py | import json
import logging
import time
from pathlib import Path
from typing import Any, Optional
import requests
from requests import Response
from requests.exceptions import ConnectionError, HTTPError, Timeout
from unstract.sdk.adapters.exceptions import ExtractorError
from unstract.sdk.adapters.utils import AdapterUtils
from unstract.sdk.adapters.x2text.llm_whisperer_v2.src.constants import (
HTTPMethod,
Modes,
OutputModes,
WhispererConfig,
WhispererDefaults,
WhispererEndpoint,
WhispererHeader,
WhisperStatus,
)
logger = logging.getLogger(__name__)
class LLMWhispererHelper:
@staticmethod
def get_request_headers(config: dict[str, Any]) -> dict[str, Any]:
"""Obtains the request headers to authenticate with LLM Whisperer.
Returns:
            dict[str, Any]: Request headers
"""
return {
"accept": "application/json",
WhispererHeader.UNSTRACT_KEY: config.get(WhispererConfig.UNSTRACT_KEY),
}
@staticmethod
def make_request(
config: dict[str, Any],
request_method: HTTPMethod,
request_endpoint: str,
headers: Optional[dict[str, Any]] = None,
params: Optional[dict[str, Any]] = None,
data: Optional[Any] = None,
) -> Response:
"""Makes a request to LLM whisperer service.
Args:
request_method (HTTPMethod): HTTPMethod to call. Can be GET or POST
request_endpoint (str): LLM whisperer endpoint to hit
headers (Optional[dict[str, Any]], optional): Headers to pass.
Defaults to None.
params (Optional[dict[str, Any]], optional): Query params to pass.
Defaults to None.
data (Optional[Any], optional): Data to pass in case of POST.
Defaults to None.
Returns:
Response: Response from the request
"""
llm_whisperer_svc_url = (
f"{config.get(WhispererConfig.URL)}" f"/api/v2/{request_endpoint}"
)
if not headers:
headers = LLMWhispererHelper.get_request_headers(config=config)
try:
response: Response
if request_method == HTTPMethod.GET:
response = requests.get(
url=llm_whisperer_svc_url, headers=headers, params=params
)
elif request_method == HTTPMethod.POST:
response = requests.post(
url=llm_whisperer_svc_url,
headers=headers,
params=params,
data=data,
)
else:
raise ExtractorError(f"Unsupported request method: {request_method}")
response.raise_for_status()
except ConnectionError as e:
logger.error(f"Adapter error: {e}")
raise ExtractorError(
"Unable to connect to LLM Whisperer service, please check the URL"
)
except Timeout as e:
msg = "Request to LLM whisperer has timed out"
logger.error(f"{msg}: {e}")
raise ExtractorError(msg)
except HTTPError as e:
logger.error(f"Adapter error: {e}")
default_err = "Error while calling the LLM Whisperer service"
msg = AdapterUtils.get_msg_from_request_exc(
err=e, message_key="message", default_err=default_err
)
raise ExtractorError(msg)
return response
@staticmethod
def get_whisperer_params(config: dict[str, Any]) -> dict[str, Any]:
"""Gets query params meant for /whisper endpoint.
        The params are filled based on the configuration passed.
Returns:
dict[str, Any]: Query params
"""
params = {
WhispererConfig.MODE: config.get(WhispererConfig.MODE, Modes.FORM.value),
WhispererConfig.OUTPUT_MODE: config.get(
WhispererConfig.OUTPUT_MODE, OutputModes.LAYOUT_PRESERVING.value
),
WhispererConfig.LINE_SPLITTER_TOLERANCE: config.get(
WhispererConfig.LINE_SPLITTER_TOLERANCE,
WhispererDefaults.LINE_SPLITTER_TOLERANCE,
),
WhispererConfig.LINE_SPLITTER_STRATEGY: config.get(
WhispererConfig.LINE_SPLITTER_STRATEGY,
WhispererDefaults.LINE_SPLITTER_STRATEGY,
),
WhispererConfig.HORIZONTAL_STRETCH_FACTOR: config.get(
WhispererConfig.HORIZONTAL_STRETCH_FACTOR,
WhispererDefaults.HORIZONTAL_STRETCH_FACTOR,
),
WhispererConfig.PAGES_TO_EXTRACT: config.get(
WhispererConfig.PAGES_TO_EXTRACT,
WhispererDefaults.PAGES_TO_EXTRACT,
),
WhispererConfig.MARK_VERTICAL_LINES: config.get(
WhispererConfig.MARK_VERTICAL_LINES,
WhispererDefaults.MARK_VERTICAL_LINES,
),
WhispererConfig.MARK_HORIZONTAL_LINES: config.get(
WhispererConfig.MARK_HORIZONTAL_LINES,
WhispererDefaults.MARK_HORIZONTAL_LINES,
),
WhispererConfig.URL_IN_POST: WhispererDefaults.URL_IN_POST,
WhispererConfig.PAGE_SEPARATOR: config.get(
WhispererConfig.PAGE_SEPARATOR,
WhispererDefaults.PAGE_SEPARATOR,
),
            # Not providing a default value to maintain legacy compatibility;
            # these are optional params and identifiers for audit.
WhispererConfig.TAG: config.get(
WhispererConfig.TAG,
WhispererDefaults.TAG,
),
WhispererConfig.USE_WEBHOOK: config.get(WhispererConfig.USE_WEBHOOK),
WhispererConfig.WEBHOOK_METADATA: config.get(
WhispererConfig.WEBHOOK_METADATA
),
}
if params[WhispererConfig.MODE] == Modes.LOW_COST.value:
params.update(
{
WhispererConfig.MEDIAN_FILTER_SIZE: config.get(
WhispererConfig.MEDIAN_FILTER_SIZE,
WhispererDefaults.MEDIAN_FILTER_SIZE,
),
WhispererConfig.GAUSSIAN_BLUR_RADIUS: config.get(
WhispererConfig.GAUSSIAN_BLUR_RADIUS,
WhispererDefaults.GAUSSIAN_BLUR_RADIUS,
),
}
)
return params
@staticmethod
def check_status_until_ready(
config: dict[str, Any],
whisper_hash: str,
headers: dict[str, Any],
params: dict[str, Any],
) -> WhisperStatus:
"""Checks the extraction status by polling.
Polls the /whisper-status endpoint in fixed intervals of
env: ADAPTER_LLMW_POLL_INTERVAL for a certain number of times
controlled by env: ADAPTER_LLMW_MAX_POLLS.
Args:
whisper_hash (str): Identifier for the extraction,
returned by LLMWhisperer
headers (dict[str, Any]): Headers to pass for the status check
params (dict[str, Any]): Params to pass for the status check
Returns:
WhisperStatus: Status of the extraction
"""
POLL_INTERVAL = WhispererDefaults.POLL_INTERVAL
MAX_POLLS = WhispererDefaults.MAX_POLLS
request_count = 0
        # Check status in fixed intervals up to the max poll count.
while True:
request_count += 1
logger.info(
f"Checking status with interval: {POLL_INTERVAL}s"
f", request count: {request_count} [max: {MAX_POLLS}]"
)
status_response = LLMWhispererHelper.make_request(
config=config,
request_method=HTTPMethod.GET,
request_endpoint=WhispererEndpoint.STATUS,
headers=headers,
params=params,
)
if status_response.status_code == 200:
status_data = status_response.json()
status = status_data.get(WhisperStatus.STATUS, WhisperStatus.UNKNOWN)
logger.info(f"Whisper status for {whisper_hash}: {status}")
if status in [WhisperStatus.PROCESSED, WhisperStatus.DELIVERED]:
break
else:
raise ExtractorError(
"Error checking LLMWhisperer status: "
f"{status_response.status_code} - {status_response.text}"
)
# Exit with error if max poll count is reached
if request_count >= MAX_POLLS:
raise ExtractorError(
"Unable to extract text after attempting" f" {request_count} times"
)
time.sleep(POLL_INTERVAL)
return status
@staticmethod
def extract_async(config: dict[str, Any], whisper_hash: str) -> dict[Any, Any]:
"""Makes an async extraction with LLMWhisperer.
Polls and checks the status first before proceeding to retrieve once.
Args:
whisper_hash (str): Identifier of the extraction
Returns:
str: Extracted contents from the file
"""
logger.info(f"Extracting async for whisper hash: {whisper_hash}")
headers: dict[str, Any] = LLMWhispererHelper.get_request_headers(config)
params = {
WhisperStatus.WHISPER_HASH: whisper_hash,
WhispererConfig.TEXT_ONLY: WhispererDefaults.TEXT_ONLY,
}
# Polls in fixed intervals and checks status
LLMWhispererHelper.check_status_until_ready(
config=config, whisper_hash=whisper_hash, headers=headers, params=params
)
retrieve_response = LLMWhispererHelper.make_request(
config=config,
request_method=HTTPMethod.GET,
request_endpoint=WhispererEndpoint.RETRIEVE,
headers=headers,
params=params,
)
if retrieve_response.status_code == 200:
return retrieve_response.json()
else:
raise ExtractorError(
"Error retrieving from LLMWhisperer: "
f"{retrieve_response.status_code} - {retrieve_response.text}"
)
@staticmethod
def send_whisper_request(
input_file_path: str, config: dict[str, Any]
) -> requests.Response:
headers = LLMWhispererHelper.get_request_headers(config)
headers["Content-Type"] = "application/octet-stream"
params = LLMWhispererHelper.get_whisperer_params(config)
response: requests.Response
try:
with open(input_file_path, "rb") as input_f:
response = LLMWhispererHelper.make_request(
config=config,
request_method=HTTPMethod.POST,
request_endpoint=WhispererEndpoint.WHISPER,
headers=headers,
params=params,
data=input_f.read(),
)
except OSError as e:
logger.error(f"OS error while reading {input_file_path}: {e}")
raise ExtractorError(str(e))
return response
@staticmethod
def extract_text_from_response(
config: dict[str, Any],
output_file_path: Optional[str],
response_dict: dict[str, Any],
response: Response,
) -> str:
output_json = {}
if response.status_code == 200:
output_json = response.json()
elif response.status_code == 202:
whisper_hash = response_dict.get(WhisperStatus.WHISPER_HASH)
output_json = LLMWhispererHelper.extract_async(
config=config, whisper_hash=whisper_hash
)
else:
raise ExtractorError("Couldn't extract text from file")
if output_file_path:
LLMWhispererHelper.write_output_to_file(
output_json=output_json,
output_file_path=Path(output_file_path),
)
return output_json.get("result_text", "")
@staticmethod
def write_output_to_file(output_json: dict, output_file_path: Path) -> None:
"""Writes the extracted text and metadata to the specified output file
and metadata file.
Args:
output_json (dict): The dictionary containing the extracted data,
with "text" as the key for the main content.
output_file_path (Path): The file path where the extracted text
should be written.
Raises:
ExtractorError: If there is an error while writing the output file.
"""
try:
text_output = output_json.get("result_text", "")
logger.info(f"Writing output to {output_file_path}")
output_file_path.write_text(text_output, encoding="utf-8")
except Exception as e:
logger.error(f"Error while writing {output_file_path}: {e}")
raise ExtractorError(str(e))
try:
# Define the directory of the output file and metadata paths
output_dir = output_file_path.parent
metadata_dir = output_dir / "metadata"
metadata_file_name = output_file_path.with_suffix(".json").name
metadata_file_path = metadata_dir / metadata_file_name
# Ensure the metadata directory exists
metadata_dir.mkdir(parents=True, exist_ok=True)
# Remove the "result_text" key from the metadata
metadata = {
key: value for key, value in output_json.items() if key != "result_text"
}
metadata_json = json.dumps(metadata, ensure_ascii=False, indent=4)
logger.info(f"Writing metadata to {metadata_file_path}")
metadata_file_path.write_text(metadata_json, encoding="utf-8")
except Exception as e:
logger.warn(f"Error while writing metadata to {metadata_file_path}: {e}")
| 14,078 | Python | .py | 326 | 31.09816 | 88 | 0.598265 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,730 | constants.py | Zipstack_unstract-sdk/src/unstract/sdk/adapters/x2text/llama_parse/src/constants.py | class LlamaParseConfig:
"""Dictionary keys used to process LlamaParse."""
API_KEY = "api_key"
BASE_URL = "base_url"
RESULT_TYPE = "result_type"
NUM_WORKERS = "num_workers"
VERBOSE = "verbose"
LANGUAGE = "language"
| 243 | Python | .py | 8 | 25.75 | 53 | 0.65812 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,731 | llama_parse.py | Zipstack_unstract-sdk/src/unstract/sdk/adapters/x2text/llama_parse/src/llama_parse.py | import logging
import os
import pathlib
from typing import Any, Optional
from httpx import ConnectError
from llama_parse import LlamaParse
from unstract.sdk.adapters.exceptions import AdapterError
from unstract.sdk.adapters.utils import AdapterUtils
from unstract.sdk.adapters.x2text.dto import TextExtractionResult
from unstract.sdk.adapters.x2text.llama_parse.src.constants import LlamaParseConfig
from unstract.sdk.adapters.x2text.x2text_adapter import X2TextAdapter
logger = logging.getLogger(__name__)
class LlamaParseAdapter(X2TextAdapter):
def __init__(self, settings: dict[str, Any]):
super().__init__("LlamaParse")
self.config = settings
@staticmethod
def get_id() -> str:
return "llamaparse|78860239-b3cc-4cc5-b3de-f84315f75d14"
@staticmethod
def get_name() -> str:
return "LlamaParse"
@staticmethod
def get_description() -> str:
return "LlamaParse X2Text"
@staticmethod
def get_icon() -> str:
return "/icons/adapter-icons/llama-parse.png"
@staticmethod
def get_json_schema() -> str:
f = open(f"{os.path.dirname(__file__)}/static/json_schema.json")
schema = f.read()
f.close()
return schema
def _call_parser(
self,
input_file_path: str,
) -> str:
parser = LlamaParse(
api_key=self.config.get(LlamaParseConfig.API_KEY),
base_url=self.config.get(LlamaParseConfig.BASE_URL),
result_type=self.config.get(LlamaParseConfig.RESULT_TYPE),
verbose=self.config.get(LlamaParseConfig.VERBOSE),
language="en",
ignore_errors=False,
)
try:
file_extension = pathlib.Path(input_file_path).suffix
if not file_extension:
try:
input_file_extension = AdapterUtils.guess_extention(input_file_path)
input_file_path_copy = input_file_path
input_file_path = ".".join(
(input_file_path_copy, input_file_extension)
)
except OSError as os_err:
logger.error("Exception raised while handling input file.")
raise AdapterError(str(os_err))
documents = parser.load_data(input_file_path)
except ConnectError as connec_err:
logger.error(f"Invalid Base URL given. : {connec_err}")
raise AdapterError(
"Unable to connect to llama-parse`s service, "
"please check the Base URL"
)
except Exception as exe:
            logger.error(
                f"Seems like an invalid API key or possible internal errors: {exe}"
            )
raise AdapterError(str(exe))
response_text = documents[0].text
return response_text # type:ignore
def process(
self,
input_file_path: str,
output_file_path: Optional[str] = None,
**kwargs: dict[Any, Any],
) -> TextExtractionResult:
response_text = self._call_parser(input_file_path=input_file_path)
if output_file_path:
with open(output_file_path, "w", encoding="utf-8") as f:
f.write(response_text)
return TextExtractionResult(extracted_text=response_text)
def test_connection(self) -> bool:
self._call_parser(
input_file_path=f"{os.path.dirname(__file__)}/static/test_input.doc"
)
return True
| 3,516 | Python | .py | 88 | 30.318182 | 88 | 0.622946 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,732 | __init__.py | Zipstack_unstract-sdk/src/unstract/sdk/adapters/x2text/llama_parse/src/__init__.py | from .llama_parse import LlamaParseAdapter
metadata = {
"name": LlamaParseAdapter.__name__,
"version": "1.0.0",
"adapter": LlamaParseAdapter,
"description": "LlamaParse X2Text adapter",
"is_active": True,
}
| 228 | Python | .py | 8 | 24.875 | 47 | 0.684932 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,733 | __init__.py | Zipstack_unstract-sdk/src/unstract/sdk/adapters/x2text/unstructured_community/src/__init__.py | from .unstructured_community import UnstructuredCommunity
metadata = {
"name": UnstructuredCommunity.__name__,
"version": "1.0.0",
"adapter": UnstructuredCommunity,
"description": "UnstructuredIO X2Text adapter",
"is_active": True,
}
| 255 | Python | .py | 8 | 28.25 | 57 | 0.719512 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,734 | unstructured_community.py | Zipstack_unstract-sdk/src/unstract/sdk/adapters/x2text/unstructured_community/src/unstructured_community.py | import logging
import os
from typing import Any, Optional
from unstract.sdk.adapters.x2text.dto import TextExtractionResult
from unstract.sdk.adapters.x2text.helper import UnstructuredHelper
from unstract.sdk.adapters.x2text.x2text_adapter import X2TextAdapter
logger = logging.getLogger(__name__)
class UnstructuredCommunity(X2TextAdapter):
def __init__(self, settings: dict[str, Any]):
super().__init__("UnstructuredIOCommunity")
self.config = settings
@staticmethod
def get_id() -> str:
return "unstructuredcommunity|eeed506f-1875-457f-9101-846fc7115676"
@staticmethod
def get_name() -> str:
return "Unstructured IO Community"
@staticmethod
def get_description() -> str:
return "Unstructured IO Community X2Text"
@staticmethod
def get_icon() -> str:
return "/icons/adapter-icons/UnstructuredIO.png"
@staticmethod
def get_json_schema() -> str:
f = open(f"{os.path.dirname(__file__)}/static/json_schema.json")
schema = f.read()
f.close()
return schema
def process(
self,
input_file_path: str,
output_file_path: Optional[str] = None,
**kwargs: dict[Any, Any],
) -> TextExtractionResult:
extracted_text: str = UnstructuredHelper.process_document(
self.config, input_file_path, output_file_path
)
return TextExtractionResult(extracted_text=extracted_text)
def test_connection(self) -> bool:
result: bool = UnstructuredHelper.test_server_connection(self.config)
return result
| 1,605 | Python | .py | 42 | 31.690476 | 77 | 0.689233 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,735 | no_op_x2text.py | Zipstack_unstract-sdk/src/unstract/sdk/adapters/x2text/no_op/src/no_op_x2text.py | import logging
import os
import time
from typing import Any, Optional
from unstract.sdk.adapters.x2text.dto import TextExtractionResult
from unstract.sdk.adapters.x2text.x2text_adapter import X2TextAdapter
logger = logging.getLogger(__name__)
class NoOpX2Text(X2TextAdapter):
def __init__(self, settings: dict[str, Any]):
super().__init__("NoOpX2Text")
self.config = settings
@staticmethod
def get_id() -> str:
return "noOpX2text|mp66d1op-7100-d340-9101-846fc7115676"
@staticmethod
def get_name() -> str:
return "No Op X2Text"
@staticmethod
def get_description() -> str:
return "No Op X2Text Adapter"
@staticmethod
def get_icon() -> str:
return "/icons/adapter-icons/noOpx2Text.png"
@staticmethod
def get_json_schema() -> str:
f = open(f"{os.path.dirname(__file__)}/static/json_schema.json")
schema = f.read()
f.close()
return schema
def process(
self,
input_file_path: str,
output_file_path: Optional[str] = None,
**kwargs: dict[Any, Any],
) -> TextExtractionResult:
extracted_text: str = (
"This is a No Op x2text adapter response."
" This is a sample response and intended for testing \f"
)
time.sleep(self.config.get("wait_time"))
if output_file_path:
with open(output_file_path, "w", encoding="utf-8") as f:
f.write(extracted_text)
return TextExtractionResult(extracted_text=extracted_text)
def test_connection(self) -> bool:
time.sleep(self.config.get("wait_time"))
return True
| 1,671 | Python | .py | 47 | 28.446809 | 72 | 0.641042 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,736 | __init__.py | Zipstack_unstract-sdk/src/unstract/sdk/adapters/x2text/no_op/src/__init__.py | from unstract.sdk.adapters.x2text.no_op.src.no_op_x2text import NoOpX2Text
metadata = {
"name": NoOpX2Text.__name__,
"version": "1.0.0",
"adapter": NoOpX2Text,
"description": "NoOpX2Text",
"is_active": True,
}
| 231 | Python | .py | 8 | 25.25 | 74 | 0.666667 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,737 | ocr_adapter.py | Zipstack_unstract-sdk/src/unstract/sdk/adapters/ocr/ocr_adapter.py | from abc import ABC
from typing import Any, Optional
from unstract.sdk.adapters.base import Adapter
from unstract.sdk.adapters.enums import AdapterTypes
class OCRAdapter(Adapter, ABC):
def __init__(self, name: str):
super().__init__(name)
self.name = name
@staticmethod
def get_id() -> str:
return ""
@staticmethod
def get_name() -> str:
return ""
@staticmethod
def get_description() -> str:
return ""
@staticmethod
def get_icon() -> str:
return ""
@staticmethod
def get_json_schema() -> str:
return ""
@staticmethod
def get_adapter_type() -> AdapterTypes:
return AdapterTypes.OCR
def process(
self, input_file_path: str, output_file_path: Optional[str] = None
) -> str:
# Overriding methods will contain actual implementation
return ""
def test_connection(self, llm_metadata: dict[str, Any]) -> bool:
return False
| 986 | Python | .py | 33 | 23.69697 | 74 | 0.63482 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,738 | constants.py | Zipstack_unstract-sdk/src/unstract/sdk/adapters/ocr/constants.py | class FileType:
TEXT_PLAIN = "text/plain"
IMAGE_JPEG = "image/jpeg"
IMAGE_PNG = "image/png"
IMAGE_TIFF = "image/tiff"
IMAGE_BMP = "image/bmp"
IMAGE_GIF = "image/gif"
IMAGE_WEBP = "image/webp"
APPLICATION_PDF = "application/pdf"
ALLOWED_TYPES = [
IMAGE_JPEG,
IMAGE_PNG,
IMAGE_TIFF,
IMAGE_BMP,
IMAGE_GIF,
IMAGE_WEBP,
APPLICATION_PDF,
]
| 430 | Python | .py | 18 | 17.555556 | 39 | 0.57767 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,739 | __init__.py | Zipstack_unstract-sdk/src/unstract/sdk/adapters/ocr/__init__.py | from unstract.sdk.adapters import AdapterDict
from unstract.sdk.adapters.ocr.register import OCRRegistry
adapters: AdapterDict = {}
OCRRegistry.register_adapters(adapters)
| 173 | Python | .py | 4 | 42 | 58 | 0.863095 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,740 | register.py | Zipstack_unstract-sdk/src/unstract/sdk/adapters/ocr/register.py | import logging
import os
from importlib import import_module
from typing import Any
from unstract.sdk.adapters.constants import Common
from unstract.sdk.adapters.ocr.ocr_adapter import OCRAdapter
from unstract.sdk.adapters.registry import AdapterRegistry
logger = logging.getLogger(__name__)
class OCRRegistry(AdapterRegistry):
@staticmethod
def register_adapters(adapters: dict[str, Any]) -> None:
current_directory = os.path.dirname(os.path.abspath(__file__))
package = "unstract.sdk.adapters.ocr"
for adapter in os.listdir(current_directory):
adapter_path = os.path.join(current_directory, adapter, Common.SRC_FOLDER)
# Check if the item is a directory and not a
# special directory like __pycache__
if os.path.isdir(adapter_path) and not adapter.startswith("__"):
OCRRegistry._build_adapter_list(adapter, package, adapters)
if len(adapters) == 0:
logger.warning("No ocr adapter found.")
@staticmethod
def _build_adapter_list(
adapter: str, package: str, adapters: dict[str, Any]
) -> None:
try:
full_module_path = f"{package}.{adapter}.{Common.SRC_FOLDER}"
module = import_module(full_module_path)
metadata = getattr(module, Common.METADATA, {})
if metadata.get("is_active", False):
adapter_class: OCRAdapter = metadata[Common.ADAPTER]
adapter_id = adapter_class.get_id()
if not adapter_id or (adapter_id in adapters):
logger.warning(f"Duplicate Id : {adapter_id}")
else:
adapters[adapter_id] = {
Common.MODULE: module,
Common.METADATA: metadata,
}
except ModuleNotFoundError as exception:
logger.warning(f"Unable to import ocr adapters : {exception}")
| 1,947 | Python | .py | 41 | 36.97561 | 86 | 0.629474 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,741 | __init__.py | Zipstack_unstract-sdk/src/unstract/sdk/adapters/ocr/google_document_ai/src/__init__.py | from .google_document_ai import GoogleDocumentAI
metadata = {
"name": GoogleDocumentAI.__name__,
"version": "1.0.0",
"adapter": GoogleDocumentAI,
"description": "Google Document AI OCR adapter",
"is_active": True,
}
| 237 | Python | .py | 8 | 26 | 52 | 0.684211 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,742 | google_document_ai.py | Zipstack_unstract-sdk/src/unstract/sdk/adapters/ocr/google_document_ai/src/google_document_ai.py | import base64
import json
import logging
import os
from typing import Any, Optional
import requests
from filetype import filetype
from google.auth.transport import requests as google_requests
from google.oauth2.service_account import Credentials
from unstract.sdk.adapters.exceptions import AdapterError
from unstract.sdk.adapters.ocr.constants import FileType
from unstract.sdk.adapters.ocr.ocr_adapter import OCRAdapter
logger = logging.getLogger(__name__)
class GoogleDocumentAIKey:
RAW_DOCUMENT = "rawDocument"
MIME_TYPE = "mimeType"
CONTENT = "content"
SKIP_HUMAN_REVIEW = "skipHumanReview"
FIELD_MASK = "fieldMask"
class Constants:
URL = "url"
CREDENTIALS = "credentials"
CREDENTIAL_SCOPES = ["https://www.googleapis.com/auth/cloud-platform"]
class GoogleDocumentAI(OCRAdapter):
def __init__(self, settings: dict[str, Any]):
super().__init__("GoogleDocumentAI")
self.config = settings
google_service_account = self.config.get(Constants.CREDENTIALS)
if not google_service_account:
logger.error("Google service account not found")
else:
self.google_service_account = json.loads(google_service_account)
@staticmethod
def get_id() -> str:
return "googledocumentai|1013f64b-ecc9-4e35-b986-aebd60fb55d7"
@staticmethod
def get_name() -> str:
return "GoogleDocumentAI"
@staticmethod
def get_description() -> str:
return "Google Document AI OCR"
@staticmethod
def get_icon() -> str:
return "/icons/adapter-icons/GoogleDocumentAI.png"
@staticmethod
def get_json_schema() -> str:
f = open(f"{os.path.dirname(__file__)}/static/json_schema.json")
schema = f.read()
f.close()
return schema
""" Construct the request body to be sent to Google AI Document server """
def _get_request_body(
self, file_type_mime: str, file_content_in_bytes: bytes
) -> dict[str, Any]:
return {
GoogleDocumentAIKey.RAW_DOCUMENT: {
GoogleDocumentAIKey.MIME_TYPE: file_type_mime,
GoogleDocumentAIKey.CONTENT: base64.b64encode(
file_content_in_bytes
).decode("utf-8"),
},
GoogleDocumentAIKey.SKIP_HUMAN_REVIEW: True,
GoogleDocumentAIKey.FIELD_MASK: "text",
}
""" Construct the request headers to be sent
to Google AI Document server """
def _get_request_headers(self) -> dict[str, Any]:
credentials = Credentials.from_service_account_info(
self.google_service_account, scopes=Constants.CREDENTIAL_SCOPES
) # type: ignore
credentials.refresh(google_requests.Request()) # type: ignore
return {
"Content-Type": "application/json; charset=utf-8",
"Authorization": f"Bearer {credentials.token}",
}
""" Detect the mime type from the file content """
def _get_input_file_type_mime(self, input_file_path: str) -> str:
with open(input_file_path, mode="rb") as file_obj:
sample_contents = file_obj.read(100)
file_type = filetype.guess(sample_contents)
file_type_mime: str = file_type.MIME if file_type else FileType.TEXT_PLAIN
if file_type_mime not in FileType.ALLOWED_TYPES:
logger.error("Input file type not supported: " f"{file_type_mime}")
logger.info(f"file: `{input_file_path} [{file_type_mime}]`\n\n")
return file_type_mime
def process(
self, input_file_path: str, output_file_path: Optional[str] = None
) -> str:
try:
file_type_mime = self._get_input_file_type_mime(input_file_path)
if os.path.isfile(input_file_path):
with open(input_file_path, "rb") as fop:
file_content_in_bytes: bytes = fop.read()
else:
raise AdapterError(f"File not found {input_file_path}")
processor_url = self.config.get(Constants.URL, "") + ":process"
headers = self._get_request_headers()
data = self._get_request_body(
file_type_mime=file_type_mime,
file_content_in_bytes=file_content_in_bytes,
)
response = requests.post(processor_url, headers=headers, json=data)
if response.status_code != 200:
logger.error(f"Error while calling Google Document AI: {response.text}")
response_json: dict[str, Any] = response.json()
result_text: str = response_json["document"]["text"]
if output_file_path is not None:
with open(output_file_path, "w", encoding="utf-8") as f:
f.write(result_text)
f.close()
return result_text
except Exception as e:
logger.error(f"Error while processing document {e}")
if not isinstance(e, AdapterError):
raise AdapterError(str(e))
else:
raise e
        finally:
            # The `with` statement already closes the file; guard against
            # referencing `fop` when the file was never opened.
            if "fop" in locals() and not fop.closed:
                fop.close()
def test_connection(self) -> bool:
try:
url = self.config.get(Constants.URL, "")
headers = self._get_request_headers()
response = requests.get(url, headers=headers)
if response.status_code != 200:
logger.error(f"Error while testing Google Document AI: {response.text}")
raise AdapterError(f"{response.status_code} - {response.reason}")
else:
return True
except Exception as e:
logger.error(f"Error occured while testing adapter {e}")
if not isinstance(e, AdapterError):
raise AdapterError(str(e))
else:
raise e
| 5,861 | Python | .py | 136 | 33.279412 | 88 | 0.618989 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,743 | llm_adapter.py | Zipstack_unstract-sdk/src/unstract/sdk/adapters/llm/llm_adapter.py | import logging
import re
from abc import ABC, abstractmethod
from typing import Optional
from llama_index.core.llms import LLM, MockLLM
from unstract.sdk.adapters.base import Adapter
from unstract.sdk.adapters.enums import AdapterTypes
from unstract.sdk.adapters.exceptions import LLMError
logger = logging.getLogger(__name__)
class LLMAdapter(Adapter, ABC):
def __init__(self, name: str):
super().__init__(name)
self.name = name
@staticmethod
def get_id() -> str:
return ""
@staticmethod
def get_name() -> str:
return ""
@staticmethod
def get_description() -> str:
return ""
@staticmethod
@abstractmethod
def get_provider() -> str:
pass
@staticmethod
def get_icon() -> str:
return ""
@staticmethod
def get_json_schema() -> str:
return ""
@staticmethod
def get_adapter_type() -> AdapterTypes:
return AdapterTypes.LLM
@staticmethod
def parse_llm_err(e: Exception) -> LLMError:
"""Parse the error from an LLM provider.
Helps parse errors from a provider and wraps with custom exception.
Args:
e (Exception): Exception from LLM provider
Returns:
LLMError: Error to be sent to the user
"""
return LLMError(str(e))
def get_llm_instance(self) -> LLM:
"""Instantiate the llama index LLM class.
Returns:
LLM: llama index implementation of the LLM
Raises exceptions for any error
"""
return MockLLM()
@staticmethod
def _test_llm_instance(llm: Optional[LLM]) -> bool:
if llm is None:
return False
response = llm.complete(
"The capital of Tamilnadu is ",
temperature=0.003,
)
response_lower_case: str = response.text.lower()
find_match = re.search("chennai", response_lower_case)
if find_match:
return True
else:
return False
def test_connection(self) -> bool:
try:
llm = self.get_llm_instance()
test_result: bool = self._test_llm_instance(llm=llm)
except Exception as e:
# Avoids circular import errors
from unstract.sdk.adapters.llm.exceptions import parse_llm_err
err = parse_llm_err(e)
msg = f"Error while testing LLM '{self.get_name()}'. {str(err)}"
err.message = msg
raise err from e
return test_result
def get_context_window_size(self) -> int:
"""Get the context window size supported by the LLM.
Note: None of the derived classes implement this method
Returns:
int: Context window size supported by the LLM
"""
context_window_size: int = 0
llm = self.get_llm_instance()
if llm:
context_window_size = llm.metadata.context_window
return context_window_size
| 2,992 | Python | .py | 89 | 25.426966 | 76 | 0.615144 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,744 | __init__.py | Zipstack_unstract-sdk/src/unstract/sdk/adapters/llm/__init__.py | from unstract.sdk.adapters import AdapterDict
from unstract.sdk.adapters.llm.register import LLMRegistry
adapters: AdapterDict = {}
LLMRegistry.register_adapters(adapters)
| 173 | Python | .py | 4 | 42 | 58 | 0.863095 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,745 | exceptions.py | Zipstack_unstract-sdk/src/unstract/sdk/adapters/llm/exceptions.py | from anthropic import APIError as AnthropicAPIError
from google.api_core.exceptions import GoogleAPICallError
from mistralai.exceptions import MistralException
from openai import APIError as OpenAIAPIError
from vertexai.generative_models import ResponseValidationError
from unstract.sdk.adapters.exceptions import LLMError
from unstract.sdk.adapters.llm.anthropic.src import AnthropicLLM
from unstract.sdk.adapters.llm.mistral.src import MistralLLM
from unstract.sdk.adapters.llm.open_ai.src import OpenAILLM
from unstract.sdk.adapters.llm.palm.src import PaLMLLM
from unstract.sdk.adapters.llm.vertex_ai.src import VertexAILLM
def parse_llm_err(e: Exception) -> LLMError:
"""Parses the exception from LLM provider.
Helps parse the LLM error and wraps it with our
custom exception object to contain a user friendly message.
Args:
e (Exception): Error from LLM provider
Returns:
LLMError: Unstract's LLMError object
"""
if isinstance(e, ResponseValidationError):
return VertexAILLM.parse_llm_err(e)
elif isinstance(e, OpenAIAPIError):
return OpenAILLM.parse_llm_err(e)
elif isinstance(e, AnthropicAPIError):
return AnthropicLLM.parse_llm_err(e)
elif isinstance(e, MistralException):
return MistralLLM.parse_llm_err(e)
elif isinstance(e, GoogleAPICallError):
return PaLMLLM.parse_llm_err(e)
return LLMError(str(e))
| 1,423 | Python | .py | 31 | 41.354839 | 64 | 0.784271 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,746 | register.py | Zipstack_unstract-sdk/src/unstract/sdk/adapters/llm/register.py | import logging
import os
from importlib import import_module
from typing import Any
from unstract.sdk.adapters.constants import Common
from unstract.sdk.adapters.llm.llm_adapter import LLMAdapter
from unstract.sdk.adapters.registry import AdapterRegistry
logger = logging.getLogger(__name__)
class LLMRegistry(AdapterRegistry):
@staticmethod
def register_adapters(adapters: dict[str, Any]) -> None:
current_directory = os.path.dirname(os.path.abspath(__file__))
package = "unstract.sdk.adapters.llm"
for adapter in os.listdir(current_directory):
adapter_path = os.path.join(current_directory, adapter, Common.SRC_FOLDER)
# Check if the item is a directory and not a
            # special directory like __pycache__
if os.path.isdir(adapter_path) and not adapter.startswith("__"):
LLMRegistry._build_adapter_list(adapter, package, adapters)
if len(adapters) == 0:
logger.warning("No llm adapter found.")
@staticmethod
def _build_adapter_list(
adapter: str, package: str, adapters: dict[str, Any]
) -> None:
try:
full_module_path = f"{package}.{adapter}.{Common.SRC_FOLDER}"
module = import_module(full_module_path)
metadata = getattr(module, Common.METADATA, {})
if metadata.get("is_active", False):
adapter_class: LLMAdapter = metadata[Common.ADAPTER]
adapter_id = adapter_class.get_id()
if not adapter_id or (adapter_id in adapters):
logger.warning(f"Duplicate Id : {adapter_id}")
else:
adapters[adapter_id] = {
Common.MODULE: module,
Common.METADATA: metadata,
}
except ModuleNotFoundError as exception:
logger.warning(f"Unable to import llm adapters : {exception}")
| 1,946 | Python | .py | 41 | 36.95122 | 86 | 0.629805 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,747 | anthropic.py | Zipstack_unstract-sdk/src/unstract/sdk/adapters/llm/anthropic/src/anthropic.py | import os
from typing import Any
from anthropic import APIError
from llama_index.core.llms import LLM
from llama_index.llms.anthropic import Anthropic
from llama_index.llms.anthropic.base import DEFAULT_ANTHROPIC_MAX_TOKENS
from unstract.sdk.adapters.exceptions import AdapterError, LLMError
from unstract.sdk.adapters.llm.constants import LLMKeys
from unstract.sdk.adapters.llm.llm_adapter import LLMAdapter
class Constants:
MODEL = "model"
API_KEY = "api_key"
TIMEOUT = "timeout"
MAX_RETRIES = "max_retries"
MAX_TOKENS = "max_tokens"
class AnthropicLLM(LLMAdapter):
def __init__(self, settings: dict[str, Any]):
super().__init__("Anthropic")
self.config = settings
@staticmethod
def get_id() -> str:
return "anthropic|90ebd4cd-2f19-4cef-a884-9eeb6ac0f203"
@staticmethod
def get_name() -> str:
return "Anthropic"
@staticmethod
def get_description() -> str:
return "Anthropic LLM"
@staticmethod
def get_provider() -> str:
return "anthropic"
@staticmethod
def get_icon() -> str:
return "/icons/adapter-icons/Anthropic.png"
@staticmethod
def get_json_schema() -> str:
f = open(f"{os.path.dirname(__file__)}/static/json_schema.json")
schema = f.read()
f.close()
return schema
def get_llm_instance(self) -> LLM:
max_tokens = int(
self.config.get(Constants.MAX_TOKENS, DEFAULT_ANTHROPIC_MAX_TOKENS)
)
try:
llm: LLM = Anthropic(
model=str(self.config.get(Constants.MODEL)),
api_key=str(self.config.get(Constants.API_KEY)),
timeout=float(
self.config.get(Constants.TIMEOUT, LLMKeys.DEFAULT_TIMEOUT)
),
max_retries=int(
self.config.get(Constants.MAX_RETRIES, LLMKeys.DEFAULT_MAX_RETRIES)
),
temperature=0,
max_tokens=max_tokens,
)
return llm
except Exception as e:
raise AdapterError(str(e))
@staticmethod
def parse_llm_err(e: APIError) -> LLMError:
"""Parse the error from Anthropic.
Helps parse errors from Anthropic and wraps with custom exception.
Args:
e (AnthropicAPIError): Exception from Anthropic
Returns:
LLMError: Error to be sent to the user
"""
msg = "Error from Anthropic. "
if hasattr(e, "body"):
if isinstance(e.body, dict) and "error" in e.body:
err = e.body["error"]
msg += err.get("message", e.message)
else:
msg += e.message
return LLMError(msg)
| 2,759 | Python | .py | 77 | 27.025974 | 87 | 0.612383 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,748 | __init__.py | Zipstack_unstract-sdk/src/unstract/sdk/adapters/llm/anthropic/src/__init__.py | from .anthropic import AnthropicLLM
metadata = {
"name": AnthropicLLM.__name__,
"version": "1.0.0",
"adapter": AnthropicLLM,
"description": "Anthropic LLM adapter",
"is_active": True,
}
__all__ = ["AnthropicLLM"]
| 235 | Python | .py | 9 | 22.666667 | 43 | 0.647321 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,749 | vertex_ai.py | Zipstack_unstract-sdk/src/unstract/sdk/adapters/llm/vertex_ai/src/vertex_ai.py | import json
import logging
import os
from typing import Any
from google.auth.transport import requests as google_requests
from google.oauth2.service_account import Credentials
from llama_index.core.llms import LLM
from llama_index.llms.vertex import Vertex
from vertexai.generative_models import Candidate, FinishReason, ResponseValidationError
from vertexai.generative_models._generative_models import (
HarmBlockThreshold,
HarmCategory,
)
from unstract.sdk.adapters.exceptions import LLMError
from unstract.sdk.adapters.llm.constants import LLMKeys
from unstract.sdk.adapters.llm.llm_adapter import LLMAdapter
logger = logging.getLogger(__name__)
class Constants:
MODEL = "model"
PROJECT = "project"
JSON_CREDENTIALS = "json_credentials"
MAX_RETRIES = "max_retries"
MAX_TOKENS = "max_tokens"
DEFAULT_MAX_TOKENS = 2048
BLOCK_ONLY_HIGH = "BLOCK_ONLY_HIGH"
class SafetySettingsConstants:
SAFETY_SETTINGS = "safety_settings"
DANGEROUS_CONTENT = "dangerous_content"
HATE_SPEECH = "hate_speech"
HARASSMENT = "harassment"
SEXUAL_CONTENT = "sexual_content"
OTHER = "other"
UNSTRACT_VERTEX_SAFETY_THRESHOLD_MAPPING: dict[str, HarmBlockThreshold] = {
"HARM_BLOCK_THRESHOLD_UNSPECIFIED": HarmBlockThreshold.HARM_BLOCK_THRESHOLD_UNSPECIFIED, # noqa: E501
"BLOCK_LOW_AND_ABOVE": HarmBlockThreshold.BLOCK_LOW_AND_ABOVE,
"BLOCK_MEDIUM_AND_ABOVE": HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,
"BLOCK_ONLY_HIGH": HarmBlockThreshold.BLOCK_ONLY_HIGH,
"BLOCK_NONE": HarmBlockThreshold.BLOCK_NONE,
}
class VertexAILLM(LLMAdapter):
def __init__(self, settings: dict[str, Any]):
super().__init__("VertexAILLM")
self.config = settings
@staticmethod
def get_id() -> str:
return "vertexai|78fa17a5-a619-47d4-ac6e-3fc1698fdb55"
@staticmethod
def get_name() -> str:
return "VertexAI"
@staticmethod
def get_description() -> str:
return "Vertex Gemini LLM"
@staticmethod
def get_provider() -> str:
return "vertex_ai"
@staticmethod
def get_icon() -> str:
return "/icons/adapter-icons/VertexAI.png"
@staticmethod
def get_json_schema() -> str:
f = open(f"{os.path.dirname(__file__)}/static/json_schema.json")
schema = f.read()
f.close()
return schema
def get_llm_instance(self) -> LLM:
input_credentials = self.config.get(Constants.JSON_CREDENTIALS, "{}")
try:
json_credentials = json.loads(input_credentials)
except json.JSONDecodeError:
raise LLMError(
"Credentials is not a valid service account JSON, "
"please provide a valid JSON."
)
credentials = Credentials.from_service_account_info(
info=json_credentials,
scopes=["https://www.googleapis.com/auth/cloud-platform"],
) # type: ignore
credentials.refresh(google_requests.Request()) # type: ignore
max_retries = int(
self.config.get(Constants.MAX_RETRIES, LLMKeys.DEFAULT_MAX_RETRIES)
)
max_tokens = int(
self.config.get(Constants.MAX_TOKENS, Constants.DEFAULT_MAX_TOKENS)
)
safety_settings_default_config: dict[str, str] = {
SafetySettingsConstants.DANGEROUS_CONTENT: Constants.BLOCK_ONLY_HIGH,
SafetySettingsConstants.HATE_SPEECH: Constants.BLOCK_ONLY_HIGH,
SafetySettingsConstants.HARASSMENT: Constants.BLOCK_ONLY_HIGH,
SafetySettingsConstants.SEXUAL_CONTENT: Constants.BLOCK_ONLY_HIGH,
SafetySettingsConstants.OTHER: Constants.BLOCK_ONLY_HIGH,
}
safety_settings_user_config: dict[str, str] = self.config.get(
SafetySettingsConstants.SAFETY_SETTINGS,
safety_settings_default_config,
)
vertex_safety_settings: dict[
HarmCategory, HarmBlockThreshold
] = self._get_vertex_safety_settings(safety_settings_user_config)
llm: LLM = Vertex(
project=str(self.config.get(Constants.PROJECT)),
model=str(self.config.get(Constants.MODEL)),
credentials=credentials,
temperature=0,
max_retries=max_retries,
max_tokens=max_tokens,
safety_settings=vertex_safety_settings,
)
return llm
def _get_vertex_safety_settings(
self, safety_settings_user_config: dict[str, str]
) -> dict[HarmCategory, HarmBlockThreshold]:
vertex_safety_settings: dict[HarmCategory, HarmBlockThreshold] = dict()
vertex_safety_settings[
HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT
] = UNSTRACT_VERTEX_SAFETY_THRESHOLD_MAPPING[
(
safety_settings_user_config.get(
SafetySettingsConstants.DANGEROUS_CONTENT,
Constants.BLOCK_ONLY_HIGH,
)
)
]
vertex_safety_settings[
HarmCategory.HARM_CATEGORY_HATE_SPEECH
] = UNSTRACT_VERTEX_SAFETY_THRESHOLD_MAPPING[
(
safety_settings_user_config.get(
SafetySettingsConstants.HATE_SPEECH,
Constants.BLOCK_ONLY_HIGH,
)
)
]
vertex_safety_settings[
HarmCategory.HARM_CATEGORY_HARASSMENT
] = UNSTRACT_VERTEX_SAFETY_THRESHOLD_MAPPING[
(
safety_settings_user_config.get(
SafetySettingsConstants.HARASSMENT,
Constants.BLOCK_ONLY_HIGH,
)
)
]
vertex_safety_settings[
HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT
] = UNSTRACT_VERTEX_SAFETY_THRESHOLD_MAPPING[
(
safety_settings_user_config.get(
SafetySettingsConstants.SEXUAL_CONTENT,
Constants.BLOCK_ONLY_HIGH,
)
)
]
vertex_safety_settings[
HarmCategory.HARM_CATEGORY_UNSPECIFIED
] = UNSTRACT_VERTEX_SAFETY_THRESHOLD_MAPPING[
(
safety_settings_user_config.get(
SafetySettingsConstants.OTHER, Constants.BLOCK_ONLY_HIGH
)
)
]
return vertex_safety_settings
@staticmethod
def parse_llm_err(e: ResponseValidationError) -> LLMError:
"""Parse the error from Vertex AI.
Helps parse and raise errors from Vertex AI.
https://ai.google.dev/api/generate-content#generatecontentresponse
Args:
e (ResponseValidationError): Exception from Vertex AI
Returns:
LLMError: Error to be sent to the user
"""
assert len(e.responses) == 1, (
"Expected e.responses to contain a single element "
"since its a completion call and not chat."
)
resp = e.responses[0]
candidates: list["Candidate"] = resp.candidates
        if not candidates:
            # No candidates returned: surface the prompt feedback directly,
            # since the error list built below would otherwise be empty.
            return LLMError(str(resp.prompt_feedback))
reason_messages = {
FinishReason.MAX_TOKENS: (
"The maximum number of tokens for the LLM has been reached. Please "
"either tweak your prompts or try using another LLM."
),
FinishReason.STOP: (
"The LLM stopped generating a response due to the natural stop "
"point of the model or a provided stop sequence."
),
FinishReason.SAFETY: "The LLM response was flagged for safety reasons.",
FinishReason.RECITATION: (
"The LLM response was flagged for recitation reasons."
),
FinishReason.BLOCKLIST: (
"The LLM response generation was stopped because it "
"contains forbidden terms."
),
FinishReason.PROHIBITED_CONTENT: (
"The LLM response generation was stopped because it "
"potentially contains prohibited content."
),
FinishReason.SPII: (
"The LLM response generation was stopped because it potentially "
"contains Sensitive Personally Identifiable Information."
),
}
err_list = []
for candidate in candidates:
reason: FinishReason = candidate.finish_reason
if candidate.finish_message:
err_msg = candidate.finish_message
else:
err_msg = reason_messages.get(reason, str(candidate))
err_list.append(err_msg)
msg = "\n\nAnother error: \n".join(err_list)
return LLMError(msg)
| 8,751 | Python | .py | 216 | 30.310185 | 106 | 0.625602 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,750 | __init__.py | Zipstack_unstract-sdk/src/unstract/sdk/adapters/llm/vertex_ai/src/__init__.py | from .vertex_ai import VertexAILLM
metadata = {
"name": VertexAILLM.__name__,
"version": "1.0.0",
"adapter": VertexAILLM,
"description": "VertexAI LLM adapter",
"is_active": True,
}
__all__ = ["VertexAILLM"]
| 230 | Python | .py | 9 | 22.111111 | 42 | 0.634703 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,751 | bedrock.py | Zipstack_unstract-sdk/src/unstract/sdk/adapters/llm/bedrock/src/bedrock.py | import os
from typing import Any, Optional
from llama_index.core.llms import LLM
from llama_index.llms.bedrock import Bedrock
from unstract.sdk.adapters.exceptions import AdapterError
from unstract.sdk.adapters.llm.constants import LLMKeys
from unstract.sdk.adapters.llm.llm_adapter import LLMAdapter
class Constants:
MODEL = "model"
API_KEY = "api_key"
TIMEOUT = "timeout"
MAX_RETRIES = "max_retries"
SECRET_ACCESS_KEY = "aws_secret_access_key"
ACCESS_KEY_ID = "aws_access_key_id"
REGION_NAME = "region_name"
CONTEXT_SIZE = "context_size"
MAX_TOKENS = "max_tokens"
DEFAULT_MAX_TOKENS = 512 # Default at llama-index
class BedrockLLM(LLMAdapter):
def __init__(self, settings: dict[str, Any]):
super().__init__("Bedrock")
self.config = settings
@staticmethod
def get_id() -> str:
return "bedrock|8d18571f-5e96-4505-bd28-ad0379c64064"
@staticmethod
def get_name() -> str:
return "Bedrock"
@staticmethod
def get_description() -> str:
return "Bedrock LLM"
@staticmethod
def get_provider() -> str:
return "bedrock"
@staticmethod
def get_icon() -> str:
return "/icons/adapter-icons/Bedrock.png"
@staticmethod
def get_json_schema() -> str:
f = open(f"{os.path.dirname(__file__)}/static/json_schema.json")
schema = f.read()
f.close()
return schema
def get_llm_instance(self) -> LLM:
try:
context_size: Optional[int] = (
int(self.config.get(Constants.CONTEXT_SIZE, 0))
if self.config.get(Constants.CONTEXT_SIZE)
else None
)
max_tokens = int(
self.config.get(Constants.MAX_TOKENS, Constants.DEFAULT_MAX_TOKENS)
)
llm: LLM = Bedrock(
model=self.config.get(Constants.MODEL),
aws_access_key_id=self.config.get(Constants.ACCESS_KEY_ID),
aws_secret_access_key=self.config.get(Constants.SECRET_ACCESS_KEY),
region_name=self.config.get(Constants.REGION_NAME),
timeout=float(
self.config.get(Constants.TIMEOUT, LLMKeys.DEFAULT_TIMEOUT)
),
max_retries=int(
self.config.get(Constants.MAX_RETRIES, LLMKeys.DEFAULT_MAX_RETRIES)
),
temperature=0,
context_size=context_size,
max_tokens=max_tokens,
)
return llm
except Exception as e:
raise AdapterError(str(e))
| 2,626 | Python | .py | 71 | 27.690141 | 87 | 0.607002 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,752 | __init__.py | Zipstack_unstract-sdk/src/unstract/sdk/adapters/llm/bedrock/src/__init__.py | from .bedrock import BedrockLLM
metadata = {
"name": BedrockLLM.__name__,
"version": "1.0.0",
"adapter": BedrockLLM,
"description": "Bedrock LLM adapter",
"is_active": True,
}
__all__ = ["BedrockLLM"]
| 223 | Python | .py | 9 | 21.333333 | 41 | 0.627358 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,753 | palm.py | Zipstack_unstract-sdk/src/unstract/sdk/adapters/llm/palm/src/palm.py | import os
from typing import Any, Optional
from google.api_core.exceptions import GoogleAPICallError
from llama_index.core.llms import LLM
from llama_index.llms.palm import PaLM
from unstract.sdk.adapters.llm.llm_adapter import LLMAdapter
from unstract.sdk.exceptions import LLMError
class Constants:
MODEL = "model_name"
API_KEY = "api_key"
NUM_OUTPUT = "num_output"
API_TYPE = "palm"
DEFAULT_MAX_TOKENS = 1024
class PaLMLLM(LLMAdapter):
def __init__(self, settings: dict[str, Any]):
super().__init__("PaLM")
self.config = settings
@staticmethod
def get_id() -> str:
return "palm|af7c8ee7-3d01-47c5-9b81-5ffd7546014b"
@staticmethod
def get_name() -> str:
return "Palm"
@staticmethod
def get_description() -> str:
return "Palm LLM"
@staticmethod
def get_provider() -> str:
return "palm"
@staticmethod
def get_icon() -> str:
return "/icons/adapter-icons/PaLM.png"
@staticmethod
def get_json_schema() -> str:
f = open(f"{os.path.dirname(__file__)}/static/json_schema.json")
schema = f.read()
f.close()
return schema
def get_llm_instance(self) -> LLM:
try:
num_output: Optional[int] = (
int(self.config.get(Constants.NUM_OUTPUT, Constants.DEFAULT_MAX_TOKENS))
if self.config.get(Constants.NUM_OUTPUT) is not None
else None
)
llm: LLM = PaLM(
model=str(self.config.get(Constants.MODEL)),
api_key=str(self.config.get(Constants.API_KEY)),
num_output=num_output,
api_type=Constants.API_TYPE,
temperature=0,
)
return llm
except Exception as e:
# To avoid circular import errors
from unstract.sdk.adapters.llm.exceptions import parse_llm_err
raise parse_llm_err(e)
@staticmethod
def parse_llm_err(e: GoogleAPICallError) -> LLMError:
"""Parse the error from PaLM.
Helps parse errors from PaLM and wraps with custom exception.
Args:
            e (GoogleAPICallError): Exception from PaLM
Returns:
LLMError: Error to be sent to the user
"""
return LLMError(f"Error from PaLM. {e.message}")
| 2,366 | Python | .py | 67 | 26.80597 | 88 | 0.614912 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,754 | __init__.py | Zipstack_unstract-sdk/src/unstract/sdk/adapters/llm/palm/src/__init__.py | from .palm import PaLMLLM
metadata = {
"name": PaLMLLM.__name__,
"version": "1.0.0",
"adapter": PaLMLLM,
"description": "Palm LLM adapter",
"is_active": True,
}
__all__ = ["PaLMLLM"]
| 205 | Python | .py | 9 | 19.333333 | 38 | 0.592784 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,755 | replicate.py | Zipstack_unstract-sdk/src/unstract/sdk/adapters/llm/replicate/src/replicate.py | import os
from typing import Any
from llama_index.core.llms import LLM
from llama_index.llms.replicate import Replicate
from unstract.sdk.adapters.exceptions import AdapterError
from unstract.sdk.adapters.llm.llm_adapter import LLMAdapter
class Constants:
MODEL = "model"
API_KEY = "api_key"
class ReplicateLLM(LLMAdapter):
def __init__(self, settings: dict[str, Any]):
super().__init__("Replicate")
self.config = settings
@staticmethod
def get_id() -> str:
return "replicate|2715ce84-05af-4ab4-b8e9-67ac3211b81e"
@staticmethod
def get_name() -> str:
return "Replicate"
@staticmethod
def get_description() -> str:
return "Replicate LLM"
@staticmethod
def get_provider() -> str:
return "replicate"
@staticmethod
def get_icon() -> str:
return "/icons/adapter-icons/Replicate.png"
@staticmethod
def get_json_schema() -> str:
f = open(f"{os.path.dirname(__file__)}/static/json_schema.json")
schema = f.read()
f.close()
return schema
@staticmethod
def can_write() -> bool:
return True
@staticmethod
def can_read() -> bool:
return True
def get_llm_instance(self) -> LLM:
try:
llm: LLM = Replicate(
model=str(self.config.get(Constants.MODEL)),
prompt_key=str(self.config.get(Constants.API_KEY)),
temperature=0,
)
return llm
except Exception as e:
raise AdapterError(str(e))
| 1,578 | Python | .py | 50 | 24.34 | 72 | 0.62657 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,756 | __init__.py | Zipstack_unstract-sdk/src/unstract/sdk/adapters/llm/replicate/src/__init__.py | from .replicate import ReplicateLLM
metadata = {
"name": ReplicateLLM.__name__,
"version": "1.0.0",
"adapter": ReplicateLLM,
"description": "Replicate LLM adapter",
"is_active": True,
}
__all__ = ["ReplicateLLM"]
| 235 | Python | .py | 9 | 22.666667 | 43 | 0.647321 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,757 | __init__.py | Zipstack_unstract-sdk/src/unstract/sdk/adapters/llm/open_ai/src/__init__.py | from .open_ai import OpenAILLM
metadata = {
"name": OpenAILLM.__name__,
"version": "1.0.0",
"adapter": OpenAILLM,
"description": "OpenAI LLM adapter",
"is_active": True,
}
__all__ = ["OpenAILLM"]
| 218 | Python | .py | 9 | 20.777778 | 40 | 0.613527 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,758 | open_ai.py | Zipstack_unstract-sdk/src/unstract/sdk/adapters/llm/open_ai/src/open_ai.py | import os
from typing import Any
from llama_index.core.llms import LLM
from llama_index.llms.openai import OpenAI
from openai import APIError as OpenAIAPIError
from openai import RateLimitError as OpenAIRateLimitError
from unstract.sdk.adapters.exceptions import AdapterError
from unstract.sdk.adapters.llm.constants import LLMKeys
from unstract.sdk.adapters.llm.llm_adapter import LLMAdapter
from unstract.sdk.exceptions import LLMError, RateLimitError
class Constants:
MODEL = "model"
API_KEY = "api_key"
MAX_RETRIES = "max_retries"
ADAPTER_NAME = "adapter_name"
TIMEOUT = "timeout"
API_BASE = "api_base"
API_VERSION = "api_version"
MAX_TOKENS = "max_tokens"
class OpenAILLM(LLMAdapter):
def __init__(self, settings: dict[str, Any]):
super().__init__("OpenAI")
self.config = settings
@staticmethod
def get_id() -> str:
return "openai|502ecf49-e47c-445c-9907-6d4b90c5cd17"
@staticmethod
def get_name() -> str:
return "OpenAI"
@staticmethod
def get_description() -> str:
return "OpenAI LLM"
@staticmethod
def get_provider() -> str:
return "openai"
@staticmethod
def get_icon() -> str:
return "/icons/adapter-icons/OpenAI.png"
@staticmethod
def get_json_schema() -> str:
f = open(f"{os.path.dirname(__file__)}/static/json_schema.json")
schema = f.read()
f.close()
return schema
def get_llm_instance(self) -> LLM:
try:
max_tokens = self.config.get(Constants.MAX_TOKENS)
max_tokens = int(max_tokens) if max_tokens else None
llm: LLM = OpenAI(
model=str(self.config.get(Constants.MODEL)),
api_key=str(self.config.get(Constants.API_KEY)),
api_base=str(self.config.get(Constants.API_BASE)),
api_version=str(self.config.get(Constants.API_VERSION)),
max_retries=int(
self.config.get(Constants.MAX_RETRIES, LLMKeys.DEFAULT_MAX_RETRIES)
),
api_type="openai",
temperature=0,
timeout=float(
self.config.get(Constants.TIMEOUT, LLMKeys.DEFAULT_TIMEOUT)
),
max_tokens=max_tokens,
)
return llm
except Exception as e:
raise AdapterError(str(e))
@staticmethod
def parse_llm_err(e: OpenAIAPIError) -> LLMError:
"""Parse the error from OpenAI.
Helps parse errors from OpenAI and wraps with custom exception.
Args:
e (OpenAIAPIError): Exception from OpenAI
Returns:
LLMError: Error to be sent to the user
"""
msg = "Error from OpenAI. "
if hasattr(e, "body") and isinstance(e.body, dict) and "message" in e.body:
msg += e.body["message"]
else:
msg += e.message
if isinstance(e, OpenAIRateLimitError):
return RateLimitError(msg)
return LLMError(msg)
| 3,073 | Python | .py | 83 | 28.253012 | 87 | 0.619913 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,759 | __init__.py | Zipstack_unstract-sdk/src/unstract/sdk/adapters/llm/ollama/src/__init__.py | from .ollama import OllamaLLM
metadata = {
"name": OllamaLLM.__name__,
"version": "1.0.0",
"adapter": OllamaLLM,
"description": "Ollama LLM adapter",
"is_active": True,
}
__all__ = ["OllamaLLM"]
| 217 | Python | .py | 9 | 20.666667 | 40 | 0.616505 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,760 | ollama.py | Zipstack_unstract-sdk/src/unstract/sdk/adapters/llm/ollama/src/ollama.py | import logging
import os
import re
from typing import Any
from httpx import ConnectError, HTTPStatusError
from llama_index.core.llms import LLM
from llama_index.llms.ollama import Ollama
from unstract.sdk.adapters.exceptions import AdapterError
from unstract.sdk.adapters.llm.constants import LLMKeys
from unstract.sdk.adapters.llm.llm_adapter import LLMAdapter
logger = logging.getLogger(__name__)
class Constants:
MODEL = "model"
API_KEY = "api_key"
TIMEOUT = "timeout"
BASE_URL = "base_url"
JSON_MODE = "json_mode"
CONTEXT_WINDOW = "context_window"
MODEL_MISSING_ERROR = "try pulling it first"
class OllamaLLM(LLMAdapter):
def __init__(self, settings: dict[str, Any]):
super().__init__("Ollama")
self.config = settings
@staticmethod
def get_id() -> str:
return "ollama|4b8bd31a-ce42-48d4-9d69-f29c12e0f276"
@staticmethod
def get_name() -> str:
return "Ollama"
@staticmethod
def get_description() -> str:
return "Ollama AI LLM"
@staticmethod
def get_provider() -> str:
return "ollama"
@staticmethod
def get_icon() -> str:
return "/icons/adapter-icons/ollama.png"
@staticmethod
def get_json_schema() -> str:
f = open(f"{os.path.dirname(__file__)}/static/json_schema.json")
schema = f.read()
f.close()
return schema
def get_llm_instance(self) -> LLM:
try:
llm: LLM = Ollama(
model=str(self.config.get(Constants.MODEL)),
base_url=str(self.config.get(Constants.BASE_URL)),
request_timeout=float(
self.config.get(Constants.TIMEOUT, LLMKeys.DEFAULT_TIMEOUT)
),
json_mode=False,
context_window=int(self.config.get(Constants.CONTEXT_WINDOW, 3900)),
temperature=0.01,
)
return llm
except ConnectError as connec_err:
logger.error(f"Ollama server not running : {connec_err}")
raise AdapterError(
"Unable to connect to Ollama`s Server, "
"please check if the server is up and running or"
"if it is accepting connections."
)
except Exception as exc:
logger.error(f"Error occured while getting llm instance:{exc}")
raise AdapterError(str(exc))
def test_connection(self) -> bool:
try:
llm = self.get_llm_instance()
if not llm:
return False
response = llm.complete(
"The capital of Tamilnadu is ",
temperature=0.003,
)
response_lower_case: str = response.text.lower()
find_match = re.search("chennai", response_lower_case)
if find_match:
return True
else:
return False
except HTTPStatusError as http_err:
if http_err.response:
if (
http_err.response.status_code == 404
and Constants.MODEL_MISSING_ERROR in http_err.response.text
):
logger.error(
f"Error occured while sending requst to the model{http_err}"
)
raise AdapterError(
"Model under use is not found. Try pulling it first."
)
raise AdapterError(
f"Some issue while communicating with the model. "
f"Details : {http_err.response.text}"
)
except Exception as e:
logger.error(f"Error occured while testing adapter {e}")
raise AdapterError(str(e))
| 3,774 | Python | .py | 101 | 26.613861 | 84 | 0.577681 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,761 | azure_open_ai.py | Zipstack_unstract-sdk/src/unstract/sdk/adapters/llm/azure_open_ai/src/azure_open_ai.py | import os
from typing import Any
from llama_index.core.llms import LLM
from llama_index.llms.azure_openai import AzureOpenAI
from unstract.sdk.adapters.exceptions import AdapterError
from unstract.sdk.adapters.llm.constants import LLMKeys
from unstract.sdk.adapters.llm.llm_adapter import LLMAdapter
class Constants:
MODEL = "model"
DEPLOYMENT_NAME = "deployment_name"
API_KEY = "api_key"
API_VERSION = "api_version"
MAX_RETRIES = "max_retries"
MAX_TOKENS = "max_tokens"
AZURE_ENDPONT = "azure_endpoint"
API_TYPE = "azure"
TIMEOUT = "timeout"
DEFAULT_MODEL = "gpt-35-turbo"
class AzureOpenAILLM(LLMAdapter):
def __init__(self, settings: dict[str, Any]):
super().__init__("AzureOpenAI")
self.config = settings
@staticmethod
def get_id() -> str:
return "azureopenai|592d84b9-fe03-4102-a17e-6b391f32850b"
@staticmethod
def get_name() -> str:
return "AzureOpenAI"
@staticmethod
def get_description() -> str:
return "AzureOpenAI LLM"
@staticmethod
def get_provider() -> str:
return "azure"
@staticmethod
def get_icon() -> str:
return "/icons/adapter-icons/AzureopenAI.png"
@staticmethod
def get_json_schema() -> str:
f = open(f"{os.path.dirname(__file__)}/static/json_schema.json")
schema = f.read()
f.close()
return schema
def get_llm_instance(self) -> LLM:
max_retries = int(
self.config.get(Constants.MAX_RETRIES, LLMKeys.DEFAULT_MAX_RETRIES)
)
max_tokens = self.config.get(Constants.MAX_TOKENS)
max_tokens = int(max_tokens) if max_tokens else None
try:
llm: LLM = AzureOpenAI(
model=self.config.get(Constants.MODEL, Constants.DEFAULT_MODEL),
deployment_name=str(self.config.get(Constants.DEPLOYMENT_NAME)),
api_key=str(self.config.get(Constants.API_KEY)),
api_version=str(self.config.get(Constants.API_VERSION)),
                azure_endpoint=str(self.config.get(Constants.AZURE_ENDPOINT)),
api_type=Constants.API_TYPE,
temperature=0,
timeout=float(
self.config.get(Constants.TIMEOUT, LLMKeys.DEFAULT_TIMEOUT)
),
max_retries=max_retries,
max_tokens=max_tokens,
)
return llm
except Exception as e:
raise AdapterError(str(e))
| 2,508 | Python | .py | 67 | 28.955224 | 80 | 0.63056 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,762 | __init__.py | Zipstack_unstract-sdk/src/unstract/sdk/adapters/llm/azure_open_ai/src/__init__.py | from .azure_open_ai import AzureOpenAILLM
metadata = {
"name": AzureOpenAILLM.__name__,
"version": "1.0.0",
"adapter": AzureOpenAILLM,
"description": "AzureOpenAI LLM adapter",
"is_active": True,
}
__all__ = ["AzureOpenAILLM"]
| 249 | Python | .py | 9 | 24.222222 | 45 | 0.659664 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,763 | no_op_llm.py | Zipstack_unstract-sdk/src/unstract/sdk/adapters/llm/no_op/src/no_op_llm.py | import logging
import os
import time
from typing import Any
from llama_index.core.llms import LLM
from unstract.sdk.adapters.llm.llm_adapter import LLMAdapter
from unstract.sdk.adapters.llm.no_op.src.no_op_custom_llm import NoOpCustomLLM
logger = logging.getLogger(__name__)
class NoOpLLM(LLMAdapter):
def __init__(self, settings: dict[str, Any]):
super().__init__("NoOpLlm")
self.config = settings
@staticmethod
def get_id() -> str:
return "noOpLlm|f673a5a2-90f9-40f5-94c0-9fbc663b7553"
@staticmethod
def get_name() -> str:
return "No Op LLM"
@staticmethod
def get_description() -> str:
return "No Op LLM"
@staticmethod
def get_provider() -> str:
return "noOpLlm"
@staticmethod
def get_icon() -> str:
return "/icons/adapter-icons/noOpLlm.png"
@staticmethod
def get_json_schema() -> str:
f = open(f"{os.path.dirname(__file__)}/static/json_schema.json")
schema = f.read()
f.close()
return schema
def get_llm_instance(self) -> LLM:
llm: LLM = NoOpCustomLLM(wait_time=self.config.get("wait_time"))
return llm
def test_connection(self) -> bool:
llm = self.get_llm_instance()
if not llm:
return False
llm.complete(
"The capital of Tamilnadu is ",
temperature=0.003,
)
time.sleep(self.config.get("wait_time"))
return True
| 1,477 | Python | .py | 46 | 25.434783 | 78 | 0.633286 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,764 | __init__.py | Zipstack_unstract-sdk/src/unstract/sdk/adapters/llm/no_op/src/__init__.py | from unstract.sdk.adapters.llm.no_op.src.no_op_llm import NoOpLLM
metadata = {
"name": NoOpLLM.__name__,
"version": "1.0.0",
"adapter": NoOpLLM,
"description": "NoOp LLM adapter",
"is_active": True,
}
__all__ = ["NoOpLLM"]
| 245 | Python | .py | 9 | 23.777778 | 65 | 0.628205 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,765 | no_op_custom_llm.py | Zipstack_unstract-sdk/src/unstract/sdk/adapters/llm/no_op/src/no_op_custom_llm.py | import time
from typing import Any
from llama_index.core.base.llms.types import (
CompletionResponse,
CompletionResponseGen,
LLMMetadata,
)
from llama_index.core.llms.custom import CustomLLM
class NoOpCustomLLM(CustomLLM):
wait_time: float
def __init__(
self,
wait_time: float,
) -> None:
        super().__init__(wait_time=wait_time)
@classmethod
def class_name(cls) -> str:
return "NoOpLLM"
def _generate_text(self) -> str:
# Returns a JSON here to support for all enforce types.
return '{ "response":"This is a sample response from a NoOp LLM Adapter."}'
def complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponse:
time.sleep(self.wait_time)
response_text = self._generate_text()
return CompletionResponse(
text=response_text,
)
def stream_complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponseGen:
def gen_response() -> CompletionResponseGen:
response_text = self._generate_text()
yield CompletionResponse(
text=response_text,
delta=response_text,
)
time.sleep(self.wait_time)
return gen_response()
@property
def metadata(self) -> LLMMetadata:
"""Method to fetch LLM metadata. Overriden to extent Base class.
Returns:
LLMMetadata
"""
return LLMMetadata(num_output=-1)
| 1,586 | Python | .py | 48 | 25.25 | 83 | 0.624016 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,766 | mistral.py | Zipstack_unstract-sdk/src/unstract/sdk/adapters/llm/mistral/src/mistral.py | import os
from typing import Any
from llama_index.core.llms import LLM
from llama_index.llms.mistralai import MistralAI
from llama_index.llms.mistralai.base import DEFAULT_MISTRALAI_MAX_TOKENS
from mistralai.exceptions import MistralException
from unstract.sdk.adapters.exceptions import AdapterError
from unstract.sdk.adapters.llm.constants import LLMKeys
from unstract.sdk.adapters.llm.llm_adapter import LLMAdapter
from unstract.sdk.exceptions import LLMError
class Constants:
MODEL = "model"
API_KEY = "api_key"
TIMEOUT = "timeout"
MAX_RETRIES = "max_retries"
MAX_TOKENS = "max_tokens"
class MistralLLM(LLMAdapter):
def __init__(self, settings: dict[str, Any]):
super().__init__("Mistral")
self.config = settings
@staticmethod
def get_id() -> str:
return "mistral|00f766a5-6d6d-47ea-9f6c-ddb1e8a94e82"
@staticmethod
def get_name() -> str:
return "Mistral AI"
@staticmethod
def get_description() -> str:
return "Mistral AI LLM"
@staticmethod
def get_provider() -> str:
return "mistral"
@staticmethod
def get_icon() -> str:
return "/icons/adapter-icons/Mistral%20AI.png"
@staticmethod
def get_json_schema() -> str:
f = open(f"{os.path.dirname(__file__)}/static/json_schema.json")
schema = f.read()
f.close()
return schema
def get_llm_instance(self) -> LLM:
max_retries = int(
self.config.get(Constants.MAX_RETRIES, LLMKeys.DEFAULT_MAX_RETRIES)
)
max_tokens = int(
            self.config.get(Constants.MAX_TOKENS, DEFAULT_MISTRALAI_MAX_TOKENS)
)
try:
llm: LLM = MistralAI(
model=str(self.config.get(Constants.MODEL)),
api_key=str(self.config.get(Constants.API_KEY)),
temperature=0,
timeout=float(
self.config.get(Constants.TIMEOUT, LLMKeys.DEFAULT_TIMEOUT)
),
max_retries=max_retries,
max_tokens=max_tokens,
)
return llm
except Exception as e:
raise AdapterError(str(e))
@staticmethod
def parse_llm_err(e: MistralException) -> LLMError:
"""Parse the error from MistralAI.
Helps parse errors from MistralAI and wraps with custom exception.
Args:
            e (MistralException): Exception from MistralAI
Returns:
LLMError: Error to be sent to the user
"""
        if e.message and '"message":"Unauthorized"' in e.message:
return LLMError("Incorrect API key, please check the API key provided.")
return LLMError(f"Error from MistralAI. {e}")
| 2,746 | Python | .py | 74 | 28.905405 | 84 | 0.638795 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,767 | __init__.py | Zipstack_unstract-sdk/src/unstract/sdk/adapters/llm/mistral/src/__init__.py | from .mistral import MistralLLM
metadata = {
"name": MistralLLM.__name__,
"version": "1.0.0",
"adapter": MistralLLM,
"description": "Mistral LLM adapter",
"is_active": True,
}
__all__ = ["MistralLLM"]
| 223 | Python | .py | 9 | 21.333333 | 41 | 0.627358 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,768 | __init__.py | Zipstack_unstract-sdk/src/unstract/sdk/adapters/llm/any_scale/src/__init__.py | from .anyscale import AnyScaleLLM
metadata = {
"name": AnyScaleLLM.__name__,
"version": "1.0.0",
"adapter": AnyScaleLLM,
"description": "AnyScale LLM adapter",
"is_active": True,
}
__all__ = ["AnyScaleLLM"]
| 229 | Python | .py | 9 | 22 | 42 | 0.637615 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,769 | anyscale.py | Zipstack_unstract-sdk/src/unstract/sdk/adapters/llm/any_scale/src/anyscale.py | import os
from typing import Any
from llama_index.core.constants import DEFAULT_NUM_OUTPUTS
from llama_index.core.llms import LLM
from llama_index.llms.anyscale import Anyscale
from unstract.sdk.adapters.exceptions import AdapterError
from unstract.sdk.adapters.llm.constants import LLMKeys
from unstract.sdk.adapters.llm.llm_adapter import LLMAdapter
class Constants:
MODEL = "model"
API_KEY = "api_key"
API_BASE = "api_base"
MAX_RETRIES = "max_retries"
ADDITIONAL_KWARGS = "additional_kwargs"
MAX_TOKENS = "max_tokens"
class AnyScaleLLM(LLMAdapter):
def __init__(self, settings: dict[str, Any]):
super().__init__("AnyScale")
self.config = settings
@staticmethod
def get_id() -> str:
return "anyscale|adec9815-eabc-4207-9389-79cb89952639"
@staticmethod
def get_name() -> str:
return "AnyScale"
@staticmethod
def get_description() -> str:
return "AnyScale LLM"
@staticmethod
def get_provider() -> str:
return "anyscale"
@staticmethod
def get_icon() -> str:
return "/icons/adapter-icons/anyscale.png"
@staticmethod
def get_json_schema() -> str:
f = open(f"{os.path.dirname(__file__)}/static/json_schema.json")
schema = f.read()
f.close()
return schema
def get_llm_instance(self) -> LLM:
try:
max_tokens = int(self.config.get(Constants.MAX_TOKENS, DEFAULT_NUM_OUTPUTS))
llm: LLM = Anyscale(
model=str(self.config.get(Constants.MODEL)),
api_key=str(self.config.get(Constants.API_KEY)),
api_base=str(self.config.get(Constants.API_BASE)),
additional_kwargs=self.config.get(Constants.ADDITIONAL_KWARGS),
max_retries=int(
self.config.get(Constants.MAX_RETRIES, LLMKeys.DEFAULT_MAX_RETRIES)
),
temperature=0,
max_tokens=max_tokens,
)
return llm
except Exception as e:
raise AdapterError(str(e))
| 2,092 | Python | .py | 57 | 28.596491 | 88 | 0.635015 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,770 | main.py | Zipstack_unstract-sdk/src/unstract/sdk/static/tool_template/v1/src/main.py | import sys
from typing import Any
from unstract.sdk.tool.base import BaseTool
from unstract.sdk.tool.entrypoint import ToolEntrypoint
# TODO: Rename tool's class
class ConcreteTool(BaseTool):
# TODO: Add any checks that need to be done before running the tool
def validate(self, input_file: str, settings: dict[str, Any]) -> None:
pass
def run(
self,
settings: dict[str, Any],
input_file: str,
output_dir: str,
) -> None:
# -------------- TODO: Add your code here ----------------
# 1. Read the input_file
        # 2. Process its contents
# 3. Write files to the output_dir which need to be copied to the
# destination.
# 4. Write the tool result TEXT or JSON
# TODO: Write tool result of dict or str
self.write_tool_result(data={})
if __name__ == "__main__":
args = sys.argv[1:]
# TODO: Rename tool's class
tool = ConcreteTool.from_tool_args(args=args)
ToolEntrypoint.launch(tool=tool, args=args)
| 1,039 | Python | .py | 28 | 31.035714 | 74 | 0.629851 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,771 | tool_gen.py | Zipstack_unstract-sdk/src/unstract/sdk/scripts/tool_gen.py | #!/usr/bin/env python
import argparse
import shutil
from importlib.resources import files
from pathlib import Path
def new_tool(args):
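    """Scaffold a new tool by copying the bundled v1 tool template into <location>/<tool_name>."""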
tool_name = args.tool_name
if tool_name is None:
print("Tool name is required")
exit(1)
location = args.location
if location is None:
print("Location is required")
exit(1)
overwrite = args.overwrite
print(f"Creating new tool {tool_name} at {location}")
# Check if folder exists
folder = Path(location).joinpath(tool_name)
if folder.exists():
if overwrite:
print("Folder exists, overwriting")
else:
print("Folder exists, exiting")
exit(1)
else:
folder.mkdir(parents=True, exist_ok=True)
source = Path(files("unstract.sdk").joinpath("static/tool_template/v1/"))
print(f"Copying files from {source} to {folder}")
# Copy all files in source to folder, recursively
shutil.copytree(source, folder, dirs_exist_ok=True)
def main() -> None:
parser = argparse.ArgumentParser(
prog="Unstract tool generator",
description="Script to generate a new Unstract tool",
epilog="Unstract SDK",
)
parser.add_argument(
"--command", type=str, help="Command to execute", required=True
)
parser.add_argument(
"--tool-name", type=str, help="Tool name", required=False
)
parser.add_argument(
"--location",
type=str,
help="Director to create the new tool in",
required=False,
)
parser.add_argument(
"--overwrite",
help="Overwrite existing tool",
required=False,
default=False,
action="store_true",
)
args = parser.parse_args()
command = str.upper(args.command)
if command == "NEW":
try:
new_tool(args)
except Exception as e:
print(f"Error creating new tool: {e}")
exit(1)
print("New tool created successfully")
else:
print("Command not supported")
exit(1)
if __name__ == "__main__":
main()
| 2,102 | Python | .py | 69 | 23.608696 | 77 | 0.619753 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,772 | test_ocr.py | Zipstack_unstract-sdk/tests/test_ocr.py | import json
import logging
import os
import unittest
from typing import Any
from dotenv import load_dotenv
from parameterized import parameterized
from unstract.sdk.ocr import OCR
from unstract.sdk.tool.base import BaseTool
load_dotenv()
logger = logging.getLogger(__name__)
def get_test_values(env_key: str) -> list[str]:
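    # The env variable is expected to hold a JSON-encoded list of adapter instance ids.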
values = json.loads(os.environ.get(env_key))
return values
def get_env_value(env_key: str) -> str:
value = os.environ.get(env_key)
return value
class ToolOCRTest(unittest.TestCase):
class MockTool(BaseTool):
def run(
self,
params: dict[str, Any] = {},
settings: dict[str, Any] = {},
workflow_id: str = "",
) -> None:
pass
@classmethod
def setUpClass(cls):
cls.tool = cls.MockTool()
@parameterized.expand(get_test_values("OCR_TEST_VALUES"))
def test_get_ocr(self, adapter_instance_id):
tool_ocr = OCR(tool=self.tool)
ocr = tool_ocr.get_ocr(adapter_instance_id)
result = ocr.test_connection()
self.assertTrue(result)
input_file = get_env_value("INPUT_FILE_PATH")
output_file = get_env_value("OUTPUT_FILE_PATH")
if os.path.isfile(output_file):
os.remove(output_file)
output = ocr.process(input_file, output_file)
file_size = os.path.getsize(output_file)
self.assertGreater(file_size, 0)
if os.path.isfile(output_file):
os.remove(output_file)
with open(output_file, "w", encoding="utf-8") as f:
f.write(output)
f.close()
file_size = os.path.getsize(output_file)
self.assertGreater(file_size, 0)
if __name__ == "__main__":
unittest.main()
| 1,755 | Python | .py | 51 | 27.568627 | 61 | 0.63787 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,773 | test_x2text.py | Zipstack_unstract-sdk/tests/test_x2text.py | import json
import logging
import os
import unittest
from typing import Any
from dotenv import load_dotenv
from parameterized import parameterized
from unstract.sdk.tool.base import BaseTool
from unstract.sdk.x2txt import X2Text
load_dotenv()
logger = logging.getLogger(__name__)
def get_test_values(env_key: str) -> list[str]:
values = json.loads(os.environ.get(env_key))
return values
def get_env_value(env_key: str) -> str:
value = os.environ.get(env_key)
return value
class ToolX2TextTest(unittest.TestCase):
class MockTool(BaseTool):
def run(
self,
params: dict[str, Any] = {},
settings: dict[str, Any] = {},
workflow_id: str = "",
) -> None:
            # Dummy implementation for the mock tool
pass
@classmethod
def setUpClass(cls):
cls.tool = cls.MockTool()
@parameterized.expand(get_test_values("X2TEXT_TEST_VALUES"))
def test_get_x2text(self, adapter_instance_id):
tool_x2text = X2Text(tool=self.tool)
x2text = tool_x2text.get_x2text(adapter_instance_id)
self.assertIsNotNone(x2text)
self.assertTrue(x2text.test_connection())
input_file = get_env_value("INPUT_FILE_PATH")
output_file = get_env_value("OUTPUT_FILE_PATH")
if os.path.isfile(output_file):
os.remove(output_file)
file_content = x2text.process(input_file, output_file)
file_size = os.path.getsize(output_file)
self.assertGreater(file_size, 0)
if os.path.isfile(output_file):
os.remove(output_file)
with open(output_file, "w", encoding="utf-8") as f:
f.write(file_content)
f.close()
file_size = os.path.getsize(output_file)
self.assertGreater(file_size, 0)
if __name__ == "__main__":
unittest.main()
| 1,862 | Python | .py | 52 | 28.788462 | 64 | 0.647518 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,774 | test_cache.py | Zipstack_unstract-sdk/tests/test_cache.py | import unittest
from typing import Any
from unstract.sdk.cache import ToolCache
from unstract.sdk.tool.base import BaseTool
# Requires the platform service to be run and
# PLATFORM_SERVICE_API_KEY env to be set
class UnstractToolCacheTest(unittest.TestCase):
class MockTool(BaseTool):
def run(
self,
params: dict[str, Any] = {},
settings: dict[str, Any] = {},
workflow_id: str = "",
) -> None:
# self.stream_log("Mock tool running")
pass
@classmethod
def setUpClass(cls):
cls.tool = cls.MockTool()
def test_set(self):
cache = ToolCache(
tool=self.tool, platform_host="http://localhost", platform_port=3001
)
result = cache.set(key="test_key", value="test_value")
self.assertTrue(result)
def test_get(self):
cache = ToolCache(
tool=self.tool, platform_host="http://localhost", platform_port=3001
)
cache.set(key="test_key", value="test_value")
result = cache.get(key="test_key")
self.assertEqual(result, "test_value")
def test_delete(self):
cache = ToolCache(
tool=self.tool, platform_host="http://localhost", platform_port=3001
)
cache.set(key="test_key", value="test_value")
result = cache.delete(key="test_key")
self.assertTrue(result)
result = cache.get(key="test_key")
self.assertIsNone(result)
if __name__ == "__main__":
unittest.main()
| 1,539 | Python | .py | 43 | 27.883721 | 80 | 0.61197 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,775 | test_vector_db.py | Zipstack_unstract-sdk/tests/test_vector_db.py | import json
import logging
import os
import unittest
from dotenv import load_dotenv
from llama_index.core import MockEmbedding
from llama_index.core.vector_stores.types import (
BasePydanticVectorStore,
VectorStore,
)
from parameterized import parameterized
from unstract.adapters.vectordb.helper import VectorDBHelper
from unstract.sdk.tool.base import BaseTool
from unstract.sdk.vector_db import ToolVectorDB
load_dotenv()
logger = logging.getLogger(__name__)
def get_test_values(env_key: str) -> list[str]:
test_values = json.loads(os.environ.get(env_key))
return test_values
class ToolVectorDBTest(unittest.TestCase):
class MockTool(BaseTool):
def run(
self,
) -> None:
self.stream_log("Mock tool running")
def setUp(self) -> None:
self.tool = self.MockTool()
@parameterized.expand(
get_test_values("VECTOR_DB_TEST_VALUES")
# Works for Qdrant and Postgres
)
def test_get_vector_db(self, adapter_instance_id: str) -> None:
mock_embedding = MockEmbedding(embed_dim=1)
unstract_tool_vector_db = ToolVectorDB(tool=self.tool)
vector_store = unstract_tool_vector_db.get_vector_db(
adapter_instance_id, mock_embedding.embed_dim
)
self.assertIsNotNone(vector_store)
self.assertIsInstance(
vector_store, (BasePydanticVectorStore, VectorStore)
)
result = VectorDBHelper.test_vector_db_instance(vector_store)
self.assertEqual(result, True)
if __name__ == "__main__":
unittest.main()
| 1,589 | Python | .py | 45 | 29.577778 | 69 | 0.706074 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,776 | test_embedding.py | Zipstack_unstract-sdk/tests/test_embedding.py | import json
import os
import unittest
from dotenv import load_dotenv
from llama_index.core.embeddings import BaseEmbedding
from parameterized import parameterized
from unstract.sdk.embedding import ToolEmbedding
from unstract.sdk.tool.base import BaseTool
load_dotenv()
def get_test_values(env_key: str) -> list[str]:
test_values = json.loads(os.environ.get(env_key))
return test_values
class ToolEmbeddingTest(unittest.TestCase):
TEST_SNIPPET = "Hello, I am Unstract"
class MockTool(BaseTool):
def run(
self,
) -> None:
self.stream_log("Mock tool running")
def setUp(self) -> None:
self.tool = self.MockTool()
def run_embedding_test(self, adapter_instance_id):
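        # Resolve the embedding adapter for the given instance id and verify it can embed a snippet.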
embedding = ToolEmbedding(tool=self.tool)
embed_model = embedding.get_embedding(adapter_instance_id)
self.assertIsNotNone(embed_model)
self.assertIsInstance(embed_model, BaseEmbedding)
response = embed_model._get_text_embedding(
ToolEmbeddingTest.TEST_SNIPPET
)
self.assertIsNotNone(response)
@parameterized.expand(get_test_values("EMBEDDING_TEST_VALUES"))
def test_get_embedding(self, adapter_instance_id: str) -> None:
self.run_embedding_test(adapter_instance_id)
if __name__ == "__main__":
unittest.main()
| 1,344 | Python | .py | 35 | 32.457143 | 67 | 0.709877 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,777 | test_llm.py | Zipstack_unstract-sdk/tests/test_llm.py | import json
import logging
import os
import unittest
from typing import Any
from dotenv import load_dotenv
from parameterized import parameterized
from unstract.adapters.llm.helper import LLMHelper
from unstract.sdk.llm import ToolLLM
from unstract.sdk.tool.base import BaseTool
load_dotenv()
logger = logging.getLogger(__name__)
def get_test_values(env_key: str) -> list[str]:
test_values = json.loads(os.environ.get(env_key))
return test_values
class ToolLLMTest(unittest.TestCase):
class MockTool(BaseTool):
def run(
self,
params: dict[str, Any] = {},
settings: dict[str, Any] = {},
workflow_id: str = "",
) -> None:
# self.stream_log("Mock tool running")
pass
@classmethod
def setUpClass(cls):
cls.tool = cls.MockTool()
@parameterized.expand(
get_test_values("LLM_TEST_VALUES")
# AzureOpenAI (Works)
# OpenAI (Works)
# AnyScale (llm FAILS)
# Anthropic (llm.complete FAILS)
# 1. unsupported params: max_token, stop.
# TypeError: create() got an unexpected keyword argument
# 'max_tokens'
# 2. anthropic.APIConnectionError: Connection error.
# PaLM (Works)
# Errors
# 1. unexpected keyword argument 'max_tokens', 'stop'
# Replicate (llm.complete FAILS)
# Errors
# 1. replicate.exceptions.ReplicateError:
# You did not pass an authentication token
# Mistral (llm.complete FAILS)
# Errors
# 1.TypeError: chat() got an unexpected keyword argument 'stop'
)
def test_get_llm(self, adapter_instance_id):
tool_llm = ToolLLM(tool=self.tool)
llm = tool_llm.get_llm(adapter_instance_id)
self.assertIsNotNone(llm)
result = LLMHelper.test_llm_instance(llm)
logger.error(result)
self.assertEqual(result, True)
if __name__ == "__main__":
unittest.main()
| 1,987 | Python | .py | 58 | 27.310345 | 71 | 0.643006 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,778 | test_index.py | Zipstack_unstract-sdk/tests/test_index.py | import json
import logging
import os
import unittest
from typing import Any, Optional
from unittest.mock import Mock, patch
from dotenv import load_dotenv
from parameterized import parameterized
from unstract.sdk.index import Index
from unstract.sdk.tool.base import BaseTool
load_dotenv()
logger = logging.getLogger(__name__)
def get_test_values(env_key: str) -> list[str]:
test_values = json.loads(os.environ.get(env_key))
return test_values
class ToolLLMTest(unittest.TestCase):
class MockTool(BaseTool):
def run(
self,
params: dict[str, Any] = {},
settings: dict[str, Any] = {},
workflow_id: str = "",
) -> None:
# self.stream_log("Mock tool running")
pass
@classmethod
def setUpClass(cls):
cls.tool = cls.MockTool()
@patch(
"unstract.sdk.index.Index.generate_index_key",
Mock(
return_value="77843eb8d9e30ad56bfcb018c2633fa32feef2f0c09762b6b820c75664b64c1b"
),
)
def test_generate_file_id(self):
expected = "77843eb8d9e30ad56bfcb018c2633fa32feef2f0c09762b6b820c75664b64c1b"
index = Index(tool=self.tool)
actual = index.generate_file_id(
tool_id="8ac26867-7811-4dc7-a17b-b16d3b561583",
vector_db="81f1f6a8-cae8-4b8e-b2a4-57f80de512f6",
embedding="ecf998d6-ded0-4aca-acd1-372a21daf0f9",
x2text="59bc55fc-e2a7-48dd-ae93-794b4d81d46e",
chunk_size="1024",
chunk_overlap="128",
file_path="/a/b/c",
file_hash="045b1a67824592b67426f8e60c1f8328e8d2a35139f9983e0aa0a7b6f10915c3",
)
assert expected == actual
test_data = [
{
"vector_db": "81f1f6a8-cae8-4b8e-b2a4-57f80de512f6",
"embedding": "ecf998d6-ded0-4aca-acd1-372a21daf0f9",
"x2text": "59bc55fc-e2a7-48dd-ae93-794b4d81d46e",
"chunk_size": "1024",
"chunk_overlap": "128",
"file_path": "/a/b/c",
},
{
"vector_db": "81f1f6a8-cae8-4b8e-b2a4-57f80de512f6",
"embedding": "ecf998d6-ded0-4aca-acd1-372a21daf0f9",
"x2text": "59bc55fc-e2a7-48dd-ae93-794b4d81d46e",
"chunk_size": "1024",
"chunk_overlap": "128",
"file_path": "/a/b/c",
"file_hash": "045b1a67824592b67426f8e60c1f8328e8d2a35139f9983e0aa0a7b6f10915c3",
},
]
@parameterized.expand(test_data)
@patch(
"unstract.sdk.adapter.ToolAdapter.get_adapter_config",
Mock(return_value={}),
)
@patch(
"unstract.sdk.utils.ToolUtils.hash_str",
Mock(
return_value="77843eb8d9e30ad56bfcb018c2633fa32feef2f0c09762b6b820c75664b64c1b"
),
)
@patch(
"unstract.sdk.utils.ToolUtils.get_hash_from_file",
Mock(
return_value="ab940bb34a60d2a7876dd8e1bd1b22f5dc85936a9e2af3c49bfc888a1d944ff0"
),
)
def test_generate_index_key(
self,
vector_db: str,
embedding: str,
x2text: str,
chunk_size: str,
chunk_overlap: str,
file_path: Optional[str] = None,
file_hash: Optional[str] = None,
):
expected = "77843eb8d9e30ad56bfcb018c2633fa32feef2f0c09762b6b820c75664b64c1b"
index = Index(tool=self.tool)
actual = index.generate_index_key(
vector_db,
embedding,
x2text,
chunk_size,
chunk_overlap,
file_path,
file_hash,
)
assert expected == actual
if __name__ == "__main__":
unittest.main()
| 3,675 | Python | .py | 108 | 25.37963 | 92 | 0.613847 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,779 | test_entrypoint.py | Zipstack_unstract-sdk/tests/tool/test_entrypoint.py | import unittest
from io import StringIO
from typing import Any
from unittest.mock import patch
from unstract.sdk.constants import LogLevel
from unstract.sdk.tool.base import BaseTool
from unstract.sdk.tool.entrypoint import ToolEntrypoint
class UnstractSDKToolsEntrypointTest(unittest.TestCase):
INFO_MESSAGE = "Running a mock tool"
DEBUG_MESSAGE = "Example DEBUG message"
class MockTool(BaseTool):
def run(
self,
params: dict[str, Any] = {},
settings: dict[str, Any] = {},
workflow_id: str = "",
) -> None:
self.stream_log(UnstractSDKToolsEntrypointTest.INFO_MESSAGE)
self.stream_log(
UnstractSDKToolsEntrypointTest.DEBUG_MESSAGE, LogLevel.DEBUG
)
@classmethod
def setUpClass(cls):
cls.tool = cls.MockTool(log_level=LogLevel.DEBUG)
def _launch_tool(self, args: list[str]) -> str:
captured_output = StringIO()
with patch("sys.stdout", new=captured_output):
ToolEntrypoint.launch(tool=self.tool, args=args)
return captured_output.getvalue()
def test_spec(self):
args = [
"--command",
"SPEC",
"--workflow-id",
"00000000-0000-0000-0000-000000000000",
"--log-level",
"INFO",
]
captured_output_str = self._launch_tool(args=args)
self.assertIn("SPEC", captured_output_str)
def test_properties(self):
args = [
"--command",
"PROPERTIES",
"--workflow-id",
"00000000-0000-0000-0000-000000000000",
"--log-level",
"INFO",
]
captured_output_str = self._launch_tool(args=args)
self.assertIn("PROPERTIES", captured_output_str)
def test_variables(self):
args = [
"--command",
"VARIABLES",
"--workflow-id",
"00000000-0000-0000-0000-000000000000",
"--log-level",
"INFO",
]
captured_output_str = self._launch_tool(args=args)
self.assertIn("VARIABLES", captured_output_str)
def test_run(self):
args = [
"--command",
"RUN",
"--params",
"{}",
"--settings",
"{}",
"--workflow-id",
"00000000-0000-0000-0000-000000000000",
"--log-level",
"INFO",
]
captured_output_str = self._launch_tool(args=args)
self.assertIn(self.INFO_MESSAGE, captured_output_str)
def test_run_debug(self):
args = [
"--command",
"RUN",
"--params",
"{}",
"--settings",
"{}",
"--workflow-id",
"00000000-0000-0000-0000-000000000000",
"--log-level",
"DEBUG",
]
captured_output_str = self._launch_tool(args=args)
self.assertIn(self.DEBUG_MESSAGE, captured_output_str)
if __name__ == "__main__":
unittest.main()
| 3,090 | Python | .py | 94 | 22.968085 | 76 | 0.545424 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,780 | test_static.py | Zipstack_unstract-sdk/tests/tool/test_static.py | import unittest
from io import StringIO
from unittest.mock import patch
from unstract.sdk.tool.mixin import ToolConfigHelper
from unstract.sdk.tool.stream import StreamMixin
from unstract.sdk.utils import ToolUtils
class UnstractSDKToolsStaticTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.tool = StreamMixin()
def test_spec(self):
spec = ToolUtils.json_to_str(
ToolConfigHelper.spec(spec_file="config/tool_spec.json")
)
self.assertIsNotNone(spec)
def test_stream_spec(self):
spec = ToolUtils.json_to_str(
ToolConfigHelper.spec(spec_file="config/tool_spec.json")
)
captured_output = StringIO()
with patch("sys.stdout", new=captured_output):
self.tool.stream_spec(spec)
captured_output_str = captured_output.getvalue()
# print(captured_output_str)
self.assertIn("SPEC", captured_output_str)
def test_properties(self):
properties = ToolUtils.json_to_str(
ToolConfigHelper.properties(
properties_file="config/tool_properties.json"
)
)
self.assertIsNotNone(properties)
def test_stream_properties(self):
properties = ToolUtils.json_to_str(
ToolConfigHelper.properties(
properties_file="config/tool_properties.json"
)
)
captured_output = StringIO()
with patch("sys.stdout", new=captured_output):
self.tool.stream_properties(properties)
captured_output_str = captured_output.getvalue()
# print(captured_output_str)
self.assertIn("PROPERTIES", captured_output_str)
def test_icon(self):
icon = ToolConfigHelper.icon(icon_file="config/icon.svg")
self.assertIsNotNone(icon)
def test_stream_icon(self):
icon = ToolConfigHelper.icon(icon_file="config/icon.svg")
captured_output = StringIO()
with patch("sys.stdout", new=captured_output):
self.tool.stream_icon(icon)
captured_output_str = captured_output.getvalue()
# print(captured_output_str)
self.assertIn("ICON", captured_output_str)
if __name__ == "__main__":
unittest.main()
| 2,252 | Python | .py | 57 | 31.157895 | 68 | 0.659341 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,781 | hub.ipynb | arojsubedi_Improved-YOLOv8s/examples/hub.ipynb | {
"nbformat": 4,
"nbformat_minor": 0,
"metadata": {
"colab": {
"name": "Ultralytics HUB",
"provenance": []
},
"kernelspec": {
"name": "python3",
"display_name": "Python 3"
},
"language_info": {
"name": "python"
},
"accelerator": "GPU"
},
"cells": [
{
"cell_type": "markdown",
"metadata": {
"id": "FIzICjaph_Wy"
},
"source": [
"<a align=\"center\" href=\"https://hub.ultralytics.com\" target=\"_blank\">\n",
"<img width=\"1024\", src=\"https://github.com/ultralytics/assets/raw/main/im/ultralytics-hub.png\"></a>\n",
"\n",
"<div align=\"center\">\n",
"\n",
"[中文](https://docs.ultralytics.com/zh/) | [한국어](https://docs.ultralytics.com/ko/) | [日本語](https://docs.ultralytics.com/ja/) | [Русский](https://docs.ultralytics.com/ru/) | [Deutsch](https://docs.ultralytics.com/de/) | [Français](https://docs.ultralytics.com/fr/) | [Español](https://docs.ultralytics.com/es/) | [Português](https://docs.ultralytics.com/pt/) | [हिन्दी](https://docs.ultralytics.com/hi/) | [العربية](https://docs.ultralytics.com/ar/)\n",
"\n",
" <a href=\"https://github.com/ultralytics/hub/actions/workflows/ci.yaml\">\n",
" <img src=\"https://github.com/ultralytics/hub/actions/workflows/ci.yaml/badge.svg\" alt=\"CI CPU\"></a>\n",
" <a href=\"https://colab.research.google.com/github/ultralytics/hub/blob/master/hub.ipynb\">\n",
" <img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"></a>\n",
"\n",
"Welcome to the [Ultralytics](https://ultralytics.com/) HUB notebook!\n",
"\n",
"This notebook allows you to train [YOLOv5](https://github.com/ultralytics/yolov5) and [YOLOv8](https://github.com/ultralytics/ultralytics) üöÄ models using [HUB](https://hub.ultralytics.com/). Please browse the HUB <a href=\"https://docs.ultralytics.com/hub/\">Docs</a> for details, raise an issue on <a href=\"https://github.com/ultralytics/hub/issues/new/choose\">GitHub</a> for support, and join our <a href=\"https://ultralytics.com/discord\">Discord</a> community for questions and discussions!\n",
"</div>"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "eRQ2ow94MiOv"
},
"source": [
"# Setup\n",
"\n",
"Pip install `ultralytics` and [dependencies](https://github.com/ultralytics/ultralytics/blob/main/pyproject.toml) and check software and hardware."
]
},
{
"cell_type": "code",
"metadata": {
"id": "FyDnXd-n4c7Y",
"colab": {
"base_uri": "https://localhost:8080/"
},
"outputId": "01e34b44-a26f-4dbc-a5a1-6e29bca01a1b"
},
"source": [
"%pip install ultralytics # install\n",
"from ultralytics import YOLO, checks, hub\n",
"checks() # checks"
],
"execution_count": null,
"outputs": [
{
"output_type": "stream",
"name": "stderr",
"text": [
"Ultralytics YOLOv8.0.210 üöÄ Python-3.10.12 torch-2.0.1+cu118 CUDA:0 (Tesla T4, 15102MiB)\n",
"Setup complete ‚úÖ (2 CPUs, 12.7 GB RAM, 24.4/78.2 GB disk)\n"
]
}
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "cQ9BwaAqxAm4"
},
"source": [
"# Start\n",
"\n",
"Login with your [API key](https://hub.ultralytics.com/settings?tab=api+keys), select your YOLO üöÄ model and start training!"
]
},
{
"cell_type": "code",
"metadata": {
"id": "XSlZaJ9Iw_iZ"
},
"source": [
"hub.login('API_KEY') # use your API key\n",
"\n",
"model = YOLO('https://hub.ultralytics.com/MODEL_ID') # use your model URL\n",
"results = model.train() # train model"
],
"execution_count": null,
"outputs": []
}
]
}
| 4,064 | Python | .py | 106 | 30.962264 | 513 | 0.553815 | arojsubedi/Improved-YOLOv8s | 8 | 5 | 0 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,782 | heatmaps.ipynb | arojsubedi_Improved-YOLOv8s/examples/heatmaps.ipynb | {
"nbformat": 4,
"nbformat_minor": 0,
"metadata": {
"colab": {
"provenance": [],
"gpuType": "T4"
},
"kernelspec": {
"name": "python3",
"display_name": "Python 3"
},
"language_info": {
"name": "python"
},
"accelerator": "GPU"
},
"cells": [
{
"cell_type": "markdown",
"source": [
"<div align=\"center\">\n",
"\n",
" <a href=\"https://ultralytics.com/yolov8\" target=\"_blank\">\n",
" <img width=\"1024\", src=\"https://raw.githubusercontent.com/ultralytics/assets/main/yolov8/banner-yolov8.png\"></a>\n",
"\n",
" [中文](https://docs.ultralytics.com/zh/) | [한국어](https://docs.ultralytics.com/ko/) | [日本語](https://docs.ultralytics.com/ja/) | [Русский](https://docs.ultralytics.com/ru/) | [Deutsch](https://docs.ultralytics.com/de/) | [Français](https://docs.ultralytics.com/fr/) | [Español](https://docs.ultralytics.com/es/) | [Português](https://docs.ultralytics.com/pt/) | [हिन्दी](https://docs.ultralytics.com/hi/) | [العربية](https://docs.ultralytics.com/ar/)\n",
"\n",
" <a href=\"https://colab.research.google.com/github/ultralytics/ultralytics/blob/main/examples/heatmaps.ipynb\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"></a>\n",
"\n",
"Welcome to the Ultralytics YOLOv8 üöÄ notebook! <a href=\"https://github.com/ultralytics/ultralytics\">YOLOv8</a> is the latest version of the YOLO (You Only Look Once) AI models developed by <a href=\"https://ultralytics.com\">Ultralytics</a>. This notebook serves as the starting point for exploring the <a href=\"https://docs.ultralytics.com/guides/heatmaps/\">heatmaps</a> and understand its features and capabilities.\n",
"\n",
"YOLOv8 models are fast, accurate, and easy to use, making them ideal for various object detection and image segmentation tasks. They can be trained on large datasets and run on diverse hardware platforms, from CPUs to GPUs.\n",
"\n",
"We hope that the resources in this notebook will help you get the most out of <a href=\"https://docs.ultralytics.com/guides/heatmaps/\">Ultralytics Heatmaps</a>. Please browse the YOLOv8 <a href=\"https://docs.ultralytics.com/\">Docs</a> for details, raise an issue on <a href=\"https://github.com/ultralytics/ultralytics\">GitHub</a> for support, and join our <a href=\"https://ultralytics.com/discord\">Discord</a> community for questions and discussions!\n",
"\n",
"</div>"
],
"metadata": {
"id": "PN1cAxdvd61e"
}
},
{
"cell_type": "markdown",
"source": [
"# Setup\n",
"\n",
"Pip install `ultralytics` and [dependencies](https://github.com/ultralytics/ultralytics/blob/main/pyproject.toml) and check software and hardware."
],
"metadata": {
"id": "o68Sg1oOeZm2"
}
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "9dSwz_uOReMI"
},
"outputs": [],
"source": [
"!pip install ultralytics"
]
},
{
"cell_type": "markdown",
"source": [
"# Ultralytics Heatmaps\n",
"\n",
"Heatmap is color-coded matrix, generated by Ultralytics YOLOv8, simplifies intricate data by using vibrant colors. This visual representation employs warmer hues for higher intensities and cooler tones for lower values. Heatmaps are effective in illustrating complex data patterns, correlations, and anomalies, providing a user-friendly and engaging way to interpret data across various domains."
],
"metadata": {
"id": "m7VkxQ2aeg7k"
}
},
{
"cell_type": "code",
"source": [
"from ultralytics import YOLO\n",
"from ultralytics.solutions import heatmap\n",
"import cv2\n",
"\n",
"model = YOLO(\"yolov8n.pt\")\n",
"cap = cv2.VideoCapture(\"path/to/video/file.mp4\")\n",
"assert cap.isOpened(), \"Error reading video file\"\n",
"w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS))\n",
"\n",
"# Video writer\n",
"video_writer = cv2.VideoWriter(\"heatmap_output.avi\",\n",
" cv2.VideoWriter_fourcc(*'mp4v'),\n",
" fps,\n",
" (w, h))\n",
"\n",
"# Init heatmap\n",
"heatmap_obj = heatmap.Heatmap()\n",
"heatmap_obj.set_args(colormap=cv2.COLORMAP_PARULA,\n",
" imw=w,\n",
" imh=h,\n",
" view_img=True,\n",
" shape=\"circle\")\n",
"\n",
"while cap.isOpened():\n",
" success, im0 = cap.read()\n",
" if not success:\n",
" print(\"Video frame is empty or video processing has been successfully completed.\")\n",
" break\n",
" tracks = model.track(im0, persist=True, show=False)\n",
"\n",
" im0 = heatmap_obj.generate_heatmap(im0, tracks)\n",
" video_writer.write(im0)\n",
"\n",
"cap.release()\n",
"video_writer.release()\n",
"cv2.destroyAllWindows()"
],
"metadata": {
"id": "Cx-u59HQdu2o"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "markdown",
"source": [
"#Community Support\n",
"\n",
"For more information, you can explore <a href=\"https://docs.ultralytics.com/guides/heatmaps/#heatmap-colormaps\">Ultralytics Heatmaps Docs</a>\n",
"\n",
"Ultralytics ‚ö° resources\n",
"- About Us – https://ultralytics.com/about\n",
"- Join Our Team – https://ultralytics.com/work\n",
"- Contact Us – https://ultralytics.com/contact\n",
"- Discord – https://discord.gg/2wNGbc6g9X\n",
"- Ultralytics License – https://ultralytics.com/license\n",
"\n",
"YOLOv8 üöÄ resources\n",
"- GitHub – https://github.com/ultralytics/ultralytics\n",
"- Docs – https://docs.ultralytics.com/"
],
"metadata": {
"id": "QrlKg-y3fEyD"
}
}
]
} | 6,440 | Python | .py | 145 | 36.841379 | 506 | 0.568615 | arojsubedi/Improved-YOLOv8s | 8 | 5 | 0 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,783 | tutorial.ipynb | arojsubedi_Improved-YOLOv8s/examples/tutorial.ipynb | {
"nbformat": 4,
"nbformat_minor": 0,
"metadata": {
"colab": {
"name": "YOLOv8 Tutorial",
"provenance": [],
"toc_visible": true
},
"kernelspec": {
"name": "python3",
"display_name": "Python 3"
},
"accelerator": "GPU"
},
"cells": [
{
"cell_type": "markdown",
"metadata": {
"id": "t6MPjfT5NrKQ"
},
"source": [
"<div align=\"center\">\n",
"\n",
" <a href=\"https://ultralytics.com/yolov8\" target=\"_blank\">\n",
" <img width=\"1024\", src=\"https://raw.githubusercontent.com/ultralytics/assets/main/yolov8/banner-yolov8.png\"></a>\n",
"\n",
" [中文](https://docs.ultralytics.com/zh/) | [한국어](https://docs.ultralytics.com/ko/) | [日本語](https://docs.ultralytics.com/ja/) | [Русский](https://docs.ultralytics.com/ru/) | [Deutsch](https://docs.ultralytics.com/de/) | [Français](https://docs.ultralytics.com/fr/) | [Español](https://docs.ultralytics.com/es/) | [Português](https://docs.ultralytics.com/pt/) | [हिन्दी](https://docs.ultralytics.com/hi/) | [العربية](https://docs.ultralytics.com/ar/)\n",
"\n",
" <a href=\"https://console.paperspace.com/github/ultralytics/ultralytics\"><img src=\"https://assets.paperspace.io/img/gradient-badge.svg\" alt=\"Run on Gradient\"/></a>\n",
" <a href=\"https://colab.research.google.com/github/ultralytics/ultralytics/blob/main/examples/tutorial.ipynb\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"></a>\n",
" <a href=\"https://www.kaggle.com/ultralytics/yolov8\"><img src=\"https://kaggle.com/static/images/open-in-kaggle.svg\" alt=\"Open In Kaggle\"></a>\n",
"\n",
"Welcome to the Ultralytics YOLOv8 üöÄ notebook! <a href=\"https://github.com/ultralytics/ultralytics\">YOLOv8</a> is the latest version of the YOLO (You Only Look Once) AI models developed by <a href=\"https://ultralytics.com\">Ultralytics</a>. This notebook serves as the starting point for exploring the various resources available to help you get started with YOLOv8 and understand its features and capabilities.\n",
"\n",
"YOLOv8 models are fast, accurate, and easy to use, making them ideal for various object detection and image segmentation tasks. They can be trained on large datasets and run on diverse hardware platforms, from CPUs to GPUs.\n",
"\n",
"We hope that the resources in this notebook will help you get the most out of YOLOv8. Please browse the YOLOv8 <a href=\"https://docs.ultralytics.com/\">Docs</a> for details, raise an issue on <a href=\"https://github.com/ultralytics/ultralytics\">GitHub</a> for support, and join our <a href=\"https://ultralytics.com/discord\">Discord</a> community for questions and discussions!\n",
"\n",
"</div>"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "7mGmQbAO5pQb"
},
"source": [
"# Setup\n",
"\n",
"Pip install `ultralytics` and [dependencies](https://github.com/ultralytics/ultralytics/blob/main/pyproject.toml) and check software and hardware."
]
},
{
"cell_type": "code",
"metadata": {
"id": "wbvMlHd_QwMG",
"colab": {
"base_uri": "https://localhost:8080/"
},
"outputId": "27ca383c-0a97-4679-f1c5-ba843f033de7"
},
"source": [
"%pip install ultralytics\n",
"import ultralytics\n",
"ultralytics.checks()"
],
"execution_count": null,
"outputs": [
{
"output_type": "stream",
"name": "stderr",
"text": [
"Ultralytics YOLOv8.0.145 üöÄ Python-3.10.6 torch-2.0.1+cu118 CUDA:0 (Tesla T4, 15102MiB)\n",
"Setup complete ‚úÖ (2 CPUs, 12.7 GB RAM, 24.2/78.2 GB disk)\n"
]
}
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "4JnkELT0cIJg"
},
"source": [
"# 1. Predict\n",
"\n",
"YOLOv8 may be used directly in the Command Line Interface (CLI) with a `yolo` command for a variety of tasks and modes and accepts additional arguments, i.e. `imgsz=640`. See a full list of available `yolo` [arguments](https://docs.ultralytics.com/usage/cfg/) and other details in the [YOLOv8 Predict Docs](https://docs.ultralytics.com/modes/train/).\n"
]
},
{
"cell_type": "code",
"metadata": {
"id": "zR9ZbuQCH7FX",
"colab": {
"base_uri": "https://localhost:8080/"
},
"outputId": "64489d1f-e71a-44b5-92f6-2088781ca096"
},
"source": [
"# Run inference on an image with YOLOv8n\n",
"!yolo predict model=yolov8n.pt source='https://ultralytics.com/images/zidane.jpg'"
],
"execution_count": null,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"Downloading https://github.com/ultralytics/assets/releases/download/v8.1.0/yolov8n.pt to 'yolov8n.pt'...\n",
"100% 6.23M/6.23M [00:00<00:00, 77.2MB/s]\n",
"Ultralytics YOLOv8.0.145 üöÄ Python-3.10.6 torch-2.0.1+cu118 CUDA:0 (Tesla T4, 15102MiB)\n",
"YOLOv8n summary (fused): 168 layers, 3151904 parameters, 0 gradients\n",
"\n",
"Downloading https://ultralytics.com/images/zidane.jpg to 'zidane.jpg'...\n",
"100% 165k/165k [00:00<00:00, 7.46MB/s]\n",
"image 1/1 /content/zidane.jpg: 384x640 2 persons, 1 tie, 365.8ms\n",
"Speed: 13.7ms preprocess, 365.8ms inference, 431.7ms postprocess per image at shape (1, 3, 384, 640)\n",
"Results saved to \u001b[1mruns/detect/predict\u001b[0m\n"
]
}
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "hkAzDWJ7cWTr"
},
"source": [
" \n",
"<img align=\"left\" src=\"https://user-images.githubusercontent.com/26833433/212889447-69e5bdf1-5800-4e29-835e-2ed2336dede2.jpg\" width=\"600\">"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "0eq1SMWl6Sfn"
},
"source": [
"# 2. Val\n",
"Validate a model's accuracy on the [COCO](https://docs.ultralytics.com/datasets/detect/coco/) dataset's `val` or `test` splits. The latest YOLOv8 [models](https://github.com/ultralytics/ultralytics#models) are downloaded automatically the first time they are used. See [YOLOv8 Val Docs](https://docs.ultralytics.com/modes/val/) for more information."
]
},
{
"cell_type": "code",
"metadata": {
"id": "WQPtK1QYVaD_"
},
"source": [
"# Download COCO val\n",
"import torch\n",
"torch.hub.download_url_to_file('https://ultralytics.com/assets/coco2017val.zip', 'tmp.zip') # download (780M - 5000 images)\n",
"!unzip -q tmp.zip -d datasets && rm tmp.zip # unzip"
],
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"metadata": {
"id": "X58w8JLpMnjH",
"outputId": "e3aacd98-ceca-49b7-e112-a0c25979ad6c",
"colab": {
"base_uri": "https://localhost:8080/"
}
},
"source": [
"# Validate YOLOv8n on COCO8 val\n",
"!yolo val model=yolov8n.pt data=coco8.yaml"
],
"execution_count": null,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"Ultralytics YOLOv8.0.145 üöÄ Python-3.10.6 torch-2.0.1+cu118 CUDA:0 (Tesla T4, 15102MiB)\n",
"YOLOv8n summary (fused): 168 layers, 3151904 parameters, 0 gradients\n",
"\n",
"Dataset 'coco8.yaml' images not found ⚠️, missing path '/content/datasets/coco8/images/val'\n",
"Downloading https://ultralytics.com/assets/coco8.zip to '/content/datasets/coco8.zip'...\n",
"100% 433k/433k [00:00<00:00, 12.4MB/s]\n",
"Unzipping /content/datasets/coco8.zip to /content/datasets...\n",
"Dataset download success ‚úÖ (0.7s), saved to \u001b[1m/content/datasets\u001b[0m\n",
"\n",
"Downloading https://ultralytics.com/assets/Arial.ttf to '/root/.config/Ultralytics/Arial.ttf'...\n",
"100% 755k/755k [00:00<00:00, 17.5MB/s]\n",
"\u001b[34m\u001b[1mval: \u001b[0mScanning /content/datasets/coco8/labels/val... 4 images, 0 backgrounds, 0 corrupt: 100% 4/4 [00:00<00:00, 276.04it/s]\n",
"\u001b[34m\u001b[1mval: \u001b[0mNew cache created: /content/datasets/coco8/labels/val.cache\n",
" Class Images Instances Box(P R mAP50 mAP50-95): 100% 1/1 [00:03<00:00, 3.84s/it]\n",
" all 4 17 0.621 0.833 0.888 0.63\n",
" person 4 10 0.721 0.5 0.519 0.269\n",
" dog 4 1 0.37 1 0.995 0.597\n",
" horse 4 2 0.751 1 0.995 0.631\n",
" elephant 4 2 0.505 0.5 0.828 0.394\n",
" umbrella 4 1 0.564 1 0.995 0.995\n",
" potted plant 4 1 0.814 1 0.995 0.895\n",
"Speed: 0.3ms preprocess, 78.7ms inference, 0.0ms loss, 65.4ms postprocess per image\n",
"Results saved to \u001b[1mruns/detect/val\u001b[0m\n"
]
}
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "ZY2VXXXu74w5"
},
"source": [
"# 3. Train\n",
"\n",
"<p align=\"\"><a href=\"https://bit.ly/ultralytics_hub\"><img width=\"1000\" src=\"https://github.com/ultralytics/assets/raw/main/yolov8/banner-integrations.png\"/></a></p>\n",
"\n",
"Train YOLOv8 on [Detect](https://docs.ultralytics.com/tasks/detect/), [Segment](https://docs.ultralytics.com/tasks/segment/), [Classify](https://docs.ultralytics.com/tasks/classify/) and [Pose](https://docs.ultralytics.com/tasks/pose/) datasets. See [YOLOv8 Train Docs](https://docs.ultralytics.com/modes/train/) for more information."
]
},
{
"cell_type": "code",
"source": [
"#@title Select YOLOv8 üöÄ logger {run: 'auto'}\n",
"logger = 'Comet' #@param ['Comet', 'TensorBoard']\n",
"\n",
"if logger == 'Comet':\n",
" %pip install -q comet_ml\n",
" import comet_ml; comet_ml.init()\n",
"elif logger == 'TensorBoard':\n",
" %load_ext tensorboard\n",
" %tensorboard --logdir ."
],
"metadata": {
"id": "ktegpM42AooT"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"metadata": {
"id": "1NcFxRcFdJ_O",
"outputId": "b750f2fe-c4d9-4764-b8d5-ed7bd920697b",
"colab": {
"base_uri": "https://localhost:8080/"
}
},
"source": [
"# Train YOLOv8n on COCO8 for 3 epochs\n",
"!yolo train model=yolov8n.pt data=coco8.yaml epochs=3 imgsz=640"
],
"execution_count": null,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"Ultralytics YOLOv8.0.145 üöÄ Python-3.10.6 torch-2.0.1+cu118 CUDA:0 (Tesla T4, 15102MiB)\n",
"\u001b[34m\u001b[1mengine/trainer: \u001b[0mtask=detect, mode=train, model=yolov8n.pt, data=coco8.yaml, epochs=3, patience=50, batch=16, imgsz=640, save=True, save_period=-1, cache=False, device=None, workers=8, project=None, name=None, exist_ok=False, pretrained=True, optimizer=auto, verbose=True, seed=0, deterministic=True, single_cls=False, rect=False, cos_lr=False, close_mosaic=10, resume=False, amp=True, fraction=1.0, profile=False, overlap_mask=True, mask_ratio=4, dropout=0.0, val=True, split=val, save_json=False, save_hybrid=False, conf=None, iou=0.7, max_det=300, half=False, dnn=False, plots=True, source=None, show=False, save_txt=False, save_conf=False, save_crop=False, show_labels=True, show_conf=True, vid_stride=1, line_width=None, visualize=False, augment=False, agnostic_nms=False, classes=None, retina_masks=False, boxes=True, format=torchscript, keras=False, optimize=False, int8=False, dynamic=False, simplify=False, opset=None, workspace=4, nms=False, lr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=7.5, cls=0.5, dfl=1.5, pose=12.0, kobj=1.0, label_smoothing=0.0, nbs=64, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0, cfg=None, tracker=botsort.yaml, save_dir=runs/detect/train\n",
"\n",
" from n params module arguments \n",
" 0 -1 1 464 ultralytics.nn.modules.conv.Conv [3, 16, 3, 2] \n",
" 1 -1 1 4672 ultralytics.nn.modules.conv.Conv [16, 32, 3, 2] \n",
" 2 -1 1 7360 ultralytics.nn.modules.block.C2f [32, 32, 1, True] \n",
" 3 -1 1 18560 ultralytics.nn.modules.conv.Conv [32, 64, 3, 2] \n",
" 4 -1 2 49664 ultralytics.nn.modules.block.C2f [64, 64, 2, True] \n",
" 5 -1 1 73984 ultralytics.nn.modules.conv.Conv [64, 128, 3, 2] \n",
" 6 -1 2 197632 ultralytics.nn.modules.block.C2f [128, 128, 2, True] \n",
" 7 -1 1 295424 ultralytics.nn.modules.conv.Conv [128, 256, 3, 2] \n",
" 8 -1 1 460288 ultralytics.nn.modules.block.C2f [256, 256, 1, True] \n",
" 9 -1 1 164608 ultralytics.nn.modules.block.SPPF [256, 256, 5] \n",
" 10 -1 1 0 torch.nn.modules.upsampling.Upsample [None, 2, 'nearest'] \n",
" 11 [-1, 6] 1 0 ultralytics.nn.modules.conv.Concat [1] \n",
" 12 -1 1 148224 ultralytics.nn.modules.block.C2f [384, 128, 1] \n",
" 13 -1 1 0 torch.nn.modules.upsampling.Upsample [None, 2, 'nearest'] \n",
" 14 [-1, 4] 1 0 ultralytics.nn.modules.conv.Concat [1] \n",
" 15 -1 1 37248 ultralytics.nn.modules.block.C2f [192, 64, 1] \n",
" 16 -1 1 36992 ultralytics.nn.modules.conv.Conv [64, 64, 3, 2] \n",
" 17 [-1, 12] 1 0 ultralytics.nn.modules.conv.Concat [1] \n",
" 18 -1 1 123648 ultralytics.nn.modules.block.C2f [192, 128, 1] \n",
" 19 -1 1 147712 ultralytics.nn.modules.conv.Conv [128, 128, 3, 2] \n",
" 20 [-1, 9] 1 0 ultralytics.nn.modules.conv.Concat [1] \n",
" 21 -1 1 493056 ultralytics.nn.modules.block.C2f [384, 256, 1] \n",
" 22 [15, 18, 21] 1 897664 ultralytics.nn.modules.head.Detect [80, [64, 128, 256]] \n",
"Model summary: 225 layers, 3157200 parameters, 3157184 gradients\n",
"\n",
"Transferred 355/355 items from pretrained weights\n",
"\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/detect/train', view at http://localhost:6006/\n",
"\u001b[34m\u001b[1mAMP: \u001b[0mrunning Automatic Mixed Precision (AMP) checks with YOLOv8n...\n",
"\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ‚úÖ\n",
"\u001b[34m\u001b[1mtrain: \u001b[0mScanning /content/datasets/coco8/labels/train... 4 images, 0 backgrounds, 0 corrupt: 100% 4/4 [00:00<00:00, 860.11it/s]\n",
"\u001b[34m\u001b[1mtrain: \u001b[0mNew cache created: /content/datasets/coco8/labels/train.cache\n",
"\u001b[34m\u001b[1malbumentations: \u001b[0mBlur(p=0.01, blur_limit=(3, 7)), MedianBlur(p=0.01, blur_limit=(3, 7)), ToGray(p=0.01), CLAHE(p=0.01, clip_limit=(1, 4.0), tile_grid_size=(8, 8))\n",
"\u001b[34m\u001b[1mval: \u001b[0mScanning /content/datasets/coco8/labels/val.cache... 4 images, 0 backgrounds, 0 corrupt: 100% 4/4 [00:00<?, ?it/s]\n",
"Plotting labels to runs/detect/train/labels.jpg... \n",
"\u001b[34m\u001b[1moptimizer:\u001b[0m AdamW(lr=0.000119, momentum=0.9) with parameter groups 57 weight(decay=0.0), 64 weight(decay=0.0005), 63 bias(decay=0.0)\n",
"Image sizes 640 train, 640 val\n",
"Using 2 dataloader workers\n",
"Logging results to \u001b[1mruns/detect/train\u001b[0m\n",
"Starting training for 3 epochs...\n",
"\n",
" Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
" 1/3 0.761G 0.9273 3.155 1.291 32 640: 100% 1/1 [00:01<00:00, 1.23s/it]\n",
" Class Images Instances Box(P R mAP50 mAP50-95): 100% 1/1 [00:00<00:00, 2.21it/s]\n",
" all 4 17 0.613 0.899 0.888 0.621\n",
"\n",
" Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
" 2/3 0.78G 1.161 3.126 1.517 33 640: 100% 1/1 [00:00<00:00, 9.06it/s]\n",
" Class Images Instances Box(P R mAP50 mAP50-95): 100% 1/1 [00:00<00:00, 7.18it/s]\n",
" all 4 17 0.601 0.896 0.888 0.613\n",
"\n",
" Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
" 3/3 0.757G 0.9264 2.508 1.254 17 640: 100% 1/1 [00:00<00:00, 7.32it/s]\n",
" Class Images Instances Box(P R mAP50 mAP50-95): 100% 1/1 [00:00<00:00, 5.26it/s]\n",
" all 4 17 0.598 0.892 0.886 0.613\n",
"\n",
"3 epochs completed in 0.003 hours.\n",
"Optimizer stripped from runs/detect/train/weights/last.pt, 6.5MB\n",
"Optimizer stripped from runs/detect/train/weights/best.pt, 6.5MB\n",
"\n",
"Validating runs/detect/train/weights/best.pt...\n",
"Ultralytics YOLOv8.0.145 üöÄ Python-3.10.6 torch-2.0.1+cu118 CUDA:0 (Tesla T4, 15102MiB)\n",
"Model summary (fused): 168 layers, 3151904 parameters, 0 gradients\n",
" Class Images Instances Box(P R mAP50 mAP50-95): 100% 1/1 [00:00<00:00, 16.58it/s]\n",
" all 4 17 0.613 0.898 0.888 0.621\n",
" person 4 10 0.661 0.5 0.52 0.285\n",
" dog 4 1 0.337 1 0.995 0.597\n",
" horse 4 2 0.723 1 0.995 0.631\n",
" elephant 4 2 0.629 0.886 0.828 0.319\n",
" umbrella 4 1 0.55 1 0.995 0.995\n",
" potted plant 4 1 0.776 1 0.995 0.895\n",
"Speed: 0.2ms preprocess, 4.6ms inference, 0.0ms loss, 1.1ms postprocess per image\n",
"Results saved to \u001b[1mruns/detect/train\u001b[0m\n"
]
}
]
},
{
"cell_type": "markdown",
"source": [
"# 4. Export\n",
"\n",
"Export a YOLOv8 model to any supported format below with the `format` argument, i.e. `format=onnx`. See [YOLOv8 Export Docs](https://docs.ultralytics.com/modes/export/) for more information.\n",
"\n",
"- üí° ProTip: Export to [ONNX](https://onnx.ai/) or [OpenVINO](https://docs.openvino.ai/latest/index.html) for up to 3x CPU speedup. \n",
"- üí° ProTip: Export to [TensorRT](https://developer.nvidia.com/tensorrt) for up to 5x GPU speedup.\n",
"\n",
"\n",
"| Format | `format` Argument | Model | Metadata | Arguments |\n",
"|--------------------------------------------------------------------|-------------------|---------------------------|----------|-----------------------------------------------------|\n",
"| [PyTorch](https://pytorch.org/) | - | `yolov8n.pt` | ‚úÖ | - |\n",
"| [TorchScript](https://pytorch.org/docs/stable/jit.html) | `torchscript` | `yolov8n.torchscript` | ‚úÖ | `imgsz`, `optimize` |\n",
"| [ONNX](https://onnx.ai/) | `onnx` | `yolov8n.onnx` | ‚úÖ | `imgsz`, `half`, `dynamic`, `simplify`, `opset` |\n",
"| [OpenVINO](https://docs.openvino.ai/latest/index.html) | `openvino` | `yolov8n_openvino_model/` | ‚úÖ | `imgsz`, `half`, `int8` |\n",
"| [TensorRT](https://developer.nvidia.com/tensorrt) | `engine` | `yolov8n.engine` | ‚úÖ | `imgsz`, `half`, `dynamic`, `simplify`, `workspace` |\n",
"| [CoreML](https://github.com/apple/coremltools) | `coreml` | `yolov8n.mlpackage` | ‚úÖ | `imgsz`, `half`, `int8`, `nms` |\n",
"| [TF SavedModel](https://www.tensorflow.org/guide/saved_model) | `saved_model` | `yolov8n_saved_model/` | ‚úÖ | `imgsz`, `keras`, `int8` |\n",
"| [TF GraphDef](https://www.tensorflow.org/api_docs/python/tf/Graph) | `pb` | `yolov8n.pb` | ‚ùå | `imgsz` |\n",
"| [TF Lite](https://www.tensorflow.org/lite) | `tflite` | `yolov8n.tflite` | ‚úÖ | `imgsz`, `half`, `int8` |\n",
"| [TF Edge TPU](https://coral.ai/docs/edgetpu/models-intro/) | `edgetpu` | `yolov8n_edgetpu.tflite` | ‚úÖ | `imgsz` |\n",
"| [TF.js](https://www.tensorflow.org/js) | `tfjs` | `yolov8n_web_model/` | ‚úÖ | `imgsz` |\n",
"| [PaddlePaddle](https://github.com/PaddlePaddle) | `paddle` | `yolov8n_paddle_model/` | ‚úÖ | `imgsz` |\n",
"| [ncnn](https://github.com/Tencent/ncnn) | `ncnn` | `yolov8n_ncnn_model/` | ‚úÖ | `imgsz`, `half` |\n"
],
"metadata": {
"id": "nPZZeNrLCQG6"
}
},
{
"cell_type": "code",
"source": [
"!yolo export model=yolov8n.pt format=torchscript"
],
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "CYIjW4igCjqD",
"outputId": "2b65e381-717b-4a6f-d6f5-5254c867f3a4"
},
"execution_count": null,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"Ultralytics YOLOv8.0.145 üöÄ Python-3.10.6 torch-2.0.1+cu118 CPU (Intel Xeon 2.30GHz)\n",
"YOLOv8n summary (fused): 168 layers, 3151904 parameters, 0 gradients\n",
"\n",
"\u001b[34m\u001b[1mPyTorch:\u001b[0m starting from 'yolov8n.pt' with input shape (1, 3, 640, 640) BCHW and output shape(s) (1, 84, 8400) (6.2 MB)\n",
"\n",
"\u001b[34m\u001b[1mTorchScript:\u001b[0m starting export with torch 2.0.1+cu118...\n",
"\u001b[34m\u001b[1mTorchScript:\u001b[0m export success ‚úÖ 2.8s, saved as 'yolov8n.torchscript' (12.4 MB)\n",
"\n",
"Export complete (4.6s)\n",
"Results saved to \u001b[1m/content\u001b[0m\n",
"Predict: yolo predict task=detect model=yolov8n.torchscript imgsz=640 \n",
"Validate: yolo val task=detect model=yolov8n.torchscript imgsz=640 data=None \n",
"Visualize: https://netron.app\n"
]
}
]
},
{
"cell_type": "markdown",
"source": [
"# 5. Python Usage\n",
"\n",
"YOLOv8 was reimagined using Python-first principles for the most seamless Python YOLO experience yet. YOLOv8 models can be loaded from a trained checkpoint or created from scratch. Then methods are used to train, val, predict, and export the model. See detailed Python usage examples in the [YOLOv8 Python Docs](https://docs.ultralytics.com/usage/python/)."
],
"metadata": {
"id": "kUMOQ0OeDBJG"
}
},
{
"cell_type": "code",
"source": [
"from ultralytics import YOLO\n",
"\n",
"# Load a model\n",
"model = YOLO('yolov8n.yaml') # build a new model from scratch\n",
"model = YOLO('yolov8n.pt') # load a pretrained model (recommended for training)\n",
"\n",
"# Use the model\n",
"results = model.train(data='coco128.yaml', epochs=3) # train the model\n",
"results = model.val() # evaluate model performance on the validation set\n",
"results = model('https://ultralytics.com/images/bus.jpg') # predict on an image\n",
"results = model.export(format='onnx') # export the model to ONNX format"
],
"metadata": {
"id": "bpF9-vS_DAaf"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "markdown",
"source": [
"# 6. Tasks\n",
"\n",
"YOLOv8 can train, val, predict and export models for the most common tasks in vision AI: [Detect](https://docs.ultralytics.com/tasks/detect/), [Segment](https://docs.ultralytics.com/tasks/segment/), [Classify](https://docs.ultralytics.com/tasks/classify/) and [Pose](https://docs.ultralytics.com/tasks/pose/). See [YOLOv8 Tasks Docs](https://docs.ultralytics.com/tasks/) for more information.\n",
"\n",
"<br><img width=\"1024\" src=\"https://raw.githubusercontent.com/ultralytics/assets/main/im/banner-tasks.png\">\n"
],
"metadata": {
"id": "Phm9ccmOKye5"
}
},
{
"cell_type": "markdown",
"source": [
"## 1. Detection\n",
"\n",
"YOLOv8 _detection_ models have no suffix and are the default YOLOv8 models, i.e. `yolov8n.pt` and are pretrained on COCO. See [Detection Docs](https://docs.ultralytics.com/tasks/detect/) for full details.\n"
],
"metadata": {
"id": "yq26lwpYK1lq"
}
},
{
"cell_type": "code",
"source": [
"# Load YOLOv8n, train it on COCO128 for 3 epochs and predict an image with it\n",
"from ultralytics import YOLO\n",
"\n",
"model = YOLO('yolov8n.pt') # load a pretrained YOLOv8n detection model\n",
"model.train(data='coco128.yaml', epochs=3) # train the model\n",
"model('https://ultralytics.com/images/bus.jpg') # predict on an image"
],
"metadata": {
"id": "8Go5qqS9LbC5"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "markdown",
"source": [
"## 2. Segmentation\n",
"\n",
"YOLOv8 _segmentation_ models use the `-seg` suffix, i.e. `yolov8n-seg.pt` and are pretrained on COCO. See [Segmentation Docs](https://docs.ultralytics.com/tasks/segment/) for full details.\n"
],
"metadata": {
"id": "7ZW58jUzK66B"
}
},
{
"cell_type": "code",
"source": [
"# Load YOLOv8n-seg, train it on COCO128-seg for 3 epochs and predict an image with it\n",
"from ultralytics import YOLO\n",
"\n",
"model = YOLO('yolov8n-seg.pt') # load a pretrained YOLOv8n segmentation model\n",
"model.train(data='coco128-seg.yaml', epochs=3) # train the model\n",
"model('https://ultralytics.com/images/bus.jpg') # predict on an image"
],
"metadata": {
"id": "WFPJIQl_L5HT"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "markdown",
"source": [
"## 3. Classification\n",
"\n",
"YOLOv8 _classification_ models use the `-cls` suffix, i.e. `yolov8n-cls.pt` and are pretrained on ImageNet. See [Classification Docs](https://docs.ultralytics.com/tasks/classify/) for full details.\n"
],
"metadata": {
"id": "ax3p94VNK9zR"
}
},
{
"cell_type": "code",
"source": [
"# Load YOLOv8n-cls, train it on mnist160 for 3 epochs and predict an image with it\n",
"from ultralytics import YOLO\n",
"\n",
"model = YOLO('yolov8n-cls.pt') # load a pretrained YOLOv8n classification model\n",
"model.train(data='mnist160', epochs=3) # train the model\n",
"model('https://ultralytics.com/images/bus.jpg') # predict on an image"
],
"metadata": {
"id": "5q9Zu6zlL5rS"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "markdown",
"source": [
"## 4. Pose\n",
"\n",
"YOLOv8 _pose_ models use the `-pose` suffix, i.e. `yolov8n-pose.pt` and are pretrained on COCO Keypoints. See [Pose Docs](https://docs.ultralytics.com/tasks/pose/) for full details."
],
"metadata": {
"id": "SpIaFLiO11TG"
}
},
{
"cell_type": "code",
"source": [
"# Load YOLOv8n-pose, train it on COCO8-pose for 3 epochs and predict an image with it\n",
"from ultralytics import YOLO\n",
"\n",
"model = YOLO('yolov8n-pose.pt') # load a pretrained YOLOv8n classification model\n",
"model.train(data='coco8-pose.yaml', epochs=3) # train the model\n",
"model('https://ultralytics.com/images/bus.jpg') # predict on an image"
],
"metadata": {
"id": "si4aKFNg19vX"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "markdown",
"metadata": {
"id": "IEijrePND_2I"
},
"source": [
"# Appendix\n",
"\n",
"Additional content below."
]
},
{
"cell_type": "code",
"source": [
"# Pip install from source\n",
"!pip install git+https://github.com/ultralytics/ultralytics@main"
],
"metadata": {
"id": "pIdE6i8C3LYp"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# Git clone and run tests on updates branch\n",
"!git clone https://github.com/ultralytics/ultralytics -b main\n",
"%pip install -qe ultralytics"
],
"metadata": {
"id": "uRKlwxSJdhd1"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# Run tests (Git clone only)\n",
"!pytest ultralytics/tests"
],
"metadata": {
"id": "GtPlh7mcCGZX"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# Validate multiple models\n",
"for x in 'nsmlx':\n",
" !yolo val model=yolov8{x}.pt data=coco.yaml"
],
"metadata": {
"id": "Wdc6t_bfzDDk"
},
"execution_count": null,
"outputs": []
}
]
}
| 33,450 | Python | .py | 616 | 45.545455 | 1,413 | 0.508924 | arojsubedi/Improved-YOLOv8s | 8 | 5 | 0 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,784 | object_tracking.ipynb | arojsubedi_Improved-YOLOv8s/examples/object_tracking.ipynb | {
"nbformat": 4,
"nbformat_minor": 0,
"metadata": {
"colab": {
"provenance": [],
"gpuType": "T4"
},
"kernelspec": {
"name": "python3",
"display_name": "Python 3"
},
"language_info": {
"name": "python"
},
"accelerator": "GPU"
},
"cells": [
{
"cell_type": "markdown",
"source": [
"<div align=\"center\">\n",
"\n",
" <a href=\"https://ultralytics.com/yolov8\" target=\"_blank\">\n",
" <img width=\"1024\", src=\"https://raw.githubusercontent.com/ultralytics/assets/main/yolov8/banner-yolov8.png\"></a>\n",
"\n",
" [中文](https://docs.ultralytics.com/zh/) | [한국어](https://docs.ultralytics.com/ko/) | [日本語](https://docs.ultralytics.com/ja/) | [Русский](https://docs.ultralytics.com/ru/) | [Deutsch](https://docs.ultralytics.com/de/) | [Français](https://docs.ultralytics.com/fr/) | [Español](https://docs.ultralytics.com/es/) | [Português](https://docs.ultralytics.com/pt/) | [हिन्दी](https://docs.ultralytics.com/hi/) | [العربية](https://docs.ultralytics.com/ar/)\n",
"\n",
" <a href=\"https://colab.research.google.com/github/ultralytics/ultralytics/blob/main/examples/object_tracking.ipynb\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"></a>\n",
"\n",
"Welcome to the Ultralytics YOLOv8 üöÄ notebook! <a href=\"https://github.com/ultralytics/ultralytics\">YOLOv8</a> is the latest version of the YOLO (You Only Look Once) AI models developed by <a href=\"https://ultralytics.com\">Ultralytics</a>. This notebook serves as the starting point for exploring the <a href=\"https://docs.ultralytics.com/modes/track/\">Object Tracking</a> and understand its features and capabilities.\n",
"\n",
"YOLOv8 models are fast, accurate, and easy to use, making them ideal for various object detection and image segmentation tasks. They can be trained on large datasets and run on diverse hardware platforms, from CPUs to GPUs.\n",
"\n",
"We hope that the resources in this notebook will help you get the most out of <a href=\"https://docs.ultralytics.com/modes/track/\">Ultralytics Object Tracking</a>. Please browse the YOLOv8 <a href=\"https://docs.ultralytics.com/\">Docs</a> for details, raise an issue on <a href=\"https://github.com/ultralytics/ultralytics\">GitHub</a> for support, and join our <a href=\"https://ultralytics.com/discord\">Discord</a> community for questions and discussions!\n",
"\n",
"</div>"
],
"metadata": {
"id": "PN1cAxdvd61e"
}
},
{
"cell_type": "markdown",
"source": [
"# Setup\n",
"\n",
"Pip install `ultralytics` and [dependencies](https://github.com/ultralytics/ultralytics/blob/main/pyproject.toml) and check software and hardware."
],
"metadata": {
"id": "o68Sg1oOeZm2"
}
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "9dSwz_uOReMI"
},
"outputs": [],
"source": [
"!pip install ultralytics"
]
},
{
"cell_type": "markdown",
"source": [
"# Ultralytics Object Tracking\n",
"\n",
"Within the domain of video analytics, object tracking stands out as a crucial undertaking. It goes beyond merely identifying the location and class of objects within the frame; it also involves assigning a unique ID to each detected object as the video unfolds. The applications of this technology are vast, spanning from surveillance and security to real-time sports analytics."
],
"metadata": {
"id": "m7VkxQ2aeg7k"
}
},
{
"cell_type": "markdown",
"source": [
"## CLI"
],
"metadata": {
"id": "-ZF9DM6e6gz0"
}
},
{
"cell_type": "code",
"source": [
"!yolo track source=\"/path/to/video/file.mp4\" save=True"
],
"metadata": {
"id": "-XJqhOwo6iqT"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "markdown",
"source": [
"## Python\n",
"\n",
"- Draw Object tracking trails"
],
"metadata": {
"id": "XRcw0vIE6oNb"
}
},
{
"cell_type": "code",
"source": [
"import cv2\n",
"import numpy as np\n",
"from ultralytics import YOLO\n",
"\n",
"from ultralytics.utils.checks import check_imshow\n",
"from ultralytics.utils.plotting import Annotator, colors\n",
"\n",
"from collections import defaultdict\n",
"\n",
"track_history = defaultdict(lambda: [])\n",
"model = YOLO(\"yolov8n.pt\")\n",
"names = model.model.names\n",
"\n",
"video_path = \"/path/to/video/file.mp4\"\n",
"cap = cv2.VideoCapture(video_path)\n",
"assert cap.isOpened(), \"Error reading video file\"\n",
"\n",
"w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS))\n",
"\n",
"result = cv2.VideoWriter(\"object_tracking.avi\",\n",
" cv2.VideoWriter_fourcc(*'mp4v'),\n",
" fps,\n",
" (w, h))\n",
"\n",
"while cap.isOpened():\n",
" success, frame = cap.read()\n",
" if success:\n",
" results = model.track(frame, persist=True, verbose=False)\n",
" boxes = results[0].boxes.xyxy.cpu()\n",
"\n",
" if results[0].boxes.id is not None:\n",
"\n",
" # Extract prediction results\n",
" clss = results[0].boxes.cls.cpu().tolist()\n",
" track_ids = results[0].boxes.id.int().cpu().tolist()\n",
" confs = results[0].boxes.conf.float().cpu().tolist()\n",
"\n",
" # Annotator Init\n",
" annotator = Annotator(frame, line_width=2)\n",
"\n",
" for box, cls, track_id in zip(boxes, clss, track_ids):\n",
" annotator.box_label(box, color=colors(int(cls), True), label=names[int(cls)])\n",
"\n",
" # Store tracking history\n",
" track = track_history[track_id]\n",
" track.append((int((box[0] + box[2]) / 2), int((box[1] + box[3]) / 2)))\n",
" if len(track) > 30:\n",
" track.pop(0)\n",
"\n",
" # Plot tracks\n",
" points = np.array(track, dtype=np.int32).reshape((-1, 1, 2))\n",
" cv2.circle(frame, (track[-1]), 7, colors(int(cls), True), -1)\n",
" cv2.polylines(frame, [points], isClosed=False, color=colors(int(cls), True), thickness=2)\n",
"\n",
" result.write(frame)\n",
" if cv2.waitKey(1) & 0xFF == ord(\"q\"):\n",
" break\n",
" else:\n",
" break\n",
"\n",
"result.release()\n",
"cap.release()\n",
"cv2.destroyAllWindows()"
],
"metadata": {
"id": "Cx-u59HQdu2o"
},
"execution_count": null,
"outputs": []
},
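  {
   "cell_type": "markdown",
   "source": [
    "The loop above draws trails frame by frame. For whole-file tracking without custom drawing, `model.track` can also take a video path and a tracker configuration directly. A minimal sketch (the video path is a placeholder):"
   ],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "source": [
    "from ultralytics import YOLO\n",
    "\n",
    "model = YOLO('yolov8n.pt')\n",
    "\n",
    "# Track a whole video file; 'bytetrack.yaml' is a tracker config shipped with the ultralytics package\n",
    "results = model.track(source='/path/to/video/file.mp4', tracker='bytetrack.yaml', save=True)"
   ],
   "metadata": {},
   "execution_count": null,
   "outputs": []
  },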
{
"cell_type": "markdown",
"source": [
"#Community Support\n",
"\n",
"For more information, you can explore <a href=\"https://docs.ultralytics.com/modes/track/\">Ultralytics Object Tracking Docs</a>\n",
"\n",
"Ultralytics ‚ö° resources\n",
"- About Us – https://ultralytics.com/about\n",
"- Join Our Team – https://ultralytics.com/work\n",
"- Contact Us – https://ultralytics.com/contact\n",
"- Discord – https://discord.gg/2wNGbc6g9X\n",
"- Ultralytics License – https://ultralytics.com/license\n",
"\n",
"YOLOv8 üöÄ resources\n",
"- GitHub – https://github.com/ultralytics/ultralytics\n",
"- Docs – https://docs.ultralytics.com/"
],
"metadata": {
"id": "QrlKg-y3fEyD"
}
}
]
}
| 8,348 | Python | .py | 203 | 33.423645 | 506 | 0.534929 | arojsubedi/Improved-YOLOv8s | 8 | 5 | 0 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,785 | object_counting.ipynb | arojsubedi_Improved-YOLOv8s/examples/object_counting.ipynb | {
"nbformat": 4,
"nbformat_minor": 0,
"metadata": {
"colab": {
"provenance": [],
"gpuType": "T4"
},
"kernelspec": {
"name": "python3",
"display_name": "Python 3"
},
"language_info": {
"name": "python"
},
"accelerator": "GPU"
},
"cells": [
{
"cell_type": "markdown",
"source": [
"<div align=\"center\">\n",
"\n",
" <a href=\"https://ultralytics.com/yolov8\" target=\"_blank\">\n",
" <img width=\"1024\", src=\"https://raw.githubusercontent.com/ultralytics/assets/main/yolov8/banner-yolov8.png\"></a>\n",
"\n",
" [中文](https://docs.ultralytics.com/zh/) | [한국어](https://docs.ultralytics.com/ko/) | [日本語](https://docs.ultralytics.com/ja/) | [Русский](https://docs.ultralytics.com/ru/) | [Deutsch](https://docs.ultralytics.com/de/) | [Français](https://docs.ultralytics.com/fr/) | [Español](https://docs.ultralytics.com/es/) | [Português](https://docs.ultralytics.com/pt/) | [हिन्दी](https://docs.ultralytics.com/hi/) | [العربية](https://docs.ultralytics.com/ar/)\n",
"\n",
" <a href=\"https://colab.research.google.com/github/ultralytics/ultralytics/blob/main/examples/object_counting.ipynb\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"></a>\n",
"\n",
"Welcome to the Ultralytics YOLOv8 üöÄ notebook! <a href=\"https://github.com/ultralytics/ultralytics\">YOLOv8</a> is the latest version of the YOLO (You Only Look Once) AI models developed by <a href=\"https://ultralytics.com\">Ultralytics</a>. This notebook serves as the starting point for exploring the <a href=\"https://docs.ultralytics.com/guides/object-counting/\">Object Counting</a> and understand its features and capabilities.\n",
"\n",
"YOLOv8 models are fast, accurate, and easy to use, making them ideal for various object detection and image segmentation tasks. They can be trained on large datasets and run on diverse hardware platforms, from CPUs to GPUs.\n",
"\n",
"We hope that the resources in this notebook will help you get the most out of <a href=\"https://docs.ultralytics.com/guides/object-counting/\">Ultralytics Object Counting</a>. Please browse the YOLOv8 <a href=\"https://docs.ultralytics.com/\">Docs</a> for details, raise an issue on <a href=\"https://github.com/ultralytics/ultralytics\">GitHub</a> for support, and join our <a href=\"https://ultralytics.com/discord\">Discord</a> community for questions and discussions!\n",
"\n",
"</div>"
],
"metadata": {
"id": "PN1cAxdvd61e"
}
},
{
"cell_type": "markdown",
"source": [
"# Setup\n",
"\n",
"Pip install `ultralytics` and [dependencies](https://github.com/ultralytics/ultralytics/blob/main/pyproject.toml) and check software and hardware."
],
"metadata": {
"id": "o68Sg1oOeZm2"
}
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "9dSwz_uOReMI"
},
"outputs": [],
"source": [
"!pip install ultralytics"
]
},
{
"cell_type": "markdown",
"source": [
"# Ultralytics Object Counting\n",
"\n",
"Counting objects using Ultralytics YOLOv8 entails the precise detection and enumeration of specific objects within videos and camera streams. YOLOv8 demonstrates exceptional performance in real-time applications, delivering efficient and accurate object counting across diverse scenarios such as crowd analysis and surveillance. This is attributed to its advanced algorithms and deep learning capabilities."
],
"metadata": {
"id": "m7VkxQ2aeg7k"
}
},
{
"cell_type": "code",
"source": [
"from ultralytics import YOLO\n",
"from ultralytics.solutions import object_counter\n",
"import cv2\n",
"\n",
"model = YOLO(\"yolov8n.pt\")\n",
"cap = cv2.VideoCapture(\"path/to/video/file.mp4\")\n",
"assert cap.isOpened(), \"Error reading video file\"\n",
"w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS))\n",
"\n",
"# Define region points\n",
"region_points = [(20, 400), (1080, 404), (1080, 360), (20, 360)]\n",
"\n",
"# Video writer\n",
"video_writer = cv2.VideoWriter(\"object_counting_output.avi\",\n",
" cv2.VideoWriter_fourcc(*'mp4v'),\n",
" fps,\n",
" (w, h))\n",
"\n",
"# Init Object Counter\n",
"counter = object_counter.ObjectCounter()\n",
"counter.set_args(view_img=True,\n",
" reg_pts=region_points,\n",
" classes_names=model.names,\n",
" draw_tracks=True)\n",
"\n",
"while cap.isOpened():\n",
" success, im0 = cap.read()\n",
" if not success:\n",
" print(\"Video frame is empty or video processing has been successfully completed.\")\n",
" break\n",
" tracks = model.track(im0, persist=True, show=False)\n",
"\n",
" im0 = counter.start_counting(im0, tracks)\n",
" video_writer.write(im0)\n",
"\n",
"cap.release()\n",
"video_writer.release()\n",
"cv2.destroyAllWindows()"
],
"metadata": {
"id": "Cx-u59HQdu2o"
},
"execution_count": null,
"outputs": []
},
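  {
   "cell_type": "markdown",
   "source": [
    "For line-based counting instead of a polygon region, `reg_pts` can also be passed as just two points. A minimal sketch reusing the model and video setup above:"
   ],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "source": [
    "# Two points define a counting line instead of a polygon region\n",
    "line_points = [(20, 400), (1080, 400)]\n",
    "\n",
    "counter = object_counter.ObjectCounter()\n",
    "counter.set_args(view_img=True,\n",
    "                 reg_pts=line_points,\n",
    "                 classes_names=model.names,\n",
    "                 draw_tracks=True)"
   ],
   "metadata": {},
   "execution_count": null,
   "outputs": []
  },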
{
"cell_type": "markdown",
"source": [
"#Community Support\n",
"\n",
"For more information, you can explore <a href=\"https://docs.ultralytics.com/guides/object-counting/\">Ultralytics Object Counting Docs</a>\n",
"\n",
"Ultralytics ‚ö° resources\n",
"- About Us – https://ultralytics.com/about\n",
"- Join Our Team – https://ultralytics.com/work\n",
"- Contact Us – https://ultralytics.com/contact\n",
"- Discord – https://discord.gg/2wNGbc6g9X\n",
"- Ultralytics License – https://ultralytics.com/license\n",
"\n",
"YOLOv8 üöÄ resources\n",
"- GitHub – https://github.com/ultralytics/ultralytics\n",
"- Docs – https://docs.ultralytics.com/"
],
"metadata": {
"id": "QrlKg-y3fEyD"
}
}
]
} | 6,574 | Python | .py | 147 | 37.129252 | 506 | 0.57794 | arojsubedi/Improved-YOLOv8s | 8 | 5 | 0 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,786 | main.py | arojsubedi_Improved-YOLOv8s/examples/YOLOv8-ONNXRuntime/main.py | # Ultralytics YOLO 🚀, AGPL-3.0 license
import argparse
import cv2
import numpy as np
import onnxruntime as ort
import torch
from ultralytics.utils import ASSETS, yaml_load
from ultralytics.utils.checks import check_requirements, check_yaml
class YOLOv8:
"""YOLOv8 object detection model class for handling inference and visualization."""
def __init__(self, onnx_model, input_image, confidence_thres, iou_thres):
"""
Initializes an instance of the YOLOv8 class.
Args:
onnx_model: Path to the ONNX model.
input_image: Path to the input image.
confidence_thres: Confidence threshold for filtering detections.
iou_thres: IoU (Intersection over Union) threshold for non-maximum suppression.
"""
self.onnx_model = onnx_model
self.input_image = input_image
self.confidence_thres = confidence_thres
self.iou_thres = iou_thres
# Load the class names from the COCO dataset
self.classes = yaml_load(check_yaml("coco128.yaml"))["names"]
# Generate a color palette for the classes
self.color_palette = np.random.uniform(0, 255, size=(len(self.classes), 3))
def draw_detections(self, img, box, score, class_id):
"""
Draws bounding boxes and labels on the input image based on the detected objects.
Args:
img: The input image to draw detections on.
box: Detected bounding box.
score: Corresponding detection score.
class_id: Class ID for the detected object.
Returns:
None
"""
# Extract the coordinates of the bounding box
x1, y1, w, h = box
# Retrieve the color for the class ID
color = self.color_palette[class_id]
# Draw the bounding box on the image
cv2.rectangle(img, (int(x1), int(y1)), (int(x1 + w), int(y1 + h)), color, 2)
# Create the label text with class name and score
label = f"{self.classes[class_id]}: {score:.2f}"
# Calculate the dimensions of the label text
(label_width, label_height), _ = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 1)
# Calculate the position of the label text
label_x = x1
label_y = y1 - 10 if y1 - 10 > label_height else y1 + 10
# Draw a filled rectangle as the background for the label text
cv2.rectangle(
img, (label_x, label_y - label_height), (label_x + label_width, label_y + label_height), color, cv2.FILLED
)
# Draw the label text on the image
cv2.putText(img, label, (label_x, label_y), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1, cv2.LINE_AA)
def preprocess(self):
"""
Preprocesses the input image before performing inference.
Returns:
image_data: Preprocessed image data ready for inference.
"""
# Read the input image using OpenCV
self.img = cv2.imread(self.input_image)
# Get the height and width of the input image
self.img_height, self.img_width = self.img.shape[:2]
# Convert the image color space from BGR to RGB
img = cv2.cvtColor(self.img, cv2.COLOR_BGR2RGB)
# Resize the image to match the input shape
img = cv2.resize(img, (self.input_width, self.input_height))
# Normalize the image data by dividing it by 255.0
image_data = np.array(img) / 255.0
# Transpose the image to have the channel dimension as the first dimension
image_data = np.transpose(image_data, (2, 0, 1)) # Channel first
# Expand the dimensions of the image data to match the expected input shape
image_data = np.expand_dims(image_data, axis=0).astype(np.float32)
# Return the preprocessed image data
return image_data
def postprocess(self, input_image, output):
"""
Performs post-processing on the model's output to extract bounding boxes, scores, and class IDs.
Args:
input_image (numpy.ndarray): The input image.
output (numpy.ndarray): The output of the model.
Returns:
numpy.ndarray: The input image with detections drawn on it.
"""
# Transpose and squeeze the output to match the expected shape
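        # For a 640x640 COCO model the raw output is (1, 84, 8400): 4 box values + 80 class scores per anchor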
outputs = np.transpose(np.squeeze(output[0]))
# Get the number of rows in the outputs array
rows = outputs.shape[0]
# Lists to store the bounding boxes, scores, and class IDs of the detections
boxes = []
scores = []
class_ids = []
# Calculate the scaling factors for the bounding box coordinates
x_factor = self.img_width / self.input_width
y_factor = self.img_height / self.input_height
# Iterate over each row in the outputs array
for i in range(rows):
# Extract the class scores from the current row
classes_scores = outputs[i][4:]
# Find the maximum score among the class scores
max_score = np.amax(classes_scores)
# If the maximum score is above the confidence threshold
if max_score >= self.confidence_thres:
# Get the class ID with the highest score
class_id = np.argmax(classes_scores)
# Extract the bounding box coordinates from the current row
x, y, w, h = outputs[i][0], outputs[i][1], outputs[i][2], outputs[i][3]
# Calculate the scaled coordinates of the bounding box
left = int((x - w / 2) * x_factor)
top = int((y - h / 2) * y_factor)
width = int(w * x_factor)
height = int(h * y_factor)
# Add the class ID, score, and box coordinates to the respective lists
class_ids.append(class_id)
scores.append(max_score)
boxes.append([left, top, width, height])
# Apply non-maximum suppression to filter out overlapping bounding boxes
indices = cv2.dnn.NMSBoxes(boxes, scores, self.confidence_thres, self.iou_thres)
# Iterate over the selected indices after non-maximum suppression
for i in indices:
# Get the box, score, and class ID corresponding to the index
box = boxes[i]
score = scores[i]
class_id = class_ids[i]
# Draw the detection on the input image
self.draw_detections(input_image, box, score, class_id)
# Return the modified input image
return input_image
def main(self):
"""
Performs inference using an ONNX model and returns the output image with drawn detections.
Returns:
output_img: The output image with drawn detections.
"""
# Create an inference session using the ONNX model and specify execution providers
session = ort.InferenceSession(self.onnx_model, providers=["CUDAExecutionProvider", "CPUExecutionProvider"])
# Get the model inputs
model_inputs = session.get_inputs()
# Store the shape of the input for later use
input_shape = model_inputs[0].shape
self.input_width = input_shape[2]
self.input_height = input_shape[3]
# Preprocess the image data
img_data = self.preprocess()
# Run inference using the preprocessed image data
outputs = session.run(None, {model_inputs[0].name: img_data})
# Perform post-processing on the outputs to obtain output image.
return self.postprocess(self.img, outputs) # output image
if __name__ == "__main__":
# Create an argument parser to handle command-line arguments
parser = argparse.ArgumentParser()
parser.add_argument("--model", type=str, default="yolov8n.onnx", help="Input your ONNX model.")
parser.add_argument("--img", type=str, default=str(ASSETS / "bus.jpg"), help="Path to input image.")
parser.add_argument("--conf-thres", type=float, default=0.5, help="Confidence threshold")
parser.add_argument("--iou-thres", type=float, default=0.5, help="NMS IoU threshold")
args = parser.parse_args()
# Check the requirements and select the appropriate backend (CPU or GPU)
check_requirements("onnxruntime-gpu" if torch.cuda.is_available() else "onnxruntime")
# Create an instance of the YOLOv8 class with the specified arguments
detection = YOLOv8(args.model, args.img, args.conf_thres, args.iou_thres)
# Perform object detection and obtain the output image
output_image = detection.main()
# Display the output image in a window
cv2.namedWindow("Output", cv2.WINDOW_NORMAL)
cv2.imshow("Output", output_image)
# Wait for a key press to exit
cv2.waitKey(0)
| 8,812 | Python | .py | 171 | 41.97076 | 118 | 0.643748 | arojsubedi/Improved-YOLOv8s | 8 | 5 | 0 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,787 | yolov8_region_counter.py | arojsubedi_Improved-YOLOv8s/examples/YOLOv8-Region-Counter/yolov8_region_counter.py | # Ultralytics YOLO 🚀, AGPL-3.0 license
import argparse
from collections import defaultdict
from pathlib import Path
import cv2
import numpy as np
from shapely.geometry import Polygon
from shapely.geometry.point import Point
from ultralytics import YOLO
from ultralytics.utils.files import increment_path
from ultralytics.utils.plotting import Annotator, colors
track_history = defaultdict(list)
current_region = None
counting_regions = [
{
"name": "YOLOv8 Polygon Region",
"polygon": Polygon([(50, 80), (250, 20), (450, 80), (400, 350), (100, 350)]), # Polygon points
"counts": 0,
"dragging": False,
"region_color": (255, 42, 4), # BGR Value
"text_color": (255, 255, 255), # Region Text Color
},
{
"name": "YOLOv8 Rectangle Region",
"polygon": Polygon([(200, 250), (440, 250), (440, 550), (200, 550)]), # Polygon points
"counts": 0,
"dragging": False,
"region_color": (37, 255, 225), # BGR Value
"text_color": (0, 0, 0), # Region Text Color
},
]
def mouse_callback(event, x, y, flags, param):
"""
Handles mouse events for region manipulation.
Parameters:
event (int): The mouse event type (e.g., cv2.EVENT_LBUTTONDOWN).
x (int): The x-coordinate of the mouse pointer.
y (int): The y-coordinate of the mouse pointer.
flags (int): Additional flags passed by OpenCV.
param: Additional parameters passed to the callback (not used in this function).
Global Variables:
current_region (dict): A dictionary representing the current selected region.
Mouse Events:
- LBUTTONDOWN: Initiates dragging for the region containing the clicked point.
- MOUSEMOVE: Moves the selected region if dragging is active.
- LBUTTONUP: Ends dragging for the selected region.
Notes:
- This function is intended to be used as a callback for OpenCV mouse events.
- Requires the existence of the 'counting_regions' list and the 'Polygon' class.
Example:
>>> cv2.setMouseCallback(window_name, mouse_callback)
"""
global current_region
# Mouse left button down event
if event == cv2.EVENT_LBUTTONDOWN:
for region in counting_regions:
if region["polygon"].contains(Point((x, y))):
current_region = region
current_region["dragging"] = True
current_region["offset_x"] = x
current_region["offset_y"] = y
# Mouse move event
elif event == cv2.EVENT_MOUSEMOVE:
if current_region is not None and current_region["dragging"]:
dx = x - current_region["offset_x"]
dy = y - current_region["offset_y"]
current_region["polygon"] = Polygon(
[(p[0] + dx, p[1] + dy) for p in current_region["polygon"].exterior.coords]
)
current_region["offset_x"] = x
current_region["offset_y"] = y
# Mouse left button up event
elif event == cv2.EVENT_LBUTTONUP:
if current_region is not None and current_region["dragging"]:
current_region["dragging"] = False
def run(
weights="yolov8n.pt",
source=None,
device="cpu",
view_img=False,
save_img=False,
exist_ok=False,
classes=None,
line_thickness=2,
track_thickness=2,
region_thickness=2,
):
"""
Run Region counting on a video using YOLOv8 and ByteTrack.
Supports movable region for real time counting inside specific area.
Supports multiple regions counting.
Regions can be Polygons or rectangle in shape
Args:
weights (str): Model weights path.
source (str): Video file path.
device (str): processing device cpu, 0, 1
view_img (bool): Show results.
save_img (bool): Save results.
exist_ok (bool): Overwrite existing files.
classes (list): classes to detect and track
line_thickness (int): Bounding box thickness.
track_thickness (int): Tracking line thickness
region_thickness (int): Region thickness.
"""
vid_frame_count = 0
# Check source path
if not Path(source).exists():
raise FileNotFoundError(f"Source path '{source}' does not exist.")
# Setup Model
model = YOLO(f"{weights}")
model.to("cuda") if device == "0" else model.to("cpu")
# Extract classes names
names = model.model.names
# Video setup
videocapture = cv2.VideoCapture(source)
frame_width, frame_height = int(videocapture.get(3)), int(videocapture.get(4))
fps, fourcc = int(videocapture.get(5)), cv2.VideoWriter_fourcc(*"mp4v")
# Output setup
save_dir = increment_path(Path("ultralytics_rc_output") / "exp", exist_ok)
save_dir.mkdir(parents=True, exist_ok=True)
video_writer = cv2.VideoWriter(str(save_dir / f"{Path(source).stem}.mp4"), fourcc, fps, (frame_width, frame_height))
# Iterate over video frames
while videocapture.isOpened():
success, frame = videocapture.read()
if not success:
break
vid_frame_count += 1
# Extract the results
results = model.track(frame, persist=True, classes=classes)
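        # persist=True keeps track IDs consistent across consecutive frames of the same video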
if results[0].boxes.id is not None:
boxes = results[0].boxes.xyxy.cpu()
track_ids = results[0].boxes.id.int().cpu().tolist()
clss = results[0].boxes.cls.cpu().tolist()
annotator = Annotator(frame, line_width=line_thickness, example=str(names))
for box, track_id, cls in zip(boxes, track_ids, clss):
annotator.box_label(box, str(names[cls]), color=colors(cls, True))
bbox_center = (box[0] + box[2]) / 2, (box[1] + box[3]) / 2 # Bbox center
track = track_history[track_id] # Tracking Lines plot
track.append((float(bbox_center[0]), float(bbox_center[1])))
if len(track) > 30:
track.pop(0)
points = np.hstack(track).astype(np.int32).reshape((-1, 1, 2))
cv2.polylines(frame, [points], isClosed=False, color=colors(cls, True), thickness=track_thickness)
# Check if detection inside region
for region in counting_regions:
if region["polygon"].contains(Point((bbox_center[0], bbox_center[1]))):
region["counts"] += 1
# Draw regions (Polygons/Rectangles)
for region in counting_regions:
region_label = str(region["counts"])
region_color = region["region_color"]
region_text_color = region["text_color"]
polygon_coords = np.array(region["polygon"].exterior.coords, dtype=np.int32)
centroid_x, centroid_y = int(region["polygon"].centroid.x), int(region["polygon"].centroid.y)
text_size, _ = cv2.getTextSize(
region_label, cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.7, thickness=line_thickness
)
text_x = centroid_x - text_size[0] // 2
text_y = centroid_y + text_size[1] // 2
cv2.rectangle(
frame,
(text_x - 5, text_y - text_size[1] - 5),
(text_x + text_size[0] + 5, text_y + 5),
region_color,
-1,
)
cv2.putText(
frame, region_label, (text_x, text_y), cv2.FONT_HERSHEY_SIMPLEX, 0.7, region_text_color, line_thickness
)
cv2.polylines(frame, [polygon_coords], isClosed=True, color=region_color, thickness=region_thickness)
if view_img:
if vid_frame_count == 1:
cv2.namedWindow("Ultralytics YOLOv8 Region Counter Movable")
cv2.setMouseCallback("Ultralytics YOLOv8 Region Counter Movable", mouse_callback)
cv2.imshow("Ultralytics YOLOv8 Region Counter Movable", frame)
if save_img:
video_writer.write(frame)
for region in counting_regions: # Reinitialize count for each region
region["counts"] = 0
if cv2.waitKey(1) & 0xFF == ord("q"):
break
del vid_frame_count
video_writer.release()
videocapture.release()
cv2.destroyAllWindows()
def parse_opt():
"""Parse command line arguments."""
parser = argparse.ArgumentParser()
parser.add_argument("--weights", type=str, default="yolov8n.pt", help="initial weights path")
parser.add_argument("--device", default="", help="cuda device, i.e. 0 or 0,1,2,3 or cpu")
parser.add_argument("--source", type=str, required=True, help="video file path")
parser.add_argument("--view-img", action="store_true", help="show results")
parser.add_argument("--save-img", action="store_true", help="save results")
parser.add_argument("--exist-ok", action="store_true", help="existing project/name ok, do not increment")
parser.add_argument("--classes", nargs="+", type=int, help="filter by class: --classes 0, or --classes 0 2 3")
parser.add_argument("--line-thickness", type=int, default=2, help="bounding box thickness")
parser.add_argument("--track-thickness", type=int, default=2, help="Tracking line thickness")
parser.add_argument("--region-thickness", type=int, default=4, help="Region thickness")
return parser.parse_args()
def main(opt):
"""Main function."""
run(**vars(opt))
if __name__ == "__main__":
opt = parse_opt()
main(opt)
| 9,478 | Python | .py | 205 | 37.556098 | 120 | 0.624472 | arojsubedi/Improved-YOLOv8s | 8 | 5 | 0 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,788 | yolov8_sahi.py | arojsubedi_Improved-YOLOv8s/examples/YOLOv8-SAHI-Inference-Video/yolov8_sahi.py | # Ultralytics YOLO 🚀, AGPL-3.0 license
import argparse
from pathlib import Path
import cv2
from sahi import AutoDetectionModel
from sahi.predict import get_sliced_prediction
from sahi.utils.yolov8 import download_yolov8s_model
from ultralytics.utils.files import increment_path
def run(weights="yolov8n.pt", source="test.mp4", view_img=False, save_img=False, exist_ok=False):
"""
Run object detection on a video using YOLOv8 and SAHI.
Args:
weights (str): Model weights path.
source (str): Video file path.
view_img (bool): Show results.
save_img (bool): Save results.
exist_ok (bool): Overwrite existing files.
"""
# Check source path
if not Path(source).exists():
raise FileNotFoundError(f"Source path '{source}' does not exist.")
yolov8_model_path = f"models/{weights}"
download_yolov8s_model(yolov8_model_path)
detection_model = AutoDetectionModel.from_pretrained(
model_type="yolov8", model_path=yolov8_model_path, confidence_threshold=0.3, device="cpu"
)
# Video setup
videocapture = cv2.VideoCapture(source)
frame_width, frame_height = int(videocapture.get(3)), int(videocapture.get(4))
fps, fourcc = int(videocapture.get(5)), cv2.VideoWriter_fourcc(*"mp4v")
# Output setup
save_dir = increment_path(Path("ultralytics_results_with_sahi") / "exp", exist_ok)
save_dir.mkdir(parents=True, exist_ok=True)
video_writer = cv2.VideoWriter(str(save_dir / f"{Path(source).stem}.mp4"), fourcc, fps, (frame_width, frame_height))
while videocapture.isOpened():
success, frame = videocapture.read()
if not success:
break
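        # SAHI slices the frame into 512x512 tiles with 20% overlap, runs the detector on each
        # tile, and merges the tile-level predictions back into full-frame detections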
results = get_sliced_prediction(
frame, detection_model, slice_height=512, slice_width=512, overlap_height_ratio=0.2, overlap_width_ratio=0.2
)
object_prediction_list = results.object_prediction_list
boxes_list = []
clss_list = []
for ind, _ in enumerate(object_prediction_list):
boxes = (
object_prediction_list[ind].bbox.minx,
object_prediction_list[ind].bbox.miny,
object_prediction_list[ind].bbox.maxx,
object_prediction_list[ind].bbox.maxy,
)
clss = object_prediction_list[ind].category.name
boxes_list.append(boxes)
clss_list.append(clss)
for box, cls in zip(boxes_list, clss_list):
x1, y1, x2, y2 = box
cv2.rectangle(frame, (int(x1), int(y1)), (int(x2), int(y2)), (56, 56, 255), 2)
label = str(cls)
t_size = cv2.getTextSize(label, 0, fontScale=0.6, thickness=1)[0]
cv2.rectangle(
frame, (int(x1), int(y1) - t_size[1] - 3), (int(x1) + t_size[0], int(y1) + 3), (56, 56, 255), -1
)
cv2.putText(
frame, label, (int(x1), int(y1) - 2), 0, 0.6, [255, 255, 255], thickness=1, lineType=cv2.LINE_AA
)
if view_img:
cv2.imshow(Path(source).stem, frame)
if save_img:
video_writer.write(frame)
if cv2.waitKey(1) & 0xFF == ord("q"):
break
video_writer.release()
videocapture.release()
cv2.destroyAllWindows()
def parse_opt():
"""Parse command line arguments."""
parser = argparse.ArgumentParser()
parser.add_argument("--weights", type=str, default="yolov8n.pt", help="initial weights path")
parser.add_argument("--source", type=str, required=True, help="video file path")
parser.add_argument("--view-img", action="store_true", help="show results")
parser.add_argument("--save-img", action="store_true", help="save results")
parser.add_argument("--exist-ok", action="store_true", help="existing project/name ok, do not increment")
return parser.parse_args()
def main(opt):
"""Main function."""
run(**vars(opt))
if __name__ == "__main__":
opt = parse_opt()
main(opt)
| 3,997 | Python | .py | 89 | 36.921348 | 120 | 0.632784 | arojsubedi/Improved-YOLOv8s | 8 | 5 | 0 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,789 | main.py | arojsubedi_Improved-YOLOv8s/examples/YOLOv8-OpenCV-ONNX-Python/main.py | # Ultralytics YOLO 🚀, AGPL-3.0 license
import argparse
import cv2.dnn
import numpy as np
from ultralytics.utils import ASSETS, yaml_load
from ultralytics.utils.checks import check_yaml
CLASSES = yaml_load(check_yaml("coco128.yaml"))["names"]
colors = np.random.uniform(0, 255, size=(len(CLASSES), 3))
def draw_bounding_box(img, class_id, confidence, x, y, x_plus_w, y_plus_h):
"""
Draws bounding boxes on the input image based on the provided arguments.
Args:
img (numpy.ndarray): The input image to draw the bounding box on.
class_id (int): Class ID of the detected object.
confidence (float): Confidence score of the detected object.
x (int): X-coordinate of the top-left corner of the bounding box.
y (int): Y-coordinate of the top-left corner of the bounding box.
x_plus_w (int): X-coordinate of the bottom-right corner of the bounding box.
y_plus_h (int): Y-coordinate of the bottom-right corner of the bounding box.
"""
label = f"{CLASSES[class_id]} ({confidence:.2f})"
color = colors[class_id]
cv2.rectangle(img, (x, y), (x_plus_w, y_plus_h), color, 2)
cv2.putText(img, label, (x - 10, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
def main(onnx_model, input_image):
"""
Main function to load ONNX model, perform inference, draw bounding boxes, and display the output image.
Args:
onnx_model (str): Path to the ONNX model.
input_image (str): Path to the input image.
Returns:
list: List of dictionaries containing detection information such as class_id, class_name, confidence, etc.
"""
# Load the ONNX model
model: cv2.dnn.Net = cv2.dnn.readNetFromONNX(onnx_model)
# Read the input image
original_image: np.ndarray = cv2.imread(input_image)
[height, width, _] = original_image.shape
# Prepare a square image for inference
length = max((height, width))
image = np.zeros((length, length, 3), np.uint8)
image[0:height, 0:width] = original_image
# Calculate scale factor
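    # (the model runs on a 640x640 blob, so this maps predicted boxes back to original image coordinates)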
scale = length / 640
# Preprocess the image and prepare blob for model
blob = cv2.dnn.blobFromImage(image, scalefactor=1 / 255, size=(640, 640), swapRB=True)
model.setInput(blob)
# Perform inference
outputs = model.forward()
# Prepare output array
outputs = np.array([cv2.transpose(outputs[0])])
rows = outputs.shape[1]
boxes = []
scores = []
class_ids = []
# Iterate through output to collect bounding boxes, confidence scores, and class IDs
for i in range(rows):
classes_scores = outputs[0][i][4:]
(minScore, maxScore, minClassLoc, (x, maxClassIndex)) = cv2.minMaxLoc(classes_scores)
if maxScore >= 0.25:
box = [
outputs[0][i][0] - (0.5 * outputs[0][i][2]),
outputs[0][i][1] - (0.5 * outputs[0][i][3]),
outputs[0][i][2],
outputs[0][i][3],
]
boxes.append(box)
scores.append(maxScore)
class_ids.append(maxClassIndex)
# Apply NMS (Non-maximum suppression)
result_boxes = cv2.dnn.NMSBoxes(boxes, scores, 0.25, 0.45, 0.5)
detections = []
# Iterate through NMS results to draw bounding boxes and labels
for i in range(len(result_boxes)):
index = result_boxes[i]
box = boxes[index]
detection = {
"class_id": class_ids[index],
"class_name": CLASSES[class_ids[index]],
"confidence": scores[index],
"box": box,
"scale": scale,
}
detections.append(detection)
draw_bounding_box(
original_image,
class_ids[index],
scores[index],
round(box[0] * scale),
round(box[1] * scale),
round((box[0] + box[2]) * scale),
round((box[1] + box[3]) * scale),
)
# Display the image with bounding boxes
cv2.imshow("image", original_image)
cv2.waitKey(0)
cv2.destroyAllWindows()
return detections
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--model", default="yolov8n.onnx", help="Input your ONNX model.")
parser.add_argument("--img", default=str(ASSETS / "bus.jpg"), help="Path to input image.")
args = parser.parse_args()
main(args.model, args.img)
| 4,404 | Python | .py | 104 | 34.980769 | 114 | 0.629621 | arojsubedi/Improved-YOLOv8s | 8 | 5 | 0 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,790 | main.py | arojsubedi_Improved-YOLOv8s/examples/YOLOv8-Segmentation-ONNXRuntime-Python/main.py | # Ultralytics YOLO 🚀, AGPL-3.0 license
import argparse
import cv2
import numpy as np
import onnxruntime as ort
from ultralytics.utils import ASSETS, yaml_load
from ultralytics.utils.checks import check_yaml
from ultralytics.utils.plotting import Colors
class YOLOv8Seg:
"""YOLOv8 segmentation model."""
def __init__(self, onnx_model):
"""
Initialization.
Args:
onnx_model (str): Path to the ONNX model.
"""
# Build Ort session
self.session = ort.InferenceSession(
onnx_model,
providers=["CUDAExecutionProvider", "CPUExecutionProvider"]
if ort.get_device() == "GPU"
else ["CPUExecutionProvider"],
)
# Numpy dtype: support both FP32 and FP16 onnx model
self.ndtype = np.half if self.session.get_inputs()[0].type == "tensor(float16)" else np.single
# Get model width and height(YOLOv8-seg only has one input)
self.model_height, self.model_width = [x.shape for x in self.session.get_inputs()][0][-2:]
# Load COCO class names
self.classes = yaml_load(check_yaml("coco128.yaml"))["names"]
# Create color palette
self.color_palette = Colors()
def __call__(self, im0, conf_threshold=0.4, iou_threshold=0.45, nm=32):
"""
The whole pipeline: pre-process -> inference -> post-process.
Args:
im0 (Numpy.ndarray): original input image.
conf_threshold (float): confidence threshold for filtering predictions.
iou_threshold (float): iou threshold for NMS.
nm (int): the number of masks.
Returns:
boxes (List): list of bounding boxes.
segments (List): list of segments.
masks (np.ndarray): [N, H, W], output masks.
"""
# Pre-process
im, ratio, (pad_w, pad_h) = self.preprocess(im0)
# Ort inference
preds = self.session.run(None, {self.session.get_inputs()[0].name: im})
# Post-process
boxes, segments, masks = self.postprocess(
preds,
im0=im0,
ratio=ratio,
pad_w=pad_w,
pad_h=pad_h,
conf_threshold=conf_threshold,
iou_threshold=iou_threshold,
nm=nm,
)
return boxes, segments, masks
def preprocess(self, img):
"""
Pre-processes the input image.
Args:
img (Numpy.ndarray): image about to be processed.
Returns:
img_process (Numpy.ndarray): image preprocessed for inference.
ratio (tuple): width, height ratios in letterbox.
pad_w (float): width padding in letterbox.
pad_h (float): height padding in letterbox.
"""
# Resize and pad input image using letterbox() (Borrowed from Ultralytics)
shape = img.shape[:2] # original image shape
new_shape = (self.model_height, self.model_width)
r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
ratio = r, r
new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
pad_w, pad_h = (new_shape[1] - new_unpad[0]) / 2, (new_shape[0] - new_unpad[1]) / 2 # wh padding
if shape[::-1] != new_unpad: # resize
img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
top, bottom = int(round(pad_h - 0.1)), int(round(pad_h + 0.1))
left, right = int(round(pad_w - 0.1)), int(round(pad_w + 0.1))
img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=(114, 114, 114))
# Transforms: HWC to CHW -> BGR to RGB -> div(255) -> contiguous -> add axis(optional)
img = np.ascontiguousarray(np.einsum("HWC->CHW", img)[::-1], dtype=self.ndtype) / 255.0
img_process = img[None] if len(img.shape) == 3 else img
return img_process, ratio, (pad_w, pad_h)
def postprocess(self, preds, im0, ratio, pad_w, pad_h, conf_threshold, iou_threshold, nm=32):
"""
Post-process the prediction.
Args:
preds (Numpy.ndarray): predictions come from ort.session.run().
im0 (Numpy.ndarray): [h, w, c] original input image.
ratio (tuple): width, height ratios in letterbox.
pad_w (float): width padding in letterbox.
pad_h (float): height padding in letterbox.
conf_threshold (float): conf threshold.
iou_threshold (float): iou threshold.
nm (int): the number of masks.
Returns:
boxes (List): list of bounding boxes.
segments (List): list of segments.
masks (np.ndarray): [N, H, W], output masks.
"""
x, protos = preds[0], preds[1] # Two outputs: predictions and protos
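        # Typical shapes for a COCO seg model: x is (1, 4 + num_classes + nm, num_anchors), protos is (1, nm, mask_h, mask_w)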
# Transpose the first output: (Batch_size, xywh_conf_cls_nm, Num_anchors) -> (Batch_size, Num_anchors, xywh_conf_cls_nm)
x = np.einsum("bcn->bnc", x)
# Predictions filtering by conf-threshold
x = x[np.amax(x[..., 4:-nm], axis=-1) > conf_threshold]
# Create a new matrix which merge these(box, score, cls, nm) into one
# For more details about `numpy.c_()`: https://numpy.org/doc/1.26/reference/generated/numpy.c_.html
x = np.c_[x[..., :4], np.amax(x[..., 4:-nm], axis=-1), np.argmax(x[..., 4:-nm], axis=-1), x[..., -nm:]]
# NMS filtering
x = x[cv2.dnn.NMSBoxes(x[:, :4], x[:, 4], conf_threshold, iou_threshold)]
# Decode and return
if len(x) > 0:
# Bounding boxes format change: cxcywh -> xyxy
x[..., [0, 1]] -= x[..., [2, 3]] / 2
x[..., [2, 3]] += x[..., [0, 1]]
# Rescales bounding boxes from model shape(model_height, model_width) to the shape of original image
x[..., :4] -= [pad_w, pad_h, pad_w, pad_h]
x[..., :4] /= min(ratio)
# Bounding boxes boundary clamp
x[..., [0, 2]] = x[:, [0, 2]].clip(0, im0.shape[1])
x[..., [1, 3]] = x[:, [1, 3]].clip(0, im0.shape[0])
# Process masks
masks = self.process_mask(protos[0], x[:, 6:], x[:, :4], im0.shape)
# Masks -> Segments(contours)
segments = self.masks2segments(masks)
return x[..., :6], segments, masks # boxes, segments, masks
else:
return [], [], []
@staticmethod
def masks2segments(masks):
"""
It takes a list of masks(n,h,w) and returns a list of segments(n,xy) (Borrowed from
https://github.com/ultralytics/ultralytics/blob/465df3024f44fa97d4fad9986530d5a13cdabdca/ultralytics/utils/ops.py#L750)
Args:
masks (numpy.ndarray): the output of the model, which is a tensor of shape (batch_size, 160, 160).
Returns:
segments (List): list of segment masks.
"""
segments = []
for x in masks.astype("uint8"):
c = cv2.findContours(x, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)[0] # CHAIN_APPROX_SIMPLE
if c:
c = np.array(c[np.array([len(x) for x in c]).argmax()]).reshape(-1, 2)
else:
c = np.zeros((0, 2)) # no segments found
segments.append(c.astype("float32"))
return segments
@staticmethod
def crop_mask(masks, boxes):
"""
It takes a mask and a bounding box, and returns a mask that is cropped to the bounding box. (Borrowed from
https://github.com/ultralytics/ultralytics/blob/465df3024f44fa97d4fad9986530d5a13cdabdca/ultralytics/utils/ops.py#L599)
Args:
masks (Numpy.ndarray): [n, h, w] tensor of masks.
boxes (Numpy.ndarray): [n, 4] tensor of bbox coordinates in relative point form.
Returns:
(Numpy.ndarray): The masks are being cropped to the bounding box.
"""
n, h, w = masks.shape
x1, y1, x2, y2 = np.split(boxes[:, :, None], 4, 1)
r = np.arange(w, dtype=x1.dtype)[None, None, :]
c = np.arange(h, dtype=x1.dtype)[None, :, None]
return masks * ((r >= x1) * (r < x2) * (c >= y1) * (c < y2))
def process_mask(self, protos, masks_in, bboxes, im0_shape):
"""
Takes the output of the mask head, and applies the mask to the bounding boxes. This produces masks of higher quality
but is slower. (Borrowed from https://github.com/ultralytics/ultralytics/blob/465df3024f44fa97d4fad9986530d5a13cdabdca/ultralytics/utils/ops.py#L618)
Args:
protos (numpy.ndarray): [mask_dim, mask_h, mask_w].
masks_in (numpy.ndarray): [n, mask_dim], n is number of masks after nms.
bboxes (numpy.ndarray): bboxes re-scaled to original image shape.
im0_shape (tuple): the size of the input image (h,w,c).
Returns:
(numpy.ndarray): The upsampled masks.
"""
c, mh, mw = protos.shape
masks = np.matmul(masks_in, protos.reshape((c, -1))).reshape((-1, mh, mw)).transpose(1, 2, 0) # HWN
masks = np.ascontiguousarray(masks)
masks = self.scale_mask(masks, im0_shape) # re-scale mask from P3 shape to original input image shape
masks = np.einsum("HWN -> NHW", masks) # HWN -> NHW
masks = self.crop_mask(masks, bboxes)
return np.greater(masks, 0.5)
@staticmethod
def scale_mask(masks, im0_shape, ratio_pad=None):
"""
Takes a mask, and resizes it to the original image size. (Borrowed from
https://github.com/ultralytics/ultralytics/blob/465df3024f44fa97d4fad9986530d5a13cdabdca/ultralytics/utils/ops.py#L305)
Args:
masks (np.ndarray): resized and padded masks/images, [h, w, num]/[h, w, 3].
im0_shape (tuple): the original image shape.
ratio_pad (tuple): the ratio of the padding to the original image.
Returns:
masks (np.ndarray): The masks that are being returned.
"""
im1_shape = masks.shape[:2]
if ratio_pad is None: # calculate from im0_shape
gain = min(im1_shape[0] / im0_shape[0], im1_shape[1] / im0_shape[1]) # gain = old / new
pad = (im1_shape[1] - im0_shape[1] * gain) / 2, (im1_shape[0] - im0_shape[0] * gain) / 2 # wh padding
else:
pad = ratio_pad[1]
# Calculate tlbr of mask
top, left = int(round(pad[1] - 0.1)), int(round(pad[0] - 0.1)) # y, x
bottom, right = int(round(im1_shape[0] - pad[1] + 0.1)), int(round(im1_shape[1] - pad[0] + 0.1))
if len(masks.shape) < 2:
raise ValueError(f'"len of masks shape" should be 2 or 3, but got {len(masks.shape)}')
masks = masks[top:bottom, left:right]
masks = cv2.resize(
masks, (im0_shape[1], im0_shape[0]), interpolation=cv2.INTER_LINEAR
) # INTER_CUBIC would be better
if len(masks.shape) == 2:
masks = masks[:, :, None]
return masks
def draw_and_visualize(self, im, bboxes, segments, vis=False, save=True):
"""
Draw and visualize results.
Args:
im (np.ndarray): original image, shape [h, w, c].
bboxes (numpy.ndarray): [n, 4], n is number of bboxes.
segments (List): list of segment masks.
vis (bool): imshow using OpenCV.
save (bool): save image annotated.
Returns:
None
"""
# Draw rectangles and polygons
im_canvas = im.copy()
for (*box, conf, cls_), segment in zip(bboxes, segments):
# draw contour and fill mask
cv2.polylines(im, np.int32([segment]), True, (255, 255, 255), 2) # white borderline
cv2.fillPoly(im_canvas, np.int32([segment]), self.color_palette(int(cls_), bgr=True))
# draw bbox rectangle
cv2.rectangle(
im,
(int(box[0]), int(box[1])),
(int(box[2]), int(box[3])),
self.color_palette(int(cls_), bgr=True),
1,
cv2.LINE_AA,
)
cv2.putText(
im,
f"{self.classes[cls_]}: {conf:.3f}",
(int(box[0]), int(box[1] - 9)),
cv2.FONT_HERSHEY_SIMPLEX,
0.7,
self.color_palette(int(cls_), bgr=True),
2,
cv2.LINE_AA,
)
# Mix image
im = cv2.addWeighted(im_canvas, 0.3, im, 0.7, 0)
# Show image
if vis:
cv2.imshow("demo", im)
cv2.waitKey(0)
cv2.destroyAllWindows()
# Save image
if save:
cv2.imwrite("demo.jpg", im)
if __name__ == "__main__":
# Create an argument parser to handle command-line arguments
parser = argparse.ArgumentParser()
parser.add_argument("--model", type=str, required=True, help="Path to ONNX model")
parser.add_argument("--source", type=str, default=str(ASSETS / "bus.jpg"), help="Path to input image")
parser.add_argument("--conf", type=float, default=0.25, help="Confidence threshold")
parser.add_argument("--iou", type=float, default=0.45, help="NMS IoU threshold")
args = parser.parse_args()
# Build model
model = YOLOv8Seg(args.model)
# Read image by OpenCV
img = cv2.imread(args.source)
# Inference
boxes, segments, _ = model(img, conf_threshold=args.conf, iou_threshold=args.iou)
# Draw bboxes and polygons
if len(boxes) > 0:
model.draw_and_visualize(img, boxes, segments, vis=False, save=True)
| 13,623 | Python | .py | 280 | 38.317857 | 157 | 0.577667 | arojsubedi/Improved-YOLOv8s | 8 | 5 | 0 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,791 | main.py | arojsubedi_Improved-YOLOv8s/examples/YOLOv8-OpenCV-int8-tflite-Python/main.py | # Ultralytics YOLO 🚀, AGPL-3.0 license
import argparse
import cv2
import numpy as np
from tflite_runtime import interpreter as tflite
from ultralytics.utils import ASSETS, yaml_load
from ultralytics.utils.checks import check_yaml
# Declare as global variables, can be updated based on the trained model image size
img_width = 640
img_height = 640
class LetterBox:
def __init__(
self, new_shape=(img_width, img_height), auto=False, scaleFill=False, scaleup=True, center=True, stride=32
):
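        """
        Initialize LetterBox for resizing and padding an image to the model input shape.

        Args:
            new_shape (tuple): target shape for the letterboxed image (defaults to 640x640).
            auto (bool): pad to the minimum rectangle that is a multiple of `stride`.
            scaleFill (bool): stretch the image to `new_shape` without padding.
            scaleup (bool): allow scaling up; if False, only scale down (better val mAP).
            center (bool): center the image in the canvas; otherwise place it top-left.
            stride (int): stride multiple used when `auto` padding is enabled.
        """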
self.new_shape = new_shape
self.auto = auto
self.scaleFill = scaleFill
self.scaleup = scaleup
self.stride = stride
self.center = center # Put the image in the middle or top-left
def __call__(self, labels=None, image=None):
"""Return updated labels and image with added border."""
if labels is None:
labels = {}
img = labels.get("img") if image is None else image
shape = img.shape[:2] # current shape [height, width]
new_shape = labels.pop("rect_shape", self.new_shape)
if isinstance(new_shape, int):
new_shape = (new_shape, new_shape)
# Scale ratio (new / old)
r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
if not self.scaleup: # only scale down, do not scale up (for better val mAP)
r = min(r, 1.0)
# Compute padding
ratio = r, r # width, height ratios
new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding
if self.auto: # minimum rectangle
dw, dh = np.mod(dw, self.stride), np.mod(dh, self.stride) # wh padding
elif self.scaleFill: # stretch
dw, dh = 0.0, 0.0
new_unpad = (new_shape[1], new_shape[0])
ratio = new_shape[1] / shape[1], new_shape[0] / shape[0] # width, height ratios
if self.center:
dw /= 2 # divide padding into 2 sides
dh /= 2
if shape[::-1] != new_unpad: # resize
img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
top, bottom = int(round(dh - 0.1)) if self.center else 0, int(round(dh + 0.1))
left, right = int(round(dw - 0.1)) if self.center else 0, int(round(dw + 0.1))
img = cv2.copyMakeBorder(
img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=(114, 114, 114)
) # add border
if labels.get("ratio_pad"):
labels["ratio_pad"] = (labels["ratio_pad"], (left, top)) # for evaluation
if len(labels):
labels = self._update_labels(labels, ratio, dw, dh)
labels["img"] = img
labels["resized_shape"] = new_shape
return labels
else:
return img
def _update_labels(self, labels, ratio, padw, padh):
"""Update labels."""
labels["instances"].convert_bbox(format="xyxy")
labels["instances"].denormalize(*labels["img"].shape[:2][::-1])
labels["instances"].scale(*ratio)
labels["instances"].add_padding(padw, padh)
return labels
class Yolov8TFLite:
def __init__(self, tflite_model, input_image, confidence_thres, iou_thres):
"""
Initializes an instance of the Yolov8TFLite class.
Args:
tflite_model: Path to the TFLite model.
input_image: Path to the input image.
confidence_thres: Confidence threshold for filtering detections.
iou_thres: IoU (Intersection over Union) threshold for non-maximum suppression.
"""
self.tflite_model = tflite_model
self.input_image = input_image
self.confidence_thres = confidence_thres
self.iou_thres = iou_thres
# Load the class names from the COCO dataset
self.classes = yaml_load(check_yaml("coco128.yaml"))["names"]
# Generate a color palette for the classes
self.color_palette = np.random.uniform(0, 255, size=(len(self.classes), 3))
def draw_detections(self, img, box, score, class_id):
"""
Draws bounding boxes and labels on the input image based on the detected objects.
Args:
img: The input image to draw detections on.
box: Detected bounding box.
score: Corresponding detection score.
class_id: Class ID for the detected object.
Returns:
None
"""
# Extract the coordinates of the bounding box
x1, y1, w, h = box
# Retrieve the color for the class ID
color = self.color_palette[class_id]
# Draw the bounding box on the image
cv2.rectangle(img, (int(x1), int(y1)), (int(x1 + w), int(y1 + h)), color, 2)
# Create the label text with class name and score
label = f"{self.classes[class_id]}: {score:.2f}"
# Calculate the dimensions of the label text
(label_width, label_height), _ = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 1)
# Calculate the position of the label text
label_x = x1
label_y = y1 - 10 if y1 - 10 > label_height else y1 + 10
# Draw a filled rectangle as the background for the label text
cv2.rectangle(
img,
(int(label_x), int(label_y - label_height)),
(int(label_x + label_width), int(label_y + label_height)),
color,
cv2.FILLED,
)
# Draw the label text on the image
cv2.putText(img, label, (int(label_x), int(label_y)), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1, cv2.LINE_AA)
def preprocess(self):
"""
Preprocesses the input image before performing inference.
Returns:
image_data: Preprocessed image data ready for inference.
"""
# Read the input image using OpenCV
self.img = cv2.imread(self.input_image)
print("image before", self.img)
# Get the height and width of the input image
self.img_height, self.img_width = self.img.shape[:2]
letterbox = LetterBox(new_shape=[img_width, img_height], auto=False, stride=32)
image = letterbox(image=self.img)
image = [image]
image = np.stack(image)
image = image[..., ::-1].transpose((0, 3, 1, 2))
img = np.ascontiguousarray(image)
# n, h, w, c
image = img.astype(np.float32)
return image / 255
def postprocess(self, input_image, output):
"""
Performs post-processing on the model's output to extract bounding boxes, scores, and class IDs.
Args:
input_image (numpy.ndarray): The input image.
output (numpy.ndarray): The output of the model.
Returns:
numpy.ndarray: The input image with detections drawn on it.
"""
boxes = []
scores = []
class_ids = []
for pred in output:
pred = np.transpose(pred)
for box in pred:
x, y, w, h = box[:4]
x1 = x - w / 2
y1 = y - h / 2
boxes.append([x1, y1, w, h])
idx = np.argmax(box[4:])
scores.append(box[idx + 4])
class_ids.append(idx)
indices = cv2.dnn.NMSBoxes(boxes, scores, self.confidence_thres, self.iou_thres)
for i in indices:
# Get the box, score, and class ID corresponding to the index
box = boxes[i]
gain = min(img_width / self.img_width, img_height / self.img_height)
pad = (
round((img_width - self.img_width * gain) / 2 - 0.1),
round((img_height - self.img_height * gain) / 2 - 0.1),
)
box[0] = (box[0] - pad[0]) / gain
box[1] = (box[1] - pad[1]) / gain
box[2] = box[2] / gain
box[3] = box[3] / gain
score = scores[i]
class_id = class_ids[i]
if score > 0.25:
print(box, score, class_id)
# Draw the detection on the input image
self.draw_detections(input_image, box, score, class_id)
return input_image
def main(self):
"""
Performs inference using a TFLite model and returns the output image with drawn detections.
Returns:
output_img: The output image with drawn detections.
"""
# Create an interpreter for the TFLite model
interpreter = tflite.Interpreter(model_path=self.tflite_model)
self.model = interpreter
interpreter.allocate_tensors()
# Get the model inputs
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
# Store the shape of the input for later use
input_shape = input_details[0]["shape"]
self.input_width = input_shape[1]
self.input_height = input_shape[2]
        # Preprocess the image data into a float32 NCHW array scaled to [0, 1]
        img_data = self.preprocess()
# Set the input tensor to the interpreter
print(input_details[0]["index"])
print(img_data.shape)
img_data = img_data.transpose((0, 2, 3, 1))
scale, zero_point = input_details[0]["quantization"]
interpreter.set_tensor(input_details[0]["index"], img_data)
# Run inference
interpreter.invoke()
# Get the output tensor from the interpreter
output = interpreter.get_tensor(output_details[0]["index"])
scale, zero_point = output_details[0]["quantization"]
output = (output.astype(np.float32) - zero_point) * scale
output[:, [0, 2]] *= img_width
output[:, [1, 3]] *= img_height
print(output)
# Perform post-processing on the outputs to obtain output image.
return self.postprocess(self.img, output)
if __name__ == "__main__":
# Create an argument parser to handle command-line arguments
parser = argparse.ArgumentParser()
parser.add_argument(
"--model", type=str, default="yolov8n_full_integer_quant.tflite", help="Input your TFLite model."
)
parser.add_argument("--img", type=str, default=str(ASSETS / "bus.jpg"), help="Path to input image.")
parser.add_argument("--conf-thres", type=float, default=0.5, help="Confidence threshold")
parser.add_argument("--iou-thres", type=float, default=0.5, help="NMS IoU threshold")
args = parser.parse_args()
# Create an instance of the Yolov8TFLite class with the specified arguments
detection = Yolov8TFLite(args.model, args.img, args.conf_thres, args.iou_thres)
# Perform object detection and obtain the output image
output_image = detection.main()
# Display the output image in a window
cv2.imshow("Output", output_image)
# Wait for a key press to exit
cv2.waitKey(0)
| 10,983 | Python | .py | 239 | 36.317992 | 119 | 0.599401 | arojsubedi/Improved-YOLOv8s | 8 | 5 | 0 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,792 | Dockerfile-python | arojsubedi_Improved-YOLOv8s/docker/Dockerfile-python | # Ultralytics YOLO 🚀, AGPL-3.0 license
# Builds ultralytics/ultralytics:latest-cpu image on DockerHub https://hub.docker.com/r/ultralytics/ultralytics
# Image is CPU-optimized for ONNX, OpenVINO and PyTorch YOLOv8 deployments
# Use the official Python 3.10 slim-bookworm as base image
FROM python:3.10-slim-bookworm
# Downloads to user config dir
ADD https://github.com/ultralytics/assets/releases/download/v0.0.0/Arial.ttf \
https://github.com/ultralytics/assets/releases/download/v0.0.0/Arial.Unicode.ttf \
/root/.config/Ultralytics/
# Install linux packages
# g++ required to build 'tflite_support' and 'lap' packages, libusb-1.0-0 required for 'tflite_support' package
RUN apt update \
&& apt install --no-install-recommends -y python3-pip git zip curl htop libgl1 libglib2.0-0 libpython3-dev gnupg g++ libusb-1.0-0
# Create working directory
WORKDIR /usr/src/ultralytics
# Copy contents
# COPY . /usr/src/ultralytics # git permission issues inside container
RUN git clone https://github.com/ultralytics/ultralytics -b main /usr/src/ultralytics
ADD https://github.com/ultralytics/assets/releases/download/v8.1.0/yolov8n.pt /usr/src/ultralytics/
# Remove python3.11/EXTERNALLY-MANAGED or use 'pip install --break-system-packages' avoid 'externally-managed-environment' Ubuntu nightly error
# RUN rm -rf /usr/lib/python3.11/EXTERNALLY-MANAGED
# Install pip packages
RUN python3 -m pip install --upgrade pip wheel
RUN pip install --no-cache -e ".[export]" --extra-index-url https://download.pytorch.org/whl/cpu
# Run exports to AutoInstall packages
RUN yolo export model=tmp/yolov8n.pt format=edgetpu imgsz=32
RUN yolo export model=tmp/yolov8n.pt format=ncnn imgsz=32
# Requires <= Python 3.10, bug with paddlepaddle==2.5.0 https://github.com/PaddlePaddle/X2Paddle/issues/991
RUN pip install --no-cache paddlepaddle>=2.6.0 x2paddle
# Remove exported models
RUN rm -rf tmp
# Usage Examples -------------------------------------------------------------------------------------------------------
# Build and Push
# t=ultralytics/ultralytics:latest-python && sudo docker build -f docker/Dockerfile-python -t $t . && sudo docker push $t
# Run
# t=ultralytics/ultralytics:latest-python && sudo docker run -it --ipc=host $t
# Pull and Run
# t=ultralytics/ultralytics:latest-python && sudo docker pull $t && sudo docker run -it --ipc=host $t
# Pull and Run with local volume mounted
# t=ultralytics/ultralytics:latest-python && sudo docker pull $t && sudo docker run -it --ipc=host -v "$(pwd)"/datasets:/usr/src/datasets $t
| 2,547 | Python | .py | 40 | 62.025 | 143 | 0.744485 | arojsubedi/Improved-YOLOv8s | 8 | 5 | 0 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,793 | build_reference.py | arojsubedi_Improved-YOLOv8s/docs/build_reference.py | # Ultralytics YOLO üöÄ, AGPL-3.0 license
"""
Helper file to build Ultralytics Docs reference section. Recursively walks through ultralytics dir and builds an MkDocs
reference section of *.md files composed of classes and functions, and also creates a nav menu for use in mkdocs.yaml.
Note: Must be run from repository root directory. Do not run from docs directory.
"""
import re
from collections import defaultdict
from pathlib import Path
# Get package root i.e. /Users/glennjocher/PycharmProjects/ultralytics/ultralytics
from ultralytics.utils import ROOT as PACKAGE_DIR
# Constants
REFERENCE_DIR = PACKAGE_DIR.parent / "docs/en/reference"
GITHUB_REPO = "ultralytics/ultralytics"
def extract_classes_and_functions(filepath: Path) -> tuple:
"""Extracts class and function names from a given Python file."""
content = filepath.read_text()
class_pattern = r"(?:^|\n)class\s(\w+)(?:\(|:)"
func_pattern = r"(?:^|\n)def\s(\w+)\("
classes = re.findall(class_pattern, content)
functions = re.findall(func_pattern, content)
return classes, functions
def create_markdown(py_filepath: Path, module_path: str, classes: list, functions: list):
"""Creates a Markdown file containing the API reference for the given Python module."""
md_filepath = py_filepath.with_suffix(".md")
# Read existing content and keep header content between first two ---
header_content = ""
if md_filepath.exists():
existing_content = md_filepath.read_text()
header_parts = existing_content.split("---")
for part in header_parts:
if "description:" in part or "comments:" in part:
header_content += f"---{part}---\n\n"
module_name = module_path.replace(".__init__", "")
module_path = module_path.replace(".", "/")
url = f"https://github.com/{GITHUB_REPO}/blob/main/{module_path}.py"
edit = f"https://github.com/{GITHUB_REPO}/edit/main/{module_path}.py"
title_content = (
f"# Reference for `{module_path}.py`\n\n"
f"!!! Note\n\n"
f" This file is available at [{url}]({url}). If you spot a problem please help fix it by [contributing](https://docs.ultralytics.com/help/contributing/) a [Pull Request]({edit}) üõ†Ô∏è. Thank you üôè!\n\n"
)
md_content = ["<br><br>\n"] + [f"## ::: {module_name}.{class_name}\n\n<br><br>\n" for class_name in classes]
md_content.extend(f"## ::: {module_name}.{func_name}\n\n<br><br>\n" for func_name in functions)
md_content = header_content + title_content + "\n".join(md_content)
if not md_content.endswith("\n"):
md_content += "\n"
md_filepath.parent.mkdir(parents=True, exist_ok=True)
md_filepath.write_text(md_content)
return md_filepath.relative_to(PACKAGE_DIR.parent)
def nested_dict() -> defaultdict:
"""Creates and returns a nested defaultdict."""
return defaultdict(nested_dict)
def sort_nested_dict(d: dict) -> dict:
"""Sorts a nested dictionary recursively."""
return {key: sort_nested_dict(value) if isinstance(value, dict) else value for key, value in sorted(d.items())}
def create_nav_menu_yaml(nav_items: list, save: bool = False):
"""Creates a YAML file for the navigation menu based on the provided list of items."""
nav_tree = nested_dict()
for item_str in nav_items:
item = Path(item_str)
parts = item.parts
current_level = nav_tree["reference"]
for part in parts[2:-1]: # skip the first two parts (docs and reference) and the last part (filename)
current_level = current_level[part]
md_file_name = parts[-1].replace(".md", "")
current_level[md_file_name] = item
nav_tree_sorted = sort_nested_dict(nav_tree)
def _dict_to_yaml(d, level=0):
"""Converts a nested dictionary to a YAML-formatted string with indentation."""
yaml_str = ""
indent = " " * level
for k, v in d.items():
if isinstance(v, dict):
yaml_str += f"{indent}- {k}:\n{_dict_to_yaml(v, level + 1)}"
else:
yaml_str += f"{indent}- {k}: {str(v).replace('docs/en/', '')}\n"
return yaml_str
# Print updated YAML reference section
print("Scan complete, new mkdocs.yaml reference section is:\n\n", _dict_to_yaml(nav_tree_sorted))
# Save new YAML reference section
if save:
(PACKAGE_DIR.parent / "nav_menu_updated.yml").write_text(_dict_to_yaml(nav_tree_sorted))
def main():
"""Main function to extract class and function names, create Markdown files, and generate a YAML navigation menu."""
nav_items = []
for py_filepath in PACKAGE_DIR.rglob("*.py"):
classes, functions = extract_classes_and_functions(py_filepath)
if classes or functions:
py_filepath_rel = py_filepath.relative_to(PACKAGE_DIR)
md_filepath = REFERENCE_DIR / py_filepath_rel
module_path = f"{PACKAGE_DIR.name}.{py_filepath_rel.with_suffix('').as_posix().replace('/', '.')}"
md_rel_filepath = create_markdown(md_filepath, module_path, classes, functions)
nav_items.append(str(md_rel_filepath))
create_nav_menu_yaml(nav_items)
if __name__ == "__main__":
main()
| 5,232 | Python | .py | 97 | 47.484536 | 218 | 0.661113 | arojsubedi/Improved-YOLOv8s | 8 | 5 | 0 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,794 | build_docs.py | arojsubedi_Improved-YOLOv8s/docs/build_docs.py | # Ultralytics YOLO 🚀, AGPL-3.0 license
"""
This Python script is designed to automate the building and post-processing of MkDocs documentation, particularly for
projects with multilingual content. It streamlines the workflow for generating localized versions of the documentation
and updating HTML links to ensure they are correctly formatted.
Key Features:
- Automated building of MkDocs documentation: The script compiles both the main documentation and
any localized versions specified in separate MkDocs configuration files.
- Post-processing of generated HTML files: After the documentation is built, the script updates all
HTML files to remove the '.md' extension from internal links. This ensures that links in the built
HTML documentation correctly point to other HTML pages rather than Markdown files, which is crucial
for proper navigation within the web-based documentation.
Usage:
- Run the script from the root directory of your MkDocs project.
- Ensure that MkDocs is installed and that all MkDocs configuration files (main and localized versions)
are present in the project directory.
- The script first builds the documentation using MkDocs, then scans the generated HTML files in the 'site'
directory to update the internal links.
- It's ideal for projects where the documentation is written in Markdown and needs to be served as a static website.
Note:
- This script is built to be run in an environment where Python and MkDocs are installed and properly configured.
"""
import os
import re
import shutil
import subprocess
from pathlib import Path
from tqdm import tqdm
DOCS = Path(__file__).parent.resolve()
SITE = DOCS.parent / "site"
def build_docs(clone_repos=True):
"""Build docs using mkdocs."""
if SITE.exists():
print(f"Removing existing {SITE}")
shutil.rmtree(SITE)
# Get hub-sdk repo
if clone_repos:
repo = "https://github.com/ultralytics/hub-sdk"
local_dir = DOCS.parent / Path(repo).name
if not local_dir.exists():
os.system(f"git clone {repo} {local_dir}")
os.system(f"git -C {local_dir} pull") # update repo
shutil.rmtree(DOCS / "en/hub/sdk", ignore_errors=True) # delete if exists
shutil.copytree(local_dir / "docs", DOCS / "en/hub/sdk") # for docs
shutil.rmtree(DOCS.parent / "hub_sdk", ignore_errors=True) # delete if exists
shutil.copytree(local_dir / "hub_sdk", DOCS.parent / "hub_sdk") # for mkdocstrings
print(f"Cloned/Updated {repo} in {local_dir}")
# Build the main documentation
print(f"Building docs from {DOCS}")
subprocess.run(f"mkdocs build -f {DOCS.parent}/mkdocs.yml", check=True, shell=True)
print(f"Site built at {SITE}")
def update_page_title(file_path: Path, new_title: str):
"""Update the title of an HTML file."""
# Read the content of the file
with open(file_path, encoding="utf-8") as file:
content = file.read()
# Replace the existing title with the new title
updated_content = re.sub(r"<title>.*?</title>", f"<title>{new_title}</title>", content)
# Write the updated content back to the file
with open(file_path, "w", encoding="utf-8") as file:
file.write(updated_content)
def update_html_head(script=""):
"""Update the HTML head section of each file."""
html_files = Path(SITE).rglob("*.html")
for html_file in tqdm(html_files, desc="Processing HTML files"):
with html_file.open("r", encoding="utf-8") as file:
html_content = file.read()
if script in html_content: # script already in HTML file
return
head_end_index = html_content.lower().rfind("</head>")
if head_end_index != -1:
# Add the specified JavaScript to the HTML file just before the end of the head tag.
new_html_content = html_content[:head_end_index] + script + html_content[head_end_index:]
with html_file.open("w", encoding="utf-8") as file:
file.write(new_html_content)
def update_subdir_edit_links(subdir="", docs_url=""):
"""Update the HTML head section of each file."""
from bs4 import BeautifulSoup
    if str(subdir[0]) == "/":
        subdir = str(subdir)[1:]  # strip the leading slash so the href split below works on the bare subdir
html_files = (SITE / subdir).rglob("*.html")
for html_file in tqdm(html_files, desc="Processing subdir files"):
with html_file.open("r", encoding="utf-8") as file:
soup = BeautifulSoup(file, "html.parser")
# Find the anchor tag and update its href attribute
a_tag = soup.find("a", {"class": "md-content__button md-icon"})
if a_tag and a_tag["title"] == "Edit this page":
a_tag["href"] = f"{docs_url}{a_tag['href'].split(subdir)[-1]}"
# Write the updated HTML back to the file
with open(html_file, "w", encoding="utf-8") as file:
file.write(str(soup))
def main():
"""Builds docs, updates titles and edit links, and prints local server command."""
build_docs()
# Update titles
update_page_title(SITE / "404.html", new_title="Ultralytics Docs - Not Found")
# Update edit links
update_subdir_edit_links(
subdir="hub/sdk/", # do not use leading slash
docs_url="https://github.com/ultralytics/hub-sdk/tree/develop/docs/",
)
# Update HTML file head section
script = ""
if any(script):
update_html_head(script)
# Show command to serve built website
print('Serve site at http://localhost:8000 with "python -m http.server --directory site"')
if __name__ == "__main__":
main()
| 5,587 | Python | .py | 109 | 45.422018 | 118 | 0.687167 | arojsubedi/Improved-YOLOv8s | 8 | 5 | 0 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,795 | __init__.py | arojsubedi_Improved-YOLOv8s/ultralytics/__init__.py | # Ultralytics YOLO 🚀, AGPL-3.0 license
__version__ = "8.1.19"
from ultralytics.data.explorer.explorer import Explorer
from ultralytics.models import RTDETR, SAM, YOLO, YOLOWorld
from ultralytics.models.fastsam import FastSAM
from ultralytics.models.nas import NAS
from ultralytics.utils import ASSETS, SETTINGS as settings
from ultralytics.utils.checks import check_yolo as checks
from ultralytics.utils.downloads import download
__all__ = (
"__version__",
"ASSETS",
"YOLO",
"YOLOWorld",
"NAS",
"SAM",
"FastSAM",
"RTDETR",
"checks",
"download",
"settings",
"Explorer",
)
| 625 | Python | .py | 23 | 23.956522 | 59 | 0.724541 | arojsubedi/Improved-YOLOv8s | 8 | 5 | 0 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,796 | augment.py | arojsubedi_Improved-YOLOv8s/ultralytics/data/augment.py | # Ultralytics YOLO 🚀, AGPL-3.0 license
import math
import random
from copy import deepcopy
import cv2
import numpy as np
import torch
import torchvision.transforms as T
from ultralytics.utils import LOGGER, colorstr
from ultralytics.utils.checks import check_version
from ultralytics.utils.instance import Instances
from ultralytics.utils.metrics import bbox_ioa
from ultralytics.utils.ops import segment2box, xyxyxyxy2xywhr
from ultralytics.utils.torch_utils import TORCHVISION_0_10, TORCHVISION_0_11, TORCHVISION_0_13
from .utils import polygons2masks, polygons2masks_overlap
DEFAULT_MEAN = (0.0, 0.0, 0.0)
DEFAULT_STD = (1.0, 1.0, 1.0)
DEFAULT_CROP_FRACTION = 1.0
# TODO: we might need a BaseTransform to make all these augments be compatible with both classification and semantic
class BaseTransform:
"""
Base class for image transformations.
This is a generic transformation class that can be extended for specific image processing needs.
The class is designed to be compatible with both classification and semantic segmentation tasks.
Methods:
__init__: Initializes the BaseTransform object.
apply_image: Applies image transformation to labels.
apply_instances: Applies transformations to object instances in labels.
apply_semantic: Applies semantic segmentation to an image.
__call__: Applies all label transformations to an image, instances, and semantic masks.
"""
def __init__(self) -> None:
"""Initializes the BaseTransform object."""
pass
def apply_image(self, labels):
"""Applies image transformations to labels."""
pass
def apply_instances(self, labels):
"""Applies transformations to object instances in labels."""
pass
def apply_semantic(self, labels):
"""Applies semantic segmentation to an image."""
pass
def __call__(self, labels):
"""Applies all label transformations to an image, instances, and semantic masks."""
self.apply_image(labels)
self.apply_instances(labels)
self.apply_semantic(labels)
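# Illustrative sketch (editorial addition, not part of the original module): a minimal BaseTransform subclass
# that only overrides apply_image; the class name below is hypothetical.
#
#   class RandomInvert(BaseTransform):
#       def apply_image(self, labels):
#           labels["img"] = 255 - labels["img"]  # invert pixel intensities in place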
class Compose:
"""Class for composing multiple image transformations."""
def __init__(self, transforms):
"""Initializes the Compose object with a list of transforms."""
self.transforms = transforms
def __call__(self, data):
"""Applies a series of transformations to input data."""
for t in self.transforms:
data = t(data)
return data
def append(self, transform):
"""Appends a new transform to the existing list of transforms."""
self.transforms.append(transform)
def tolist(self):
"""Converts the list of transforms to a standard Python list."""
return self.transforms
def __repr__(self):
"""Returns a string representation of the object."""
return f"{self.__class__.__name__}({', '.join([f'{t}' for t in self.transforms])})"
class BaseMixTransform:
"""
Class for base mix (MixUp/Mosaic) transformations.
This implementation is from mmyolo.
"""
def __init__(self, dataset, pre_transform=None, p=0.0) -> None:
"""Initializes the BaseMixTransform object with dataset, pre_transform, and probability."""
self.dataset = dataset
self.pre_transform = pre_transform
self.p = p
def __call__(self, labels):
"""Applies pre-processing transforms and mixup/mosaic transforms to labels data."""
if random.uniform(0, 1) > self.p:
return labels
# Get index of one or three other images
indexes = self.get_indexes()
if isinstance(indexes, int):
indexes = [indexes]
# Get images information will be used for Mosaic or MixUp
mix_labels = [self.dataset.get_image_and_label(i) for i in indexes]
if self.pre_transform is not None:
for i, data in enumerate(mix_labels):
mix_labels[i] = self.pre_transform(data)
labels["mix_labels"] = mix_labels
# Mosaic or MixUp
labels = self._mix_transform(labels)
labels.pop("mix_labels", None)
return labels
def _mix_transform(self, labels):
"""Applies MixUp or Mosaic augmentation to the label dictionary."""
raise NotImplementedError
def get_indexes(self):
"""Gets a list of shuffled indexes for mosaic augmentation."""
raise NotImplementedError
class Mosaic(BaseMixTransform):
"""
Mosaic augmentation.
This class performs mosaic augmentation by combining multiple (4 or 9) images into a single mosaic image.
The augmentation is applied to a dataset with a given probability.
Attributes:
dataset: The dataset on which the mosaic augmentation is applied.
imgsz (int, optional): Image size (height and width) after mosaic pipeline of a single image. Default to 640.
p (float, optional): Probability of applying the mosaic augmentation. Must be in the range 0-1. Default to 1.0.
n (int, optional): The grid size, either 4 (for 2x2) or 9 (for 3x3).
"""
def __init__(self, dataset, imgsz=640, p=1.0, n=4):
"""Initializes the object with a dataset, image size, probability, and border."""
assert 0 <= p <= 1.0, f"The probability should be in range [0, 1], but got {p}."
assert n in (4, 9), "grid must be equal to 4 or 9."
super().__init__(dataset=dataset, p=p)
self.dataset = dataset
self.imgsz = imgsz
self.border = (-imgsz // 2, -imgsz // 2) # width, height
self.n = n
def get_indexes(self, buffer=True):
"""Return a list of random indexes from the dataset."""
if buffer: # select images from buffer
return random.choices(list(self.dataset.buffer), k=self.n - 1)
else: # select any images
return [random.randint(0, len(self.dataset) - 1) for _ in range(self.n - 1)]
def _mix_transform(self, labels):
"""Apply mixup transformation to the input image and labels."""
assert labels.get("rect_shape", None) is None, "rect and mosaic are mutually exclusive."
assert len(labels.get("mix_labels", [])), "There are no other images for mosaic augment."
return (
self._mosaic3(labels) if self.n == 3 else self._mosaic4(labels) if self.n == 4 else self._mosaic9(labels)
        )  # modified to also dispatch to _mosaic3; note that __init__ currently only accepts n in (4, 9)
def _mosaic3(self, labels):
"""Create a 1x3 image mosaic."""
mosaic_labels = []
s = self.imgsz
for i in range(3):
labels_patch = labels if i == 0 else labels["mix_labels"][i - 1]
# Load image
img = labels_patch["img"]
h, w = labels_patch.pop("resized_shape")
# Place img in img3
if i == 0: # center
img3 = np.full((s * 3, s * 3, img.shape[2]), 114, dtype=np.uint8) # base image with 3 tiles
h0, w0 = h, w
c = s, s, s + w, s + h # xmin, ymin, xmax, ymax (base) coordinates
elif i == 1: # right
c = s + w0, s, s + w0 + w, s + h
elif i == 2: # left
c = s - w, s + h0 - h, s, s + h0
padw, padh = c[:2]
x1, y1, x2, y2 = (max(x, 0) for x in c) # allocate coords
img3[y1:y2, x1:x2] = img[y1 - padh :, x1 - padw :] # img3[ymin:ymax, xmin:xmax]
# hp, wp = h, w # height, width previous for next iteration
# Labels assuming imgsz*2 mosaic size
labels_patch = self._update_labels(labels_patch, padw + self.border[0], padh + self.border[1])
mosaic_labels.append(labels_patch)
final_labels = self._cat_labels(mosaic_labels)
final_labels["img"] = img3[-self.border[0] : self.border[0], -self.border[1] : self.border[1]]
return final_labels
def _mosaic4(self, labels):
"""Create a 2x2 image mosaic."""
mosaic_labels = []
s = self.imgsz
yc, xc = (int(random.uniform(-x, 2 * s + x)) for x in self.border) # mosaic center x, y
for i in range(4):
labels_patch = labels if i == 0 else labels["mix_labels"][i - 1]
# Load image
img = labels_patch["img"]
h, w = labels_patch.pop("resized_shape")
# Place img in img4
if i == 0: # top left
img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles
x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image)
x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image)
elif i == 1: # top right
x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
elif i == 2: # bottom left
x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h)
elif i == 3: # bottom right
x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)
img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax]
padw = x1a - x1b
padh = y1a - y1b
labels_patch = self._update_labels(labels_patch, padw, padh)
mosaic_labels.append(labels_patch)
final_labels = self._cat_labels(mosaic_labels)
final_labels["img"] = img4
return final_labels
def _mosaic9(self, labels):
"""Create a 3x3 image mosaic."""
mosaic_labels = []
s = self.imgsz
hp, wp = -1, -1 # height, width previous
for i in range(9):
labels_patch = labels if i == 0 else labels["mix_labels"][i - 1]
# Load image
img = labels_patch["img"]
h, w = labels_patch.pop("resized_shape")
# Place img in img9
if i == 0: # center
img9 = np.full((s * 3, s * 3, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles
h0, w0 = h, w
c = s, s, s + w, s + h # xmin, ymin, xmax, ymax (base) coordinates
elif i == 1: # top
c = s, s - h, s + w, s
elif i == 2: # top right
c = s + wp, s - h, s + wp + w, s
elif i == 3: # right
c = s + w0, s, s + w0 + w, s + h
elif i == 4: # bottom right
c = s + w0, s + hp, s + w0 + w, s + hp + h
elif i == 5: # bottom
c = s + w0 - w, s + h0, s + w0, s + h0 + h
elif i == 6: # bottom left
c = s + w0 - wp - w, s + h0, s + w0 - wp, s + h0 + h
elif i == 7: # left
c = s - w, s + h0 - h, s, s + h0
elif i == 8: # top left
c = s - w, s + h0 - hp - h, s, s + h0 - hp
padw, padh = c[:2]
x1, y1, x2, y2 = (max(x, 0) for x in c) # allocate coords
# Image
img9[y1:y2, x1:x2] = img[y1 - padh :, x1 - padw :] # img9[ymin:ymax, xmin:xmax]
hp, wp = h, w # height, width previous for next iteration
# Labels assuming imgsz*2 mosaic size
labels_patch = self._update_labels(labels_patch, padw + self.border[0], padh + self.border[1])
mosaic_labels.append(labels_patch)
final_labels = self._cat_labels(mosaic_labels)
final_labels["img"] = img9[-self.border[0] : self.border[0], -self.border[1] : self.border[1]]
return final_labels
@staticmethod
def _update_labels(labels, padw, padh):
"""Update labels."""
nh, nw = labels["img"].shape[:2]
labels["instances"].convert_bbox(format="xyxy")
labels["instances"].denormalize(nw, nh)
labels["instances"].add_padding(padw, padh)
return labels
def _cat_labels(self, mosaic_labels):
"""Return labels with mosaic border instances clipped."""
if len(mosaic_labels) == 0:
return {}
cls = []
instances = []
imgsz = self.imgsz * 2 # mosaic imgsz
for labels in mosaic_labels:
cls.append(labels["cls"])
instances.append(labels["instances"])
# Final labels
final_labels = {
"im_file": mosaic_labels[0]["im_file"],
"ori_shape": mosaic_labels[0]["ori_shape"],
"resized_shape": (imgsz, imgsz),
"cls": np.concatenate(cls, 0),
"instances": Instances.concatenate(instances, axis=0),
"mosaic_border": self.border,
}
final_labels["instances"].clip(imgsz, imgsz)
good = final_labels["instances"].remove_zero_area_boxes()
final_labels["cls"] = final_labels["cls"][good]
return final_labels
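# Illustrative sketch (editorial addition): typical Mosaic usage during training, assuming `dataset` is a
# YOLODataset-like object exposing a `buffer` of recent sample indices and `get_image_and_label(index)`.
#
#   mosaic = Mosaic(dataset, imgsz=640, p=1.0, n=4)
#   labels = mosaic(dataset.get_image_and_label(0))  # "img" in the result is the 1280x1280 2x2 mosaic canvas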
class MixUp(BaseMixTransform):
"""Class for applying MixUp augmentation to the dataset."""
def __init__(self, dataset, pre_transform=None, p=0.0) -> None:
"""Initializes MixUp object with dataset, pre_transform, and probability of applying MixUp."""
super().__init__(dataset=dataset, pre_transform=pre_transform, p=p)
def get_indexes(self):
"""Get a random index from the dataset."""
return random.randint(0, len(self.dataset) - 1)
def _mix_transform(self, labels):
"""Applies MixUp augmentation as per https://arxiv.org/pdf/1710.09412.pdf."""
r = np.random.beta(32.0, 32.0) # mixup ratio, alpha=beta=32.0
labels2 = labels["mix_labels"][0]
labels["img"] = (labels["img"] * r + labels2["img"] * (1 - r)).astype(np.uint8)
labels["instances"] = Instances.concatenate([labels["instances"], labels2["instances"]], axis=0)
labels["cls"] = np.concatenate([labels["cls"], labels2["cls"]], 0)
return labels
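# Editorial note: with alpha = beta = 32.0 the mixing ratio r ~ Beta(32, 32) is tightly concentrated around 0.5
# (standard deviation ≈ 0.06), so this MixUp produces near-equal blends of the two images rather than the
# mostly-one-image blends that small alpha values would give.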
class RandomPerspective:
"""
Implements random perspective and affine transformations on images and corresponding bounding boxes, segments, and
keypoints. These transformations include rotation, translation, scaling, and shearing. The class also offers the
option to apply these transformations conditionally with a specified probability.
Attributes:
degrees (float): Degree range for random rotations.
translate (float): Fraction of total width and height for random translation.
scale (float): Scaling factor interval, e.g., a scale factor of 0.1 allows a resize between 90%-110%.
shear (float): Shear intensity (angle in degrees).
perspective (float): Perspective distortion factor.
border (tuple): Tuple specifying mosaic border.
pre_transform (callable): A function/transform to apply to the image before starting the random transformation.
Methods:
affine_transform(img, border): Applies a series of affine transformations to the image.
apply_bboxes(bboxes, M): Transforms bounding boxes using the calculated affine matrix.
apply_segments(segments, M): Transforms segments and generates new bounding boxes.
apply_keypoints(keypoints, M): Transforms keypoints.
__call__(labels): Main method to apply transformations to both images and their corresponding annotations.
box_candidates(box1, box2): Filters out bounding boxes that don't meet certain criteria post-transformation.
"""
def __init__(
self, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, border=(0, 0), pre_transform=None
):
"""Initializes RandomPerspective object with transformation parameters."""
self.degrees = degrees
self.translate = translate
self.scale = scale
self.shear = shear
self.perspective = perspective
self.border = border # mosaic border
self.pre_transform = pre_transform
def affine_transform(self, img, border):
"""
Applies a sequence of affine transformations centered around the image center.
Args:
img (ndarray): Input image.
border (tuple): Border dimensions.
Returns:
img (ndarray): Transformed image.
M (ndarray): Transformation matrix.
s (float): Scale factor.
"""
# Center
C = np.eye(3, dtype=np.float32)
C[0, 2] = -img.shape[1] / 2 # x translation (pixels)
C[1, 2] = -img.shape[0] / 2 # y translation (pixels)
# Perspective
P = np.eye(3, dtype=np.float32)
P[2, 0] = random.uniform(-self.perspective, self.perspective) # x perspective (about y)
P[2, 1] = random.uniform(-self.perspective, self.perspective) # y perspective (about x)
# Rotation and Scale
R = np.eye(3, dtype=np.float32)
a = random.uniform(-self.degrees, self.degrees)
# a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations
s = random.uniform(1 - self.scale, 1 + self.scale)
# s = 2 ** random.uniform(-scale, scale)
R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s)
# Shear
S = np.eye(3, dtype=np.float32)
S[0, 1] = math.tan(random.uniform(-self.shear, self.shear) * math.pi / 180) # x shear (deg)
S[1, 0] = math.tan(random.uniform(-self.shear, self.shear) * math.pi / 180) # y shear (deg)
# Translation
T = np.eye(3, dtype=np.float32)
T[0, 2] = random.uniform(0.5 - self.translate, 0.5 + self.translate) * self.size[0] # x translation (pixels)
T[1, 2] = random.uniform(0.5 - self.translate, 0.5 + self.translate) * self.size[1] # y translation (pixels)
# Combined rotation matrix
M = T @ S @ R @ P @ C # order of operations (right to left) is IMPORTANT
# Affine image
if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed
if self.perspective:
img = cv2.warpPerspective(img, M, dsize=self.size, borderValue=(114, 114, 114))
else: # affine
img = cv2.warpAffine(img, M[:2], dsize=self.size, borderValue=(114, 114, 114))
return img, M, s
def apply_bboxes(self, bboxes, M):
"""
Apply affine to bboxes only.
Args:
bboxes (ndarray): list of bboxes, xyxy format, with shape (num_bboxes, 4).
M (ndarray): affine matrix.
Returns:
new_bboxes (ndarray): bboxes after affine, [num_bboxes, 4].
"""
n = len(bboxes)
if n == 0:
return bboxes
xy = np.ones((n * 4, 3), dtype=bboxes.dtype)
xy[:, :2] = bboxes[:, [0, 1, 2, 3, 0, 3, 2, 1]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1
xy = xy @ M.T # transform
xy = (xy[:, :2] / xy[:, 2:3] if self.perspective else xy[:, :2]).reshape(n, 8) # perspective rescale or affine
# Create new boxes
x = xy[:, [0, 2, 4, 6]]
y = xy[:, [1, 3, 5, 7]]
return np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1)), dtype=bboxes.dtype).reshape(4, n).T
def apply_segments(self, segments, M):
"""
Apply affine to segments and generate new bboxes from segments.
Args:
segments (ndarray): list of segments, [num_samples, 500, 2].
M (ndarray): affine matrix.
Returns:
new_segments (ndarray): list of segments after affine, [num_samples, 500, 2].
new_bboxes (ndarray): bboxes after affine, [N, 4].
"""
n, num = segments.shape[:2]
if n == 0:
return [], segments
xy = np.ones((n * num, 3), dtype=segments.dtype)
segments = segments.reshape(-1, 2)
xy[:, :2] = segments
xy = xy @ M.T # transform
xy = xy[:, :2] / xy[:, 2:3]
segments = xy.reshape(n, -1, 2)
bboxes = np.stack([segment2box(xy, self.size[0], self.size[1]) for xy in segments], 0)
segments[..., 0] = segments[..., 0].clip(bboxes[:, 0:1], bboxes[:, 2:3])
segments[..., 1] = segments[..., 1].clip(bboxes[:, 1:2], bboxes[:, 3:4])
return bboxes, segments
def apply_keypoints(self, keypoints, M):
"""
Apply affine to keypoints.
Args:
keypoints (ndarray): keypoints, [N, 17, 3].
M (ndarray): affine matrix.
Returns:
new_keypoints (ndarray): keypoints after affine, [N, 17, 3].
"""
n, nkpt = keypoints.shape[:2]
if n == 0:
return keypoints
xy = np.ones((n * nkpt, 3), dtype=keypoints.dtype)
visible = keypoints[..., 2].reshape(n * nkpt, 1)
xy[:, :2] = keypoints[..., :2].reshape(n * nkpt, 2)
xy = xy @ M.T # transform
xy = xy[:, :2] / xy[:, 2:3] # perspective rescale or affine
out_mask = (xy[:, 0] < 0) | (xy[:, 1] < 0) | (xy[:, 0] > self.size[0]) | (xy[:, 1] > self.size[1])
visible[out_mask] = 0
return np.concatenate([xy, visible], axis=-1).reshape(n, nkpt, 3)
def __call__(self, labels):
"""
Affine images and targets.
Args:
labels (dict): a dict of `bboxes`, `segments`, `keypoints`.
"""
if self.pre_transform and "mosaic_border" not in labels:
labels = self.pre_transform(labels)
labels.pop("ratio_pad", None) # do not need ratio pad
img = labels["img"]
cls = labels["cls"]
instances = labels.pop("instances")
# Make sure the coord formats are right
instances.convert_bbox(format="xyxy")
instances.denormalize(*img.shape[:2][::-1])
border = labels.pop("mosaic_border", self.border)
self.size = img.shape[1] + border[1] * 2, img.shape[0] + border[0] * 2 # w, h
# M is affine matrix
# Scale for func:`box_candidates`
img, M, scale = self.affine_transform(img, border)
bboxes = self.apply_bboxes(instances.bboxes, M)
segments = instances.segments
keypoints = instances.keypoints
# Update bboxes if there are segments.
if len(segments):
bboxes, segments = self.apply_segments(segments, M)
if keypoints is not None:
keypoints = self.apply_keypoints(keypoints, M)
new_instances = Instances(bboxes, segments, keypoints, bbox_format="xyxy", normalized=False)
# Clip
new_instances.clip(*self.size)
# Filter instances
instances.scale(scale_w=scale, scale_h=scale, bbox_only=True)
# Make the bboxes have the same scale with new_bboxes
i = self.box_candidates(
box1=instances.bboxes.T, box2=new_instances.bboxes.T, area_thr=0.01 if len(segments) else 0.10
)
labels["instances"] = new_instances[i]
labels["cls"] = cls[i]
labels["img"] = img
labels["resized_shape"] = img.shape[:2]
return labels
def box_candidates(self, box1, box2, wh_thr=2, ar_thr=100, area_thr=0.1, eps=1e-16):
"""
Compute box candidates based on a set of thresholds. This method compares the characteristics of the boxes
before and after augmentation to decide whether a box is a candidate for further processing.
Args:
box1 (numpy.ndarray): The 4,n bounding box before augmentation, represented as [x1, y1, x2, y2].
box2 (numpy.ndarray): The 4,n bounding box after augmentation, represented as [x1, y1, x2, y2].
wh_thr (float, optional): The width and height threshold in pixels. Default is 2.
ar_thr (float, optional): The aspect ratio threshold. Default is 100.
area_thr (float, optional): The area ratio threshold. Default is 0.1.
eps (float, optional): A small epsilon value to prevent division by zero. Default is 1e-16.
Returns:
(numpy.ndarray): A boolean array indicating which boxes are candidates based on the given thresholds.
"""
w1, h1 = box1[2] - box1[0], box1[3] - box1[1]
w2, h2 = box2[2] - box2[0], box2[3] - box2[1]
ar = np.maximum(w2 / (h2 + eps), h2 / (w2 + eps)) # aspect ratio
return (w2 > wh_thr) & (h2 > wh_thr) & (w2 * h2 / (w1 * h1 + eps) > area_thr) & (ar < ar_thr) # candidates
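# Illustrative sketch (editorial addition): RandomPerspective as it is wired up in v8_transforms() further below,
# i.e. applied right after Mosaic/CopyPaste so that "mosaic_border" is already present in the labels dict.
#
#   rp = RandomPerspective(degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0,
#                          pre_transform=LetterBox(new_shape=(640, 640)))
#   labels = rp(labels)  # expects "img", "cls" and an Instances object under "instances"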
class RandomHSV:
"""
This class is responsible for performing random adjustments to the Hue, Saturation, and Value (HSV) channels of an
image.
The adjustments are random but within limits set by hgain, sgain, and vgain.
"""
def __init__(self, hgain=0.5, sgain=0.5, vgain=0.5) -> None:
"""
Initialize RandomHSV class with gains for each HSV channel.
Args:
hgain (float, optional): Maximum variation for hue. Default is 0.5.
sgain (float, optional): Maximum variation for saturation. Default is 0.5.
vgain (float, optional): Maximum variation for value. Default is 0.5.
"""
self.hgain = hgain
self.sgain = sgain
self.vgain = vgain
def __call__(self, labels):
"""
Applies random HSV augmentation to an image within the predefined limits.
The modified image replaces the original image in the input 'labels' dict.
"""
img = labels["img"]
if self.hgain or self.sgain or self.vgain:
r = np.random.uniform(-1, 1, 3) * [self.hgain, self.sgain, self.vgain] + 1 # random gains
hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV))
dtype = img.dtype # uint8
x = np.arange(0, 256, dtype=r.dtype)
lut_hue = ((x * r[0]) % 180).astype(dtype)
lut_sat = np.clip(x * r[1], 0, 255).astype(dtype)
lut_val = np.clip(x * r[2], 0, 255).astype(dtype)
im_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val)))
cv2.cvtColor(im_hsv, cv2.COLOR_HSV2BGR, dst=img) # no return needed
return labels
class RandomFlip:
"""
Applies a random horizontal or vertical flip to an image with a given probability.
Also updates any instances (bounding boxes, keypoints, etc.) accordingly.
"""
def __init__(self, p=0.5, direction="horizontal", flip_idx=None) -> None:
"""
Initializes the RandomFlip class with probability and direction.
Args:
p (float, optional): The probability of applying the flip. Must be between 0 and 1. Default is 0.5.
direction (str, optional): The direction to apply the flip. Must be 'horizontal' or 'vertical'.
Default is 'horizontal'.
flip_idx (array-like, optional): Index mapping for flipping keypoints, if any.
"""
assert direction in ["horizontal", "vertical"], f"Support direction `horizontal` or `vertical`, got {direction}"
assert 0 <= p <= 1.0
self.p = p
self.direction = direction
self.flip_idx = flip_idx
def __call__(self, labels):
"""
Applies random flip to an image and updates any instances like bounding boxes or keypoints accordingly.
Args:
labels (dict): A dictionary containing the keys 'img' and 'instances'. 'img' is the image to be flipped.
'instances' is an object containing bounding boxes and optionally keypoints.
Returns:
(dict): The same dict with the flipped image and updated instances under the 'img' and 'instances' keys.
"""
img = labels["img"]
instances = labels.pop("instances")
instances.convert_bbox(format="xywh")
h, w = img.shape[:2]
h = 1 if instances.normalized else h
w = 1 if instances.normalized else w
# Flip up-down
if self.direction == "vertical" and random.random() < self.p:
img = np.flipud(img)
instances.flipud(h)
if self.direction == "horizontal" and random.random() < self.p:
img = np.fliplr(img)
instances.fliplr(w)
# For keypoints
if self.flip_idx is not None and instances.keypoints is not None:
instances.keypoints = np.ascontiguousarray(instances.keypoints[:, self.flip_idx, :])
labels["img"] = np.ascontiguousarray(img)
labels["instances"] = instances
return labels
class LetterBox:
"""Resize image and padding for detection, instance segmentation, pose."""
def __init__(self, new_shape=(640, 640), auto=False, scaleFill=False, scaleup=True, center=True, stride=32):
"""Initialize LetterBox object with specific parameters."""
self.new_shape = new_shape
self.auto = auto
self.scaleFill = scaleFill
self.scaleup = scaleup
self.stride = stride
self.center = center # Put the image in the middle or top-left
def __call__(self, labels=None, image=None):
"""Return updated labels and image with added border."""
if labels is None:
labels = {}
img = labels.get("img") if image is None else image
shape = img.shape[:2] # current shape [height, width]
new_shape = labels.pop("rect_shape", self.new_shape)
if isinstance(new_shape, int):
new_shape = (new_shape, new_shape)
# Scale ratio (new / old)
r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
if not self.scaleup: # only scale down, do not scale up (for better val mAP)
r = min(r, 1.0)
# Compute padding
ratio = r, r # width, height ratios
new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding
if self.auto: # minimum rectangle
dw, dh = np.mod(dw, self.stride), np.mod(dh, self.stride) # wh padding
elif self.scaleFill: # stretch
dw, dh = 0.0, 0.0
new_unpad = (new_shape[1], new_shape[0])
ratio = new_shape[1] / shape[1], new_shape[0] / shape[0] # width, height ratios
if self.center:
dw /= 2 # divide padding into 2 sides
dh /= 2
if shape[::-1] != new_unpad: # resize
img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
top, bottom = int(round(dh - 0.1)) if self.center else 0, int(round(dh + 0.1))
left, right = int(round(dw - 0.1)) if self.center else 0, int(round(dw + 0.1))
img = cv2.copyMakeBorder(
img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=(114, 114, 114)
) # add border
if labels.get("ratio_pad"):
labels["ratio_pad"] = (labels["ratio_pad"], (left, top)) # for evaluation
if len(labels):
labels = self._update_labels(labels, ratio, dw, dh)
labels["img"] = img
labels["resized_shape"] = new_shape
return labels
else:
return img
def _update_labels(self, labels, ratio, padw, padh):
"""Update labels."""
labels["instances"].convert_bbox(format="xyxy")
labels["instances"].denormalize(*labels["img"].shape[:2][::-1])
labels["instances"].scale(*ratio)
labels["instances"].add_padding(padw, padh)
return labels
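# Worked example (editorial addition): letterboxing a 720x1280 (HxW) image to new_shape=(640, 640) with the
# defaults above gives r = min(640/720, 640/1280) = 0.5, new_unpad = (640, 360), dw = 0 and dh = 280; with
# center=True the image is resized to 640x360 and padded with 140 gray (114, 114, 114) rows top and bottom.
#
#   boxed = LetterBox(new_shape=(640, 640))(image=img)  # img is an HxWx3 numpy array; returns a 640x640x3 array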
class CopyPaste:
"""
Implements the Copy-Paste augmentation as described in the paper https://arxiv.org/abs/2012.07177. This class is
responsible for applying the Copy-Paste augmentation on images and their corresponding instances.
"""
def __init__(self, p=0.5) -> None:
"""
Initializes the CopyPaste class with a given probability.
Args:
p (float, optional): The probability of applying the Copy-Paste augmentation. Must be between 0 and 1.
Default is 0.5.
"""
self.p = p
def __call__(self, labels):
"""
Applies the Copy-Paste augmentation to the given image and instances.
Args:
labels (dict): A dictionary containing:
- 'img': The image to augment.
- 'cls': Class labels associated with the instances.
- 'instances': Object containing bounding boxes, and optionally, keypoints and segments.
Returns:
(dict): Dict with augmented image and updated instances under the 'img', 'cls', and 'instances' keys.
Notes:
1. Instances are expected to have 'segments' as one of their attributes for this augmentation to work.
2. This method modifies the input dictionary 'labels' in place.
"""
im = labels["img"]
cls = labels["cls"]
h, w = im.shape[:2]
instances = labels.pop("instances")
instances.convert_bbox(format="xyxy")
instances.denormalize(w, h)
if self.p and len(instances.segments):
n = len(instances)
_, w, _ = im.shape # height, width, channels
im_new = np.zeros(im.shape, np.uint8)
# Calculate ioa first then select indexes randomly
ins_flip = deepcopy(instances)
ins_flip.fliplr(w)
ioa = bbox_ioa(ins_flip.bboxes, instances.bboxes) # intersection over area, (N, M)
indexes = np.nonzero((ioa < 0.30).all(1))[0] # (N, )
n = len(indexes)
for j in random.sample(list(indexes), k=round(self.p * n)):
cls = np.concatenate((cls, cls[[j]]), axis=0)
instances = Instances.concatenate((instances, ins_flip[[j]]), axis=0)
cv2.drawContours(im_new, instances.segments[[j]].astype(np.int32), -1, (1, 1, 1), cv2.FILLED)
result = cv2.flip(im, 1) # augment segments (flip left-right)
i = cv2.flip(im_new, 1).astype(bool)
im[i] = result[i]
labels["img"] = im
labels["cls"] = cls
labels["instances"] = instances
return labels
class Albumentations:
"""
Albumentations transformations.
Optional, uninstall package to disable. Applies Blur, Median Blur, convert to grayscale, Contrast Limited Adaptive
Histogram Equalization, random change of brightness and contrast, RandomGamma and lowering of image quality by
compression.
"""
def __init__(self, p=1.0):
"""Initialize the transform object for YOLO bbox formatted params."""
self.p = p
self.transform = None
prefix = colorstr("albumentations: ")
try:
import albumentations as A
check_version(A.__version__, "1.0.3", hard=True) # version requirement
# Transforms
T = [
A.Blur(p=0.01),
A.MedianBlur(p=0.01),
A.ToGray(p=0.01),
A.CLAHE(p=0.01),
A.RandomBrightnessContrast(p=0.0),
A.RandomGamma(p=0.0),
A.ImageCompression(quality_lower=75, p=0.0),
]
self.transform = A.Compose(T, bbox_params=A.BboxParams(format="yolo", label_fields=["class_labels"]))
LOGGER.info(prefix + ", ".join(f"{x}".replace("always_apply=False, ", "") for x in T if x.p))
except ImportError: # package not installed, skip
pass
except Exception as e:
LOGGER.info(f"{prefix}{e}")
def __call__(self, labels):
"""Generates object detections and returns a dictionary with detection results."""
im = labels["img"]
cls = labels["cls"]
if len(cls):
labels["instances"].convert_bbox("xywh")
labels["instances"].normalize(*im.shape[:2][::-1])
bboxes = labels["instances"].bboxes
# TODO: add supports of segments and keypoints
if self.transform and random.random() < self.p:
new = self.transform(image=im, bboxes=bboxes, class_labels=cls) # transformed
if len(new["class_labels"]) > 0: # skip update if no bbox in new im
labels["img"] = new["image"]
labels["cls"] = np.array(new["class_labels"])
bboxes = np.array(new["bboxes"], dtype=np.float32)
labels["instances"].update(bboxes=bboxes)
return labels
# TODO: technically this is not an augmentation, maybe we should put this to another files
class Format:
"""
Formats image annotations for object detection, instance segmentation, and pose estimation tasks. The class
standardizes the image and instance annotations to be used by the `collate_fn` in PyTorch DataLoader.
Attributes:
bbox_format (str): Format for bounding boxes. Default is 'xywh'.
normalize (bool): Whether to normalize bounding boxes. Default is True.
return_mask (bool): Return instance masks for segmentation. Default is False.
return_keypoint (bool): Return keypoints for pose estimation. Default is False.
mask_ratio (int): Downsample ratio for masks. Default is 4.
mask_overlap (bool): Whether to overlap masks. Default is True.
batch_idx (bool): Keep batch indexes. Default is True.
"""
def __init__(
self,
bbox_format="xywh",
normalize=True,
return_mask=False,
return_keypoint=False,
return_obb=False,
mask_ratio=4,
mask_overlap=True,
batch_idx=True,
):
"""Initializes the Format class with given parameters."""
self.bbox_format = bbox_format
self.normalize = normalize
self.return_mask = return_mask # set False when training detection only
self.return_keypoint = return_keypoint
self.return_obb = return_obb
self.mask_ratio = mask_ratio
self.mask_overlap = mask_overlap
self.batch_idx = batch_idx # keep the batch indexes
def __call__(self, labels):
"""Return formatted image, classes, bounding boxes & keypoints to be used by 'collate_fn'."""
img = labels.pop("img")
h, w = img.shape[:2]
cls = labels.pop("cls")
instances = labels.pop("instances")
instances.convert_bbox(format=self.bbox_format)
instances.denormalize(w, h)
nl = len(instances)
if self.return_mask:
if nl:
masks, instances, cls = self._format_segments(instances, cls, w, h)
masks = torch.from_numpy(masks)
else:
masks = torch.zeros(
1 if self.mask_overlap else nl, img.shape[0] // self.mask_ratio, img.shape[1] // self.mask_ratio
)
labels["masks"] = masks
if self.normalize:
instances.normalize(w, h)
labels["img"] = self._format_img(img)
labels["cls"] = torch.from_numpy(cls) if nl else torch.zeros(nl)
labels["bboxes"] = torch.from_numpy(instances.bboxes) if nl else torch.zeros((nl, 4))
if self.return_keypoint:
labels["keypoints"] = torch.from_numpy(instances.keypoints)
if self.return_obb:
labels["bboxes"] = (
xyxyxyxy2xywhr(torch.from_numpy(instances.segments)) if len(instances.segments) else torch.zeros((0, 5))
)
# Then we can use collate_fn
if self.batch_idx:
labels["batch_idx"] = torch.zeros(nl)
return labels
def _format_img(self, img):
"""Format the image for YOLO from Numpy array to PyTorch tensor."""
if len(img.shape) < 3:
img = np.expand_dims(img, -1)
img = np.ascontiguousarray(img.transpose(2, 0, 1)[::-1])
img = torch.from_numpy(img)
return img
def _format_segments(self, instances, cls, w, h):
"""Convert polygon points to bitmap."""
segments = instances.segments
if self.mask_overlap:
masks, sorted_idx = polygons2masks_overlap((h, w), segments, downsample_ratio=self.mask_ratio)
masks = masks[None] # (640, 640) -> (1, 640, 640)
instances = instances[sorted_idx]
cls = cls[sorted_idx]
else:
masks = polygons2masks((h, w), segments, color=1, downsample_ratio=self.mask_ratio)
return masks, instances, cls
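# Illustrative sketch (editorial addition): Format is normally appended as the last transform before the
# DataLoader's collate_fn, turning the numpy image and Instances into tensors.
#
#   fmt = Format(bbox_format="xywh", normalize=True, return_mask=False, batch_idx=True)
#   sample = fmt(labels)  # sample["img"] is a CHW tensor (channels flipped BGR->RGB), sample["bboxes"] is (n, 4)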
def v8_transforms(dataset, imgsz, hyp, stretch=False):
"""Convert images to a size suitable for YOLOv8 training."""
pre_transform = Compose(
[
Mosaic(dataset, imgsz=imgsz, p=hyp.mosaic),
CopyPaste(p=hyp.copy_paste),
RandomPerspective(
degrees=hyp.degrees,
translate=hyp.translate,
scale=hyp.scale,
shear=hyp.shear,
perspective=hyp.perspective,
pre_transform=None if stretch else LetterBox(new_shape=(imgsz, imgsz)),
),
]
)
flip_idx = dataset.data.get("flip_idx", []) # for keypoints augmentation
if dataset.use_keypoints:
kpt_shape = dataset.data.get("kpt_shape", None)
if len(flip_idx) == 0 and hyp.fliplr > 0.0:
hyp.fliplr = 0.0
LOGGER.warning("WARNING ⚠� No 'flip_idx' array defined in data.yaml, setting augmentation 'fliplr=0.0'")
elif flip_idx and (len(flip_idx) != kpt_shape[0]):
raise ValueError(f"data.yaml flip_idx={flip_idx} length must be equal to kpt_shape[0]={kpt_shape[0]}")
return Compose(
[
pre_transform,
MixUp(dataset, pre_transform=pre_transform, p=hyp.mixup),
Albumentations(p=1.0),
RandomHSV(hgain=hyp.hsv_h, sgain=hyp.hsv_s, vgain=hyp.hsv_v),
RandomFlip(direction="vertical", p=hyp.flipud),
RandomFlip(direction="horizontal", p=hyp.fliplr, flip_idx=flip_idx),
]
) # transforms
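# Illustrative sketch (editorial addition): building the detection train-time pipeline, assuming `dataset` is a
# YOLODataset and `hyp` is a namespace (e.g. the default cfg loaded as an IterableSimpleNamespace) providing
# mosaic, copy_paste, degrees, translate, scale, shear, perspective, mixup, hsv_h, hsv_s, hsv_v, flipud, fliplr.
#
#   transforms = v8_transforms(dataset, imgsz=640, hyp=hyp)
#   transforms.append(Format(bbox_format="xywh", normalize=True, batch_idx=True))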
# Classification augmentations -----------------------------------------------------------------------------------------
def classify_transforms(
size=224,
mean=DEFAULT_MEAN,
std=DEFAULT_STD,
interpolation: T.InterpolationMode = T.InterpolationMode.BILINEAR,
    crop_fraction: float = DEFAULT_CROP_FRACTION,
):
"""
Classification transforms for evaluation/inference. Inspired by timm/data/transforms_factory.py.
Args:
size (int): image size
mean (tuple): mean values of RGB channels
std (tuple): std values of RGB channels
interpolation (T.InterpolationMode): interpolation mode. default is T.InterpolationMode.BILINEAR.
crop_fraction (float): fraction of image to crop. default is 1.0.
Returns:
(T.Compose): torchvision transforms
"""
if isinstance(size, (tuple, list)):
assert len(size) == 2
scale_size = tuple(math.floor(x / crop_fraction) for x in size)
else:
scale_size = math.floor(size / crop_fraction)
scale_size = (scale_size, scale_size)
    # aspect ratio is preserved, the crop is centered in the image, no borders are added, and content outside the crop is lost
if scale_size[0] == scale_size[1]:
# simple case, use torchvision built-in Resize w/ shortest edge mode (scalar size arg)
tfl = [T.Resize(scale_size[0], interpolation=interpolation)]
else:
# resize shortest edge to matching target dim for non-square target
tfl = [T.Resize(scale_size)]
tfl += [T.CenterCrop(size)]
tfl += [
T.ToTensor(),
T.Normalize(
mean=torch.tensor(mean),
std=torch.tensor(std),
),
]
return T.Compose(tfl)
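# Illustrative sketch (editorial addition): evaluation-time classification preprocessing with the defaults above.
#
#   tfm = classify_transforms(size=224)
#   tensor = tfm(pil_image)  # expects a PIL image; returns a (3, 224, 224) float tensor scaled to [0, 1]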
# Classification augmentations train ---------------------------------------------------------------------------------------
def classify_augmentations(
size=224,
mean=DEFAULT_MEAN,
std=DEFAULT_STD,
scale=None,
ratio=None,
hflip=0.5,
vflip=0.0,
auto_augment=None,
hsv_h=0.015, # image HSV-Hue augmentation (fraction)
hsv_s=0.4, # image HSV-Saturation augmentation (fraction)
hsv_v=0.4, # image HSV-Value augmentation (fraction)
force_color_jitter=False,
erasing=0.0,
interpolation: T.InterpolationMode = T.InterpolationMode.BILINEAR,
):
"""
Classification transforms with augmentation for training. Inspired by timm/data/transforms_factory.py.
Args:
size (int): image size
scale (tuple): scale range of the image. default is (0.08, 1.0)
ratio (tuple): aspect ratio range of the image. default is (3./4., 4./3.)
mean (tuple): mean values of RGB channels
std (tuple): std values of RGB channels
hflip (float): probability of horizontal flip
vflip (float): probability of vertical flip
auto_augment (str): auto augmentation policy. can be 'randaugment', 'augmix', 'autoaugment' or None.
hsv_h (float): image HSV-Hue augmentation (fraction)
hsv_s (float): image HSV-Saturation augmentation (fraction)
hsv_v (float): image HSV-Value augmentation (fraction)
force_color_jitter (bool): force to apply color jitter even if auto augment is enabled
erasing (float): probability of random erasing
interpolation (T.InterpolationMode): interpolation mode. default is T.InterpolationMode.BILINEAR.
Returns:
(T.Compose): torchvision transforms
"""
# Transforms to apply if albumentations not installed
if not isinstance(size, int):
raise TypeError(f"classify_transforms() size {size} must be integer, not (list, tuple)")
scale = tuple(scale or (0.08, 1.0)) # default imagenet scale range
ratio = tuple(ratio or (3.0 / 4.0, 4.0 / 3.0)) # default imagenet ratio range
primary_tfl = [T.RandomResizedCrop(size, scale=scale, ratio=ratio, interpolation=interpolation)]
if hflip > 0.0:
primary_tfl += [T.RandomHorizontalFlip(p=hflip)]
if vflip > 0.0:
primary_tfl += [T.RandomVerticalFlip(p=vflip)]
secondary_tfl = []
disable_color_jitter = False
if auto_augment:
assert isinstance(auto_augment, str)
        # Color jitter is typically disabled when an auto-augment (AA/RA) policy is enabled;
        # force_color_jitter allows overriding that without breaking old hyperparameter configs
disable_color_jitter = not force_color_jitter
if auto_augment == "randaugment":
if TORCHVISION_0_11:
secondary_tfl += [T.RandAugment(interpolation=interpolation)]
else:
LOGGER.warning('"auto_augment=randaugment" requires torchvision >= 0.11.0. Disabling it.')
elif auto_augment == "augmix":
if TORCHVISION_0_13:
secondary_tfl += [T.AugMix(interpolation=interpolation)]
else:
LOGGER.warning('"auto_augment=augmix" requires torchvision >= 0.13.0. Disabling it.')
elif auto_augment == "autoaugment":
if TORCHVISION_0_10:
secondary_tfl += [T.AutoAugment(interpolation=interpolation)]
else:
LOGGER.warning('"auto_augment=autoaugment" requires torchvision >= 0.10.0. Disabling it.')
else:
raise ValueError(
f'Invalid auto_augment policy: {auto_augment}. Should be one of "randaugment", '
f'"augmix", "autoaugment" or None'
)
if not disable_color_jitter:
secondary_tfl += [T.ColorJitter(brightness=hsv_v, contrast=hsv_v, saturation=hsv_s, hue=hsv_h)]
final_tfl = [
T.ToTensor(),
T.Normalize(mean=torch.tensor(mean), std=torch.tensor(std)),
T.RandomErasing(p=erasing, inplace=True),
]
return T.Compose(primary_tfl + secondary_tfl + final_tfl)
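# Illustrative sketch (editorial addition): train-time classification augmentation with RandAugment enabled
# (requires torchvision >= 0.11; otherwise the policy is skipped with a warning, as handled above).
#
#   train_tfm = classify_augmentations(size=224, auto_augment="randaugment", erasing=0.4)
#   tensor = train_tfm(pil_image)  # returns an augmented (3, 224, 224) float tensor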
# NOTE: keep this class for backward compatibility
class ClassifyLetterBox:
"""
YOLOv8 LetterBox class for image preprocessing, designed to be part of a transformation pipeline, e.g.,
T.Compose([LetterBox(size), ToTensor()]).
Attributes:
h (int): Target height of the image.
w (int): Target width of the image.
auto (bool): If True, automatically solves for short side using stride.
stride (int): The stride value, used when 'auto' is True.
"""
def __init__(self, size=(640, 640), auto=False, stride=32):
"""
Initializes the ClassifyLetterBox class with a target size, auto-flag, and stride.
Args:
size (Union[int, Tuple[int, int]]): The target dimensions (height, width) for the letterbox.
auto (bool): If True, automatically calculates the short side based on stride.
stride (int): The stride value, used when 'auto' is True.
"""
super().__init__()
self.h, self.w = (size, size) if isinstance(size, int) else size
self.auto = auto # pass max size integer, automatically solve for short side using stride
self.stride = stride # used with auto
def __call__(self, im):
"""
Resizes the image and pads it with a letterbox method.
Args:
im (numpy.ndarray): The input image as a numpy array of shape HWC.
Returns:
(numpy.ndarray): The letterboxed and resized image as a numpy array.
"""
imh, imw = im.shape[:2]
r = min(self.h / imh, self.w / imw) # ratio of new/old dimensions
h, w = round(imh * r), round(imw * r) # resized image dimensions
# Calculate padding dimensions
hs, ws = (math.ceil(x / self.stride) * self.stride for x in (h, w)) if self.auto else (self.h, self.w)
top, left = round((hs - h) / 2 - 0.1), round((ws - w) / 2 - 0.1)
# Create padded image
im_out = np.full((hs, ws, 3), 114, dtype=im.dtype)
im_out[top : top + h, left : left + w] = cv2.resize(im, (w, h), interpolation=cv2.INTER_LINEAR)
return im_out
# NOTE: keep this class for backward compatibility
class CenterCrop:
"""YOLOv8 CenterCrop class for image preprocessing, designed to be part of a transformation pipeline, e.g.,
T.Compose([CenterCrop(size), ToTensor()]).
"""
def __init__(self, size=640):
"""Converts an image from numpy array to PyTorch tensor."""
super().__init__()
self.h, self.w = (size, size) if isinstance(size, int) else size
def __call__(self, im):
"""
Resizes and crops the center of the image using a letterbox method.
Args:
im (numpy.ndarray): The input image as a numpy array of shape HWC.
Returns:
(numpy.ndarray): The center-cropped and resized image as a numpy array.
"""
imh, imw = im.shape[:2]
m = min(imh, imw) # min dimension
top, left = (imh - m) // 2, (imw - m) // 2
return cv2.resize(im[top : top + m, left : left + m], (self.w, self.h), interpolation=cv2.INTER_LINEAR)
# NOTE: keep this class for backward compatibility
class ToTensor:
"""YOLOv8 ToTensor class for image preprocessing, i.e., T.Compose([LetterBox(size), ToTensor()])."""
def __init__(self, half=False):
"""Initialize YOLOv8 ToTensor object with optional half-precision support."""
super().__init__()
self.half = half
def __call__(self, im):
"""
Transforms an image from a numpy array to a PyTorch tensor, applying optional half-precision and normalization.
Args:
im (numpy.ndarray): Input image as a numpy array with shape (H, W, C) in BGR order.
Returns:
(torch.Tensor): The transformed image as a PyTorch tensor in float32 or float16, normalized to [0, 1].
"""
im = np.ascontiguousarray(im.transpose((2, 0, 1))[::-1]) # HWC to CHW -> BGR to RGB -> contiguous
im = torch.from_numpy(im) # to torch
im = im.half() if self.half else im.float() # uint8 to fp16/32
im /= 255.0 # 0-255 to 0.0-1.0
return im
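# Example (illustrative sketch): composing the legacy ClassifyLetterBox, CenterCrop and ToTensor
# classes into an eval-style pipeline for a BGR numpy image read with cv2. The file path is a
# hypothetical placeholder; cv2, T and the classes above are assumed available in this module.
def _example_legacy_classify_pipeline():
    """Preprocesses one BGR image with the backward-compatible transform classes (sketch)."""
    pipeline = T.Compose([ClassifyLetterBox(size=640), CenterCrop(size=640), ToTensor(half=False)])
    im = cv2.imread("path/to/image.jpg")  # hypothetical path, HWC uint8 BGR
    return pipeline(im)  # torch.Tensor, CHW RGB float32 in [0, 1]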
# File: ultralytics/data/converter.py
# Ultralytics YOLO 🚀, AGPL-3.0 license
import json
from collections import defaultdict
from pathlib import Path
import cv2
import numpy as np
from ultralytics.utils import LOGGER, TQDM
from ultralytics.utils.files import increment_path
def coco91_to_coco80_class():
"""
Converts 91-index COCO class IDs to 80-index COCO class IDs.
Returns:
        (list): A list of 91 entries where the index corresponds to a 91-index class ID (category_id - 1) and the
            value is the matching 80-index class ID, or None if that category has no 80-index equivalent.
"""
return [
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
None,
11,
12,
13,
14,
15,
16,
17,
18,
19,
20,
21,
22,
23,
None,
24,
25,
None,
None,
26,
27,
28,
29,
30,
31,
32,
33,
34,
35,
36,
37,
38,
39,
None,
40,
41,
42,
43,
44,
45,
46,
47,
48,
49,
50,
51,
52,
53,
54,
55,
56,
57,
58,
59,
None,
60,
None,
None,
61,
None,
62,
63,
64,
65,
66,
67,
68,
69,
70,
71,
72,
None,
73,
74,
75,
76,
77,
78,
79,
None,
]
def coco80_to_coco91_class():
"""
Converts 80-index (val2014) to 91-index (paper).
For details see https://tech.amikelive.com/node-718/what-object-categories-labels-are-in-coco-dataset/.
Example:
```python
import numpy as np
a = np.loadtxt('data/coco.names', dtype='str', delimiter='\n')
b = np.loadtxt('data/coco_paper.names', dtype='str', delimiter='\n')
x1 = [list(a[i] == b).index(True) + 1 for i in range(80)] # darknet to coco
x2 = [list(b[i] == a).index(True) if any(b[i] == a) else None for i in range(91)] # coco to darknet
```
"""
return [
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
13,
14,
15,
16,
17,
18,
19,
20,
21,
22,
23,
24,
25,
27,
28,
31,
32,
33,
34,
35,
36,
37,
38,
39,
40,
41,
42,
43,
44,
46,
47,
48,
49,
50,
51,
52,
53,
54,
55,
56,
57,
58,
59,
60,
61,
62,
63,
64,
65,
67,
70,
72,
73,
74,
75,
76,
77,
78,
79,
80,
81,
82,
84,
85,
86,
87,
88,
89,
90,
]
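# Example (illustrative sketch): remapping a raw COCO annotation's 1-based 91-index category_id to
# the 80-index class used by YOLO, mirroring how convert_coco() applies the lookup table.
def _example_coco_class_remap(category_id: int):
    """Returns the 80-index class for a COCO 91-index category_id, or None if that id is unused (sketch)."""
    coco80 = coco91_to_coco80_class()
    return coco80[category_id - 1]  # COCO category_ids are 1-based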
def convert_coco(
labels_dir="../coco/annotations/",
save_dir="coco_converted/",
use_segments=False,
use_keypoints=False,
cls91to80=True,
):
"""
Converts COCO dataset annotations to a YOLO annotation format suitable for training YOLO models.
Args:
labels_dir (str, optional): Path to directory containing COCO dataset annotation files.
save_dir (str, optional): Path to directory to save results to.
use_segments (bool, optional): Whether to include segmentation masks in the output.
use_keypoints (bool, optional): Whether to include keypoint annotations in the output.
cls91to80 (bool, optional): Whether to map 91 COCO class IDs to the corresponding 80 COCO class IDs.
Example:
```python
from ultralytics.data.converter import convert_coco
convert_coco('../datasets/coco/annotations/', use_segments=True, use_keypoints=False, cls91to80=True)
```
Output:
Generates output files in the specified output directory.
"""
# Create dataset directory
save_dir = increment_path(save_dir) # increment if save directory already exists
for p in save_dir / "labels", save_dir / "images":
p.mkdir(parents=True, exist_ok=True) # make dir
# Convert classes
coco80 = coco91_to_coco80_class()
# Import json
for json_file in sorted(Path(labels_dir).resolve().glob("*.json")):
fn = Path(save_dir) / "labels" / json_file.stem.replace("instances_", "") # folder name
fn.mkdir(parents=True, exist_ok=True)
with open(json_file) as f:
data = json.load(f)
# Create image dict
images = {f'{x["id"]:d}': x for x in data["images"]}
# Create image-annotations dict
imgToAnns = defaultdict(list)
for ann in data["annotations"]:
imgToAnns[ann["image_id"]].append(ann)
# Write labels file
for img_id, anns in TQDM(imgToAnns.items(), desc=f"Annotations {json_file}"):
img = images[f"{img_id:d}"]
h, w, f = img["height"], img["width"], img["file_name"]
bboxes = []
segments = []
keypoints = []
for ann in anns:
if ann["iscrowd"]:
continue
# The COCO box format is [top left x, top left y, width, height]
box = np.array(ann["bbox"], dtype=np.float64)
box[:2] += box[2:] / 2 # xy top-left corner to center
box[[0, 2]] /= w # normalize x
box[[1, 3]] /= h # normalize y
                if box[2] <= 0 or box[3] <= 0:  # skip boxes with non-positive width or height
continue
cls = coco80[ann["category_id"] - 1] if cls91to80 else ann["category_id"] - 1 # class
box = [cls] + box.tolist()
if box not in bboxes:
bboxes.append(box)
if use_segments and ann.get("segmentation") is not None:
if len(ann["segmentation"]) == 0:
segments.append([])
continue
elif len(ann["segmentation"]) > 1:
s = merge_multi_segment(ann["segmentation"])
s = (np.concatenate(s, axis=0) / np.array([w, h])).reshape(-1).tolist()
else:
s = [j for i in ann["segmentation"] for j in i] # all segments concatenated
s = (np.array(s).reshape(-1, 2) / np.array([w, h])).reshape(-1).tolist()
s = [cls] + s
segments.append(s)
if use_keypoints and ann.get("keypoints") is not None:
keypoints.append(
box + (np.array(ann["keypoints"]).reshape(-1, 3) / np.array([w, h, 1])).reshape(-1).tolist()
)
# Write
with open((fn / f).with_suffix(".txt"), "a") as file:
for i in range(len(bboxes)):
if use_keypoints:
line = (*(keypoints[i]),) # cls, box, keypoints
else:
line = (
*(segments[i] if use_segments and len(segments[i]) > 0 else bboxes[i]),
) # cls, box or segments
file.write(("%g " * len(line)).rstrip() % line + "\n")
LOGGER.info(f"COCO data converted successfully.\nResults saved to {save_dir.resolve()}")
def convert_dota_to_yolo_obb(dota_root_path: str):
"""
Converts DOTA dataset annotations to YOLO OBB (Oriented Bounding Box) format.
The function processes images in the 'train' and 'val' folders of the DOTA dataset. For each image, it reads the
associated label from the original labels directory and writes new labels in YOLO OBB format to a new directory.
Args:
dota_root_path (str): The root directory path of the DOTA dataset.
Example:
```python
from ultralytics.data.converter import convert_dota_to_yolo_obb
convert_dota_to_yolo_obb('path/to/DOTA')
```
Notes:
The directory structure assumed for the DOTA dataset:
- DOTA
├─ images
│ ├─ train
│ └─ val
└─ labels
├─ train_original
└─ val_original
After execution, the function will organize the labels into:
- DOTA
└─ labels
├─ train
└─ val
"""
dota_root_path = Path(dota_root_path)
# Class names to indices mapping
class_mapping = {
"plane": 0,
"ship": 1,
"storage-tank": 2,
"baseball-diamond": 3,
"tennis-court": 4,
"basketball-court": 5,
"ground-track-field": 6,
"harbor": 7,
"bridge": 8,
"large-vehicle": 9,
"small-vehicle": 10,
"helicopter": 11,
"roundabout": 12,
"soccer-ball-field": 13,
"swimming-pool": 14,
"container-crane": 15,
"airport": 16,
"helipad": 17,
}
def convert_label(image_name, image_width, image_height, orig_label_dir, save_dir):
"""Converts a single image's DOTA annotation to YOLO OBB format and saves it to a specified directory."""
orig_label_path = orig_label_dir / f"{image_name}.txt"
save_path = save_dir / f"{image_name}.txt"
with orig_label_path.open("r") as f, save_path.open("w") as g:
lines = f.readlines()
for line in lines:
parts = line.strip().split()
if len(parts) < 9:
continue
class_name = parts[8]
class_idx = class_mapping[class_name]
coords = [float(p) for p in parts[:8]]
normalized_coords = [
coords[i] / image_width if i % 2 == 0 else coords[i] / image_height for i in range(8)
]
formatted_coords = ["{:.6g}".format(coord) for coord in normalized_coords]
g.write(f"{class_idx} {' '.join(formatted_coords)}\n")
for phase in ["train", "val"]:
image_dir = dota_root_path / "images" / phase
orig_label_dir = dota_root_path / "labels" / f"{phase}_original"
save_dir = dota_root_path / "labels" / phase
save_dir.mkdir(parents=True, exist_ok=True)
image_paths = list(image_dir.iterdir())
for image_path in TQDM(image_paths, desc=f"Processing {phase} images"):
if image_path.suffix != ".png":
continue
image_name_without_ext = image_path.stem
img = cv2.imread(str(image_path))
h, w = img.shape[:2]
convert_label(image_name_without_ext, w, h, orig_label_dir, save_dir)
def min_index(arr1, arr2):
"""
Find a pair of indexes with the shortest distance between two arrays of 2D points.
Args:
arr1 (np.ndarray): A NumPy array of shape (N, 2) representing N 2D points.
arr2 (np.ndarray): A NumPy array of shape (M, 2) representing M 2D points.
Returns:
(tuple): A tuple containing the indexes of the points with the shortest distance in arr1 and arr2 respectively.
"""
dis = ((arr1[:, None, :] - arr2[None, :, :]) ** 2).sum(-1)
return np.unravel_index(np.argmin(dis, axis=None), dis.shape)
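# Example (illustrative sketch): locating the closest pair of points between two segments, which
# merge_multi_segment() uses below to decide where to stitch polygons together.
def _example_min_index():
    """Returns the index pair of the nearest points between two toy point sets (sketch)."""
    a = np.array([[0.0, 0.0], [5.0, 5.0]])
    b = np.array([[6.0, 6.0], [100.0, 100.0]])
    return min_index(a, b)  # (1, 0): a[1] and b[0] are the closest pair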
def merge_multi_segment(segments):
"""
Merge multiple segments into one list by connecting the coordinates with the minimum distance between each segment.
This function connects these coordinates with a thin line to merge all segments into one.
Args:
segments (List[List]): Original segmentations in COCO's JSON file.
Each element is a list of coordinates, like [segmentation1, segmentation2,...].
Returns:
s (List[np.ndarray]): A list of connected segments represented as NumPy arrays.
"""
s = []
segments = [np.array(i).reshape(-1, 2) for i in segments]
idx_list = [[] for _ in range(len(segments))]
# Record the indexes with min distance between each segment
for i in range(1, len(segments)):
idx1, idx2 = min_index(segments[i - 1], segments[i])
idx_list[i - 1].append(idx1)
idx_list[i].append(idx2)
# Use two round to connect all the segments
for k in range(2):
# Forward connection
if k == 0:
for i, idx in enumerate(idx_list):
# Middle segments have two indexes, reverse the index of middle segments
if len(idx) == 2 and idx[0] > idx[1]:
idx = idx[::-1]
segments[i] = segments[i][::-1, :]
segments[i] = np.roll(segments[i], -idx[0], axis=0)
segments[i] = np.concatenate([segments[i], segments[i][:1]])
# Deal with the first segment and the last one
if i in [0, len(idx_list) - 1]:
s.append(segments[i])
else:
idx = [0, idx[1] - idx[0]]
s.append(segments[i][idx[0] : idx[1] + 1])
else:
for i in range(len(idx_list) - 1, -1, -1):
if i not in [0, len(idx_list) - 1]:
idx = idx_list[i]
nidx = abs(idx[1] - idx[0])
s.append(segments[i][nidx:])
return s
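# Example (illustrative sketch): merging two disjoint polygon parts of a single COCO instance into
# one connected contour, as done by convert_coco() when use_segments=True.
def _example_merge_multi_segment():
    """Merges two toy COCO-style segments and returns the combined (N, 2) point array (sketch)."""
    seg1 = [0.0, 0.0, 10.0, 0.0, 10.0, 10.0]  # flat [x1, y1, x2, y2, ...] as stored in COCO JSON
    seg2 = [20.0, 20.0, 30.0, 20.0, 30.0, 30.0]
    return np.concatenate(merge_multi_segment([seg1, seg2]), axis=0)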
def yolo_bbox2segment(im_dir, save_dir=None, sam_model="sam_b.pt"):
"""
Converts existing object detection dataset (bounding boxes) to segmentation dataset or oriented bounding box (OBB)
in YOLO format. Generates segmentation data using SAM auto-annotator as needed.
Args:
im_dir (str | Path): Path to image directory to convert.
save_dir (str | Path): Path to save the generated labels, labels will be saved
into `labels-segment` in the same directory level of `im_dir` if save_dir is None. Default: None.
sam_model (str): Segmentation model to use for intermediate segmentation data; optional.
Notes:
The input directory structure assumed for dataset:
- im_dir
├─ 001.jpg
├─ ..
└─ NNN.jpg
- labels
├─ 001.txt
├─ ..
└─ NNN.txt
"""
from ultralytics.data import YOLODataset
from ultralytics.utils.ops import xywh2xyxy
from ultralytics.utils import LOGGER
from ultralytics import SAM
from tqdm import tqdm
# NOTE: add placeholder to pass class index check
dataset = YOLODataset(im_dir, data=dict(names=list(range(1000))))
if len(dataset.labels[0]["segments"]) > 0: # if it's segment data
LOGGER.info("Segmentation labels detected, no need to generate new ones!")
return
LOGGER.info("Detection labels detected, generating segment labels by SAM model!")
sam_model = SAM(sam_model)
for l in tqdm(dataset.labels, total=len(dataset.labels), desc="Generating segment labels"):
h, w = l["shape"]
boxes = l["bboxes"]
if len(boxes) == 0: # skip empty labels
continue
boxes[:, [0, 2]] *= w
boxes[:, [1, 3]] *= h
im = cv2.imread(l["im_file"])
sam_results = sam_model(im, bboxes=xywh2xyxy(boxes), verbose=False, save=False)
l["segments"] = sam_results[0].masks.xyn
save_dir = Path(save_dir) if save_dir else Path(im_dir).parent / "labels-segment"
save_dir.mkdir(parents=True, exist_ok=True)
for l in dataset.labels:
texts = []
lb_name = Path(l["im_file"]).with_suffix(".txt").name
txt_file = save_dir / lb_name
cls = l["cls"]
for i, s in enumerate(l["segments"]):
line = (int(cls[i]), *s.reshape(-1))
texts.append(("%g " * len(line)).rstrip() % line)
if texts:
with open(txt_file, "a") as f:
f.writelines(text + "\n" for text in texts)
LOGGER.info(f"Generated segment labels saved in {save_dir}")
# File: ultralytics/data/annotator.py
# Ultralytics YOLO 🚀, AGPL-3.0 license
from pathlib import Path
from ultralytics import SAM, YOLO
def auto_annotate(data, det_model="yolov8x.pt", sam_model="sam_b.pt", device="", output_dir=None):
"""
Automatically annotates images using a YOLO object detection model and a SAM segmentation model.
Args:
data (str): Path to a folder containing images to be annotated.
det_model (str, optional): Pre-trained YOLO detection model. Defaults to 'yolov8x.pt'.
sam_model (str, optional): Pre-trained SAM segmentation model. Defaults to 'sam_b.pt'.
device (str, optional): Device to run the models on. Defaults to an empty string (CPU or GPU, if available).
output_dir (str | None | optional): Directory to save the annotated results.
Defaults to a 'labels' folder in the same directory as 'data'.
Example:
```python
from ultralytics.data.annotator import auto_annotate
auto_annotate(data='ultralytics/assets', det_model='yolov8n.pt', sam_model='mobile_sam.pt')
```
"""
det_model = YOLO(det_model)
sam_model = SAM(sam_model)
data = Path(data)
if not output_dir:
output_dir = data.parent / f"{data.stem}_auto_annotate_labels"
Path(output_dir).mkdir(exist_ok=True, parents=True)
det_results = det_model(data, stream=True, device=device)
for result in det_results:
class_ids = result.boxes.cls.int().tolist() # noqa
if len(class_ids):
boxes = result.boxes.xyxy # Boxes object for bbox outputs
sam_results = sam_model(result.orig_img, bboxes=boxes, verbose=False, save=False, device=device)
segments = sam_results[0].masks.xyn # noqa
with open(f"{Path(output_dir) / Path(result.path).stem}.txt", "w") as f:
for i in range(len(segments)):
s = segments[i]
if len(s) == 0:
continue
segment = map(str, segments[i].reshape(-1).tolist())
f.write(f"{class_ids[i]} " + " ".join(segment) + "\n")
# File: ultralytics/data/utils.py
# Ultralytics YOLO 🚀, AGPL-3.0 license
import contextlib
import hashlib
import json
import os
import random
import subprocess
import time
import zipfile
from multiprocessing.pool import ThreadPool
from pathlib import Path
from tarfile import is_tarfile
import cv2
import numpy as np
from PIL import Image, ImageOps
from ultralytics.nn.autobackend import check_class_names
from ultralytics.utils import (
DATASETS_DIR,
LOGGER,
NUM_THREADS,
ROOT,
SETTINGS_YAML,
TQDM,
clean_url,
colorstr,
emojis,
yaml_load,
yaml_save,
)
from ultralytics.utils.checks import check_file, check_font, is_ascii
from ultralytics.utils.downloads import download, safe_download, unzip_file
from ultralytics.utils.ops import segments2boxes
HELP_URL = "See https://docs.ultralytics.com/datasets/detect for dataset formatting guidance."
IMG_FORMATS = "bmp", "dng", "jpeg", "jpg", "mpo", "png", "tif", "tiff", "webp", "pfm" # image suffixes
VID_FORMATS = "asf", "avi", "gif", "m4v", "mkv", "mov", "mp4", "mpeg", "mpg", "ts", "wmv", "webm" # video suffixes
PIN_MEMORY = str(os.getenv("PIN_MEMORY", True)).lower() == "true" # global pin_memory for dataloaders
def img2label_paths(img_paths):
"""Define label paths as a function of image paths."""
sa, sb = f"{os.sep}images{os.sep}", f"{os.sep}labels{os.sep}" # /images/, /labels/ substrings
return [sb.join(x.rsplit(sa, 1)).rsplit(".", 1)[0] + ".txt" for x in img_paths]
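# Example (illustrative sketch): deriving a label path from an image path, assuming the standard
# /images/ and /labels/ sibling-directory layout (shown with POSIX separators).
def _example_img2label_paths():
    """Maps one hypothetical image path to its expected label path (sketch)."""
    return img2label_paths(["datasets/coco8/images/train/000000000009.jpg"])
    # -> ['datasets/coco8/labels/train/000000000009.txt'] on POSIX systems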
def get_hash(paths):
"""Returns a single hash value of a list of paths (files or dirs)."""
size = sum(os.path.getsize(p) for p in paths if os.path.exists(p)) # sizes
h = hashlib.sha256(str(size).encode()) # hash sizes
h.update("".join(paths).encode()) # hash paths
return h.hexdigest() # return hash
def exif_size(img: Image.Image):
"""Returns exif-corrected PIL size."""
s = img.size # (width, height)
if img.format == "JPEG": # only support JPEG images
with contextlib.suppress(Exception):
exif = img.getexif()
if exif:
rotation = exif.get(274, None) # the EXIF key for the orientation tag is 274
if rotation in [6, 8]: # rotation 270 or 90
s = s[1], s[0]
return s
def verify_image(args):
"""Verify one image."""
(im_file, cls), prefix = args
# Number (found, corrupt), message
nf, nc, msg = 0, 0, ""
try:
im = Image.open(im_file)
im.verify() # PIL verify
shape = exif_size(im) # image size
shape = (shape[1], shape[0]) # hw
assert (shape[0] > 9) & (shape[1] > 9), f"image size {shape} <10 pixels"
assert im.format.lower() in IMG_FORMATS, f"invalid image format {im.format}"
if im.format.lower() in ("jpg", "jpeg"):
with open(im_file, "rb") as f:
f.seek(-2, 2)
if f.read() != b"\xff\xd9": # corrupt JPEG
ImageOps.exif_transpose(Image.open(im_file)).save(im_file, "JPEG", subsampling=0, quality=100)
msg = f"{prefix}WARNING ⚠� {im_file}: corrupt JPEG restored and saved"
nf = 1
except Exception as e:
nc = 1
msg = f"{prefix}WARNING ⚠� {im_file}: ignoring corrupt image/label: {e}"
return (im_file, cls), nf, nc, msg
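# Example (illustrative sketch): verifying a single (image, class) sample the same way the dataset
# caching code does, where args packs ((im_file, cls), prefix). The path is a hypothetical placeholder.
def _example_verify_image():
    """Verifies one hypothetical image file and returns the ((im_file, cls), nf, nc, msg) tuple (sketch)."""
    return verify_image((("path/to/image.jpg", 0), ""))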
def verify_image_label(args):
"""Verify one image-label pair."""
im_file, lb_file, prefix, keypoint, num_cls, nkpt, ndim = args
# Number (missing, found, empty, corrupt), message, segments, keypoints
nm, nf, ne, nc, msg, segments, keypoints = 0, 0, 0, 0, "", [], None
try:
# Verify images
im = Image.open(im_file)
im.verify() # PIL verify
shape = exif_size(im) # image size
shape = (shape[1], shape[0]) # hw
assert (shape[0] > 9) & (shape[1] > 9), f"image size {shape} <10 pixels"
assert im.format.lower() in IMG_FORMATS, f"invalid image format {im.format}"
if im.format.lower() in ("jpg", "jpeg"):
with open(im_file, "rb") as f:
f.seek(-2, 2)
if f.read() != b"\xff\xd9": # corrupt JPEG
ImageOps.exif_transpose(Image.open(im_file)).save(im_file, "JPEG", subsampling=0, quality=100)
msg = f"{prefix}WARNING ⚠� {im_file}: corrupt JPEG restored and saved"
# Verify labels
if os.path.isfile(lb_file):
nf = 1 # label found
with open(lb_file) as f:
lb = [x.split() for x in f.read().strip().splitlines() if len(x)]
if any(len(x) > 6 for x in lb) and (not keypoint): # is segment
classes = np.array([x[0] for x in lb], dtype=np.float32)
segments = [np.array(x[1:], dtype=np.float32).reshape(-1, 2) for x in lb] # (cls, xy1...)
lb = np.concatenate((classes.reshape(-1, 1), segments2boxes(segments)), 1) # (cls, xywh)
lb = np.array(lb, dtype=np.float32)
nl = len(lb)
if nl:
if keypoint:
assert lb.shape[1] == (5 + nkpt * ndim), f"labels require {(5 + nkpt * ndim)} columns each"
points = lb[:, 5:].reshape(-1, ndim)[:, :2]
else:
assert lb.shape[1] == 5, f"labels require 5 columns, {lb.shape[1]} columns detected"
points = lb[:, 1:]
assert points.max() <= 1, f"non-normalized or out of bounds coordinates {points[points > 1]}"
assert lb.min() >= 0, f"negative label values {lb[lb < 0]}"
# All labels
max_cls = lb[:, 0].max() # max label count
assert max_cls <= num_cls, (
f"Label class {int(max_cls)} exceeds dataset class count {num_cls}. "
f"Possible class labels are 0-{num_cls - 1}"
)
_, i = np.unique(lb, axis=0, return_index=True)
if len(i) < nl: # duplicate row check
lb = lb[i] # remove duplicates
if segments:
segments = [segments[x] for x in i]
msg = f"{prefix}WARNING ⚠� {im_file}: {nl - len(i)} duplicate labels removed"
else:
ne = 1 # label empty
lb = np.zeros((0, (5 + nkpt * ndim) if keypoint else 5), dtype=np.float32)
else:
nm = 1 # label missing
        lb = np.zeros((0, (5 + nkpt * ndim) if keypoint else 5), dtype=np.float32)
if keypoint:
keypoints = lb[:, 5:].reshape(-1, nkpt, ndim)
if ndim == 2:
kpt_mask = np.where((keypoints[..., 0] < 0) | (keypoints[..., 1] < 0), 0.0, 1.0).astype(np.float32)
keypoints = np.concatenate([keypoints, kpt_mask[..., None]], axis=-1) # (nl, nkpt, 3)
lb = lb[:, :5]
return im_file, lb, shape, segments, keypoints, nm, nf, ne, nc, msg
except Exception as e:
nc = 1
msg = f"{prefix}WARNING ⚠� {im_file}: ignoring corrupt image/label: {e}"
return [None, None, None, None, None, nm, nf, ne, nc, msg]
def polygon2mask(imgsz, polygons, color=1, downsample_ratio=1):
"""
Convert a list of polygons to a binary mask of the specified image size.
Args:
imgsz (tuple): The size of the image as (height, width).
polygons (list[np.ndarray]): A list of polygons. Each polygon is an array with shape [N, M], where
N is the number of polygons, and M is the number of points such that M % 2 = 0.
color (int, optional): The color value to fill in the polygons on the mask. Defaults to 1.
downsample_ratio (int, optional): Factor by which to downsample the mask. Defaults to 1.
Returns:
(np.ndarray): A binary mask of the specified image size with the polygons filled in.
"""
mask = np.zeros(imgsz, dtype=np.uint8)
polygons = np.asarray(polygons, dtype=np.int32)
polygons = polygons.reshape((polygons.shape[0], -1, 2))
cv2.fillPoly(mask, polygons, color=color)
nh, nw = (imgsz[0] // downsample_ratio, imgsz[1] // downsample_ratio)
# Note: fillPoly first then resize is trying to keep the same loss calculation method when mask-ratio=1
return cv2.resize(mask, (nw, nh))
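# Example (illustrative sketch): rasterizing a single triangle polygon into a binary mask with
# polygon2mask(). Coordinates are in pixels for a 640x640 image.
def _example_polygon2mask():
    """Builds a 640x640 uint8 mask with one filled triangle (sketch)."""
    triangle = np.array([[100, 100, 540, 100, 320, 500]], dtype=np.float32)  # one flat (x, y) polygon
    return polygon2mask((640, 640), triangle, color=1, downsample_ratio=1)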
def polygons2masks(imgsz, polygons, color, downsample_ratio=1):
"""
Convert a list of polygons to a set of binary masks of the specified image size.
Args:
imgsz (tuple): The size of the image as (height, width).
polygons (list[np.ndarray]): A list of polygons. Each polygon is an array with shape [N, M], where
N is the number of polygons, and M is the number of points such that M % 2 = 0.
color (int): The color value to fill in the polygons on the masks.
downsample_ratio (int, optional): Factor by which to downsample each mask. Defaults to 1.
Returns:
(np.ndarray): A set of binary masks of the specified image size with the polygons filled in.
"""
return np.array([polygon2mask(imgsz, [x.reshape(-1)], color, downsample_ratio) for x in polygons])
def polygons2masks_overlap(imgsz, segments, downsample_ratio=1):
"""Return a (640, 640) overlap mask."""
masks = np.zeros(
(imgsz[0] // downsample_ratio, imgsz[1] // downsample_ratio),
dtype=np.int32 if len(segments) > 255 else np.uint8,
)
areas = []
ms = []
for si in range(len(segments)):
mask = polygon2mask(imgsz, [segments[si].reshape(-1)], downsample_ratio=downsample_ratio, color=1)
ms.append(mask)
areas.append(mask.sum())
areas = np.asarray(areas)
index = np.argsort(-areas)
ms = np.array(ms)[index]
for i in range(len(segments)):
mask = ms[i] * (i + 1)
masks = masks + mask
masks = np.clip(masks, a_min=0, a_max=i + 1)
return masks, index
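# Example (illustrative sketch): building an overlap mask for two toy segments, where each instance
# is written with a distinct integer value and larger instances are drawn first.
def _example_polygons2masks_overlap():
    """Returns the overlap mask and area-sorted index for two toy segments (sketch)."""
    segs = [
        np.array([[50, 50], [200, 50], [200, 200], [50, 200]], dtype=np.float32),
        np.array([[150, 150], [400, 150], [400, 400], [150, 400]], dtype=np.float32),
    ]
    return polygons2masks_overlap((640, 640), segs, downsample_ratio=1)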
def find_dataset_yaml(path: Path) -> Path:
"""
Find and return the YAML file associated with a Detect, Segment or Pose dataset.
This function searches for a YAML file at the root level of the provided directory first, and if not found, it
performs a recursive search. It prefers YAML files that have the same stem as the provided path. An AssertionError
is raised if no YAML file is found or if multiple YAML files are found.
Args:
path (Path): The directory path to search for the YAML file.
Returns:
(Path): The path of the found YAML file.
"""
files = list(path.glob("*.yaml")) or list(path.rglob("*.yaml")) # try root level first and then recursive
assert files, f"No YAML file found in '{path.resolve()}'"
if len(files) > 1:
files = [f for f in files if f.stem == path.stem] # prefer *.yaml files that match
assert len(files) == 1, f"Expected 1 YAML file in '{path.resolve()}', but found {len(files)}.\n{files}"
return files[0]
def check_det_dataset(dataset, autodownload=True):
"""
Download, verify, and/or unzip a dataset if not found locally.
This function checks the availability of a specified dataset, and if not found, it has the option to download and
unzip the dataset. It then reads and parses the accompanying YAML data, ensuring key requirements are met and also
resolves paths related to the dataset.
Args:
dataset (str): Path to the dataset or dataset descriptor (like a YAML file).
autodownload (bool, optional): Whether to automatically download the dataset if not found. Defaults to True.
Returns:
(dict): Parsed dataset information and paths.
"""
file = check_file(dataset)
# Download (optional)
extract_dir = ""
if zipfile.is_zipfile(file) or is_tarfile(file):
new_dir = safe_download(file, dir=DATASETS_DIR, unzip=True, delete=False)
file = find_dataset_yaml(DATASETS_DIR / new_dir)
extract_dir, autodownload = file.parent, False
# Read YAML
data = yaml_load(file, append_filename=True) # dictionary
# Checks
for k in "train", "val":
if k not in data:
if k != "val" or "validation" not in data:
raise SyntaxError(
emojis(f"{dataset} '{k}:' key missing �.\n'train' and 'val' are required in all data YAMLs.")
)
LOGGER.info("WARNING ⚠� renaming data YAML 'validation' key to 'val' to match YOLO format.")
data["val"] = data.pop("validation") # replace 'validation' key with 'val' key
if "names" not in data and "nc" not in data:
raise SyntaxError(emojis(f"{dataset} key missing �.\n either 'names' or 'nc' are required in all data YAMLs."))
if "names" in data and "nc" in data and len(data["names"]) != data["nc"]:
raise SyntaxError(emojis(f"{dataset} 'names' length {len(data['names'])} and 'nc: {data['nc']}' must match."))
if "names" not in data:
data["names"] = [f"class_{i}" for i in range(data["nc"])]
else:
data["nc"] = len(data["names"])
data["names"] = check_class_names(data["names"])
# Resolve paths
path = Path(extract_dir or data.get("path") or Path(data.get("yaml_file", "")).parent) # dataset root
if not path.is_absolute():
path = (DATASETS_DIR / path).resolve()
# Set paths
data["path"] = path # download scripts
for k in "train", "val", "test":
if data.get(k): # prepend path
if isinstance(data[k], str):
x = (path / data[k]).resolve()
if not x.exists() and data[k].startswith("../"):
x = (path / data[k][3:]).resolve()
data[k] = str(x)
else:
data[k] = [str((path / x).resolve()) for x in data[k]]
# Parse YAML
val, s = (data.get(x) for x in ("val", "download"))
if val:
val = [Path(x).resolve() for x in (val if isinstance(val, list) else [val])] # val path
if not all(x.exists() for x in val):
name = clean_url(dataset) # dataset name with URL auth stripped
m = f"\nDataset '{name}' images not found ⚠�, missing path '{[x for x in val if not x.exists()][0]}'"
if s and autodownload:
LOGGER.warning(m)
else:
m += f"\nNote dataset download directory is '{DATASETS_DIR}'. You can update this in '{SETTINGS_YAML}'"
raise FileNotFoundError(m)
t = time.time()
r = None # success
if s.startswith("http") and s.endswith(".zip"): # URL
safe_download(url=s, dir=DATASETS_DIR, delete=True)
elif s.startswith("bash "): # bash script
LOGGER.info(f"Running {s} ...")
r = os.system(s)
else: # python script
exec(s, {"yaml": data})
dt = f"({round(time.time() - t, 1)}s)"
s = f"success ✅ {dt}, saved to {colorstr('bold', DATASETS_DIR)}" if r in (0, None) else f"failure {dt} �"
LOGGER.info(f"Dataset download {s}\n")
check_font("Arial.ttf" if is_ascii(data["names"]) else "Arial.Unicode.ttf") # download fonts
return data # dictionary
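# Example (illustrative sketch): resolving a small bundled dataset descriptor. With autodownload=True
# missing images may be downloaded; with autodownload=False a FileNotFoundError is raised if absent.
def _example_check_det_dataset():
    """Parses the 'coco8.yaml' descriptor and returns its resolved info dict (sketch)."""
    return check_det_dataset("coco8.yaml", autodownload=False)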
def check_cls_dataset(dataset, split=""):
"""
Checks a classification dataset such as Imagenet.
This function accepts a `dataset` name and attempts to retrieve the corresponding dataset information.
If the dataset is not found locally, it attempts to download the dataset from the internet and save it locally.
Args:
dataset (str | Path): The name of the dataset.
split (str, optional): The split of the dataset. Either 'val', 'test', or ''. Defaults to ''.
Returns:
(dict): A dictionary containing the following keys:
- 'train' (Path): The directory path containing the training set of the dataset.
- 'val' (Path): The directory path containing the validation set of the dataset.
- 'test' (Path): The directory path containing the test set of the dataset.
- 'nc' (int): The number of classes in the dataset.
- 'names' (dict): A dictionary of class names in the dataset.
"""
# Download (optional if dataset=https://file.zip is passed directly)
if str(dataset).startswith(("http:/", "https:/")):
dataset = safe_download(dataset, dir=DATASETS_DIR, unzip=True, delete=False)
elif Path(dataset).suffix in (".zip", ".tar", ".gz"):
file = check_file(dataset)
dataset = safe_download(file, dir=DATASETS_DIR, unzip=True, delete=False)
dataset = Path(dataset)
data_dir = (dataset if dataset.is_dir() else (DATASETS_DIR / dataset)).resolve()
if not data_dir.is_dir():
LOGGER.warning(f"\nDataset not found ⚠�, missing path {data_dir}, attempting download...")
t = time.time()
if str(dataset) == "imagenet":
subprocess.run(f"bash {ROOT / 'data/scripts/get_imagenet.sh'}", shell=True, check=True)
else:
url = f"https://github.com/ultralytics/yolov5/releases/download/v1.0/{dataset}.zip"
download(url, dir=data_dir.parent)
s = f"Dataset download success ✅ ({time.time() - t:.1f}s), saved to {colorstr('bold', data_dir)}\n"
LOGGER.info(s)
train_set = data_dir / "train"
val_set = (
data_dir / "val"
if (data_dir / "val").exists()
else data_dir / "validation"
if (data_dir / "validation").exists()
else None
) # data/test or data/val
test_set = data_dir / "test" if (data_dir / "test").exists() else None # data/val or data/test
if split == "val" and not val_set:
LOGGER.warning("WARNING ⚠� Dataset 'split=val' not found, using 'split=test' instead.")
elif split == "test" and not test_set:
LOGGER.warning("WARNING ⚠� Dataset 'split=test' not found, using 'split=val' instead.")
nc = len([x for x in (data_dir / "train").glob("*") if x.is_dir()]) # number of classes
names = [x.name for x in (data_dir / "train").iterdir() if x.is_dir()] # class names list
names = dict(enumerate(sorted(names)))
# Print to console
for k, v in {"train": train_set, "val": val_set, "test": test_set}.items():
prefix = f'{colorstr(f"{k}:")} {v}...'
if v is None:
LOGGER.info(prefix)
else:
files = [path for path in v.rglob("*.*") if path.suffix[1:].lower() in IMG_FORMATS]
nf = len(files) # number of files
nd = len({file.parent for file in files}) # number of directories
if nf == 0:
if k == "train":
                    raise FileNotFoundError(emojis(f"{dataset} '{k}:' no training images found ❌ "))
                else:
                    LOGGER.warning(f"{prefix} found {nf} images in {nd} classes: WARNING ⚠️ no images found")
            elif nd != nc:
                LOGGER.warning(f"{prefix} found {nf} images in {nd} classes: ERROR ❌ requires {nc} classes, not {nd}")
else:
LOGGER.info(f"{prefix} found {nf} images in {nd} classes ✅ ")
return {"train": train_set, "val": val_set, "test": test_set, "nc": nc, "names": names}
class HUBDatasetStats:
"""
A class for generating HUB dataset JSON and `-hub` dataset directory.
Args:
path (str): Path to data.yaml or data.zip (with data.yaml inside data.zip). Default is 'coco8.yaml'.
task (str): Dataset task. Options are 'detect', 'segment', 'pose', 'classify'. Default is 'detect'.
autodownload (bool): Attempt to download dataset if not found locally. Default is False.
Example:
Download *.zip files from https://github.com/ultralytics/hub/tree/main/example_datasets
i.e. https://github.com/ultralytics/hub/raw/main/example_datasets/coco8.zip for coco8.zip.
```python
from ultralytics.data.utils import HUBDatasetStats
stats = HUBDatasetStats('path/to/coco8.zip', task='detect') # detect dataset
stats = HUBDatasetStats('path/to/coco8-seg.zip', task='segment') # segment dataset
stats = HUBDatasetStats('path/to/coco8-pose.zip', task='pose') # pose dataset
stats = HUBDatasetStats('path/to/imagenet10.zip', task='classify') # classification dataset
stats.get_json(save=True)
stats.process_images()
```
"""
def __init__(self, path="coco8.yaml", task="detect", autodownload=False):
"""Initialize class."""
path = Path(path).resolve()
LOGGER.info(f"Starting HUB dataset checks for {path}....")
self.task = task # detect, segment, pose, classify
if self.task == "classify":
unzip_dir = unzip_file(path)
data = check_cls_dataset(unzip_dir)
data["path"] = unzip_dir
else: # detect, segment, pose
_, data_dir, yaml_path = self._unzip(Path(path))
try:
# Load YAML with checks
data = yaml_load(yaml_path)
data["path"] = "" # strip path since YAML should be in dataset root for all HUB datasets
yaml_save(yaml_path, data)
data = check_det_dataset(yaml_path, autodownload) # dict
data["path"] = data_dir # YAML path should be set to '' (relative) or parent (absolute)
except Exception as e:
raise Exception("error/HUB/dataset_stats/init") from e
self.hub_dir = Path(f'{data["path"]}-hub')
self.im_dir = self.hub_dir / "images"
self.stats = {"nc": len(data["names"]), "names": list(data["names"].values())} # statistics dictionary
self.data = data
@staticmethod
def _unzip(path):
"""Unzip data.zip."""
if not str(path).endswith(".zip"): # path is data.yaml
return False, None, path
unzip_dir = unzip_file(path, path=path.parent)
assert unzip_dir.is_dir(), (
f"Error unzipping {path}, {unzip_dir} not found. " f"path/to/abc.zip MUST unzip to path/to/abc/"
)
return True, str(unzip_dir), find_dataset_yaml(unzip_dir) # zipped, data_dir, yaml_path
def _hub_ops(self, f):
"""Saves a compressed image for HUB previews."""
compress_one_image(f, self.im_dir / Path(f).name) # save to dataset-hub
def get_json(self, save=False, verbose=False):
"""Return dataset JSON for Ultralytics HUB."""
def _round(labels):
"""Update labels to integer class and 4 decimal place floats."""
if self.task == "detect":
coordinates = labels["bboxes"]
elif self.task == "segment":
coordinates = [x.flatten() for x in labels["segments"]]
elif self.task == "pose":
n = labels["keypoints"].shape[0]
coordinates = np.concatenate((labels["bboxes"], labels["keypoints"].reshape(n, -1)), 1)
else:
raise ValueError("Undefined dataset task.")
zipped = zip(labels["cls"], coordinates)
return [[int(c[0]), *(round(float(x), 4) for x in points)] for c, points in zipped]
for split in "train", "val", "test":
self.stats[split] = None # predefine
path = self.data.get(split)
# Check split
if path is None: # no split
continue
files = [f for f in Path(path).rglob("*.*") if f.suffix[1:].lower() in IMG_FORMATS] # image files in split
if not files: # no images
continue
# Get dataset statistics
if self.task == "classify":
from torchvision.datasets import ImageFolder
dataset = ImageFolder(self.data[split])
x = np.zeros(len(dataset.classes)).astype(int)
for im in dataset.imgs:
x[im[1]] += 1
self.stats[split] = {
"instance_stats": {"total": len(dataset), "per_class": x.tolist()},
"image_stats": {"total": len(dataset), "unlabelled": 0, "per_class": x.tolist()},
"labels": [{Path(k).name: v} for k, v in dataset.imgs],
}
else:
from ultralytics.data import YOLODataset
dataset = YOLODataset(img_path=self.data[split], data=self.data, task=self.task)
x = np.array(
[
np.bincount(label["cls"].astype(int).flatten(), minlength=self.data["nc"])
for label in TQDM(dataset.labels, total=len(dataset), desc="Statistics")
]
) # shape(128x80)
self.stats[split] = {
"instance_stats": {"total": int(x.sum()), "per_class": x.sum(0).tolist()},
"image_stats": {
"total": len(dataset),
"unlabelled": int(np.all(x == 0, 1).sum()),
"per_class": (x > 0).sum(0).tolist(),
},
"labels": [{Path(k).name: _round(v)} for k, v in zip(dataset.im_files, dataset.labels)],
}
# Save, print and return
if save:
self.hub_dir.mkdir(parents=True, exist_ok=True) # makes dataset-hub/
stats_path = self.hub_dir / "stats.json"
LOGGER.info(f"Saving {stats_path.resolve()}...")
with open(stats_path, "w") as f:
json.dump(self.stats, f) # save stats.json
if verbose:
LOGGER.info(json.dumps(self.stats, indent=2, sort_keys=False))
return self.stats
def process_images(self):
"""Compress images for Ultralytics HUB."""
from ultralytics.data import YOLODataset # ClassificationDataset
self.im_dir.mkdir(parents=True, exist_ok=True) # makes dataset-hub/images/
for split in "train", "val", "test":
if self.data.get(split) is None:
continue
dataset = YOLODataset(img_path=self.data[split], data=self.data)
with ThreadPool(NUM_THREADS) as pool:
for _ in TQDM(pool.imap(self._hub_ops, dataset.im_files), total=len(dataset), desc=f"{split} images"):
pass
LOGGER.info(f"Done. All images saved to {self.im_dir}")
return self.im_dir
def compress_one_image(f, f_new=None, max_dim=1920, quality=50):
"""
Compresses a single image file to reduced size while preserving its aspect ratio and quality using either the Python
Imaging Library (PIL) or OpenCV library. If the input image is smaller than the maximum dimension, it will not be
resized.
Args:
f (str): The path to the input image file.
f_new (str, optional): The path to the output image file. If not specified, the input file will be overwritten.
max_dim (int, optional): The maximum dimension (width or height) of the output image. Default is 1920 pixels.
quality (int, optional): The image compression quality as a percentage. Default is 50%.
Example:
```python
from pathlib import Path
from ultralytics.data.utils import compress_one_image
for f in Path('path/to/dataset').rglob('*.jpg'):
compress_one_image(f)
```
"""
try: # use PIL
im = Image.open(f)
r = max_dim / max(im.height, im.width) # ratio
if r < 1.0: # image too large
im = im.resize((int(im.width * r), int(im.height * r)))
im.save(f_new or f, "JPEG", quality=quality, optimize=True) # save
except Exception as e: # use OpenCV
LOGGER.info(f"WARNING ⚠� HUB ops PIL failure {f}: {e}")
im = cv2.imread(f)
im_height, im_width = im.shape[:2]
r = max_dim / max(im_height, im_width) # ratio
if r < 1.0: # image too large
im = cv2.resize(im, (int(im_width * r), int(im_height * r)), interpolation=cv2.INTER_AREA)
cv2.imwrite(str(f_new or f), im)
def autosplit(path=DATASETS_DIR / "coco8/images", weights=(0.9, 0.1, 0.0), annotated_only=False):
"""
Automatically split a dataset into train/val/test splits and save the resulting splits into autosplit_*.txt files.
Args:
path (Path, optional): Path to images directory. Defaults to DATASETS_DIR / 'coco8/images'.
weights (list | tuple, optional): Train, validation, and test split fractions. Defaults to (0.9, 0.1, 0.0).
annotated_only (bool, optional): If True, only images with an associated txt file are used. Defaults to False.
Example:
```python
from ultralytics.data.utils import autosplit
autosplit()
```
"""
path = Path(path) # images dir
files = sorted(x for x in path.rglob("*.*") if x.suffix[1:].lower() in IMG_FORMATS) # image files only
n = len(files) # number of files
random.seed(0) # for reproducibility
indices = random.choices([0, 1, 2], weights=weights, k=n) # assign each image to a split
txt = ["autosplit_train.txt", "autosplit_val.txt", "autosplit_test.txt"] # 3 txt files
for x in txt:
if (path.parent / x).exists():
(path.parent / x).unlink() # remove existing
LOGGER.info(f"Autosplitting images from {path}" + ", using *.txt labeled images only" * annotated_only)
for i, img in TQDM(zip(indices, files), total=n):
if not annotated_only or Path(img2label_paths([str(img)])[0]).exists(): # check label
with open(path.parent / txt[i], "a") as f:
f.write(f"./{img.relative_to(path.parent).as_posix()}" + "\n") # add image to txt file