code
stringlengths 501
5.19M
| package
stringlengths 2
81
| path
stringlengths 9
304
| filename
stringlengths 4
145
|
---|---|---|---|
import os
from typing import List, Dict
from zpy.utils.files import get_files_in, file_content_updater
from zpy.cli.agregations import add_config, add_context, add_use_case, add_route_event_manager
from zpy.cli.scaffolding import scaffolding_dispatcher
import click
@click.group()
def cli():
    """Zpy CLI: scaffolding and maintenance commands for zpy API projects."""
    ...
@cli.command()
@click.option('--project', '-p', required=True, type=str, help='Project name')
@click.option('--verbose', '-v', required=False, is_flag=True, default=False, help='Show logs')
@click.option('--description', '-d', required=False, type=str, help='Project description',
              default="This is very boring haha xd...")
@click.option('--developer', '-dev', required=False, type=str, help='Author', default="Zurck'z 2022")
@click.option('--context', '-c', required=False, type=str, help='Context name', default="Users")
@click.option('--use_case', '-uc', required=False, type=str, help='Use Case Name', default="GetUser")
@click.option('--developer_url', '-dev-url', required=False, type=str, help='Author url',
              default="https://www.linkedin.com/in/zurckz/")
@click.option('--open_project', '-op', required=False, is_flag=True, default=False, help='Open project')
@click.option('--only_content', '-oc', required=False, is_flag=True, default=False,
              help='Create only structure project')
def make(project, verbose, description, developer, context, use_case, developer_url, open_project,
         only_content):
    """
    Create new project
    \f
    @param project: project name (root directory of the generated skeleton)
    @param verbose: print progress logs while generating
    @param description: project description stamped into generated files
    @param developer: author name stamped into generated files
    @param context: initial bounded-context name (lower-cased for packages)
    @param use_case: initial use-case class name
    @param developer_url: author url stamped into generated files
    @param open_project: when set, open the generated project with "code" (VS Code)
    @param only_content: generate files into the current directory without a root folder
    @return:
    """
    # Positional scaffolding data consumed by the placeholder builders
    # (zpy.cli.scaffolding.placeholders); the trailing None slot is replaced
    # with the generation path by basic_project.
    in_data: List[Dict[str, str]] = [{"value": project, "title": "Project name:"},
                                     {"value": description, "title": "Project description:"},
                                     {"value": developer, "title": "Author:"},
                                     {"value": developer_url, "title": "Url Author:"},
                                     {"value": context.lower(), "title": "Context:"},
                                     {"value": use_case, "title": "Use Case:"},
                                     {"value": context.title(), "title": "Repository"},
                                     {"value": open_project, "title": "Open Project?:"},
                                     {"value": only_content, "title": "Only Content?:"},
                                     None
                                     ]
    if verbose is True:
        click.echo(f'\N{grinning face} - Initializing for create new project...')
        for data in in_data:
            if data is not None:
                click.echo(f'\N{grinning face} - With {data["title"]} {data["value"]}')
    scaffolding_dispatcher(in_data, verbose, click)
@cli.group()
def add():
    """
    Add new context, case or event mappers
    \f
    @return:
    """
    # Command group only; subcommands do the work.
    ...
@add.command()
@click.option('--name', '-n', required=True, type=str, is_flag=False, help='Bounded Context name')
@click.option('--use_case', '-uc', required=False, type=str, help='Use Case Name', default="GetUser")
@click.option('--force', '-f', required=False, type=bool, is_flag=True, help='Force creation, skipping project',
              default=False)
def context(name: str, use_case: str, force: bool):
    """
    Add new context to project.
    \f
    @param name: bounded-context name (lower-cased for folders, title-cased for the repository class)
    @param use_case: name of the initial use case generated inside the context
    @param force: generate even when not inside a zpy-generated project layout
    @return:
    """
    add_context(name, use_case, force)
@add.command()
@click.option('--name', '-n', required=True, type=str, is_flag=False, help='Use case name')
@click.option('--context', '-c', required=True, type=str, is_flag=False,
              help='Context name where use case will be created')
# @click.option('--repository', '-r', required=False, type=str, help='Repository dependency', default="AwesomeRepository")
def case(name: str, context: str):
    """
    Add new use case for specific context
    \f
    @param name: use-case name (converted to CamelCase/snake_case by the generator)
    @param context: existing context that will receive the use case
    @return:
    """
    add_use_case(name, context)
@add.command(name='event-mapper')
@click.option('--force', '-f', required=False, type=bool, is_flag=True,
              help='Force creation, if exist the file will be replaced',
              default=False)
def event_mapper_manager(force: bool):
    """
    Create basic event mapper manager configuration
    \f
    @param force: overwrite src/api/config.py when it already exists
    @return:
    """
    add_route_event_manager(force)
@add.command()
@click.option('--database', '-db', required=True, type=bool, is_flag=True, help='Database configuration')
def config(database: bool):
    """
    Add basic database configuration
    \f
    @param database: database-configuration flag (forwarded to add_config)
    @return:
    """
    add_config(database)
@cli.command()
def drop():
    """
    Not available yet.
    \f
    @return:
    """
    # Placeholder command -- no destructive behavior implemented yet.
    click.echo('Dropping...')
@cli.command()
@click.option('--directory', '-d', required=False, type=str, is_flag=False, help='Directory', default='.')
@click.option('--extension', '-ex', required=True, type=str, help='File type')
@click.option('--find', '-fs', required=True, type=str, help='Find substring for replace')
@click.option('--new', '-ns', required=False, type=str, help='New substring. Use: [@EMPTY] for empty str',
              default="[@EMPTY]")
def content_replace(directory: str, extension: str, find: str, new: str = '[@EMPTY]'):
    """
    Replace specific string in files with provided extension
    \f
    @param directory: directory to scan; '.' expands to the current working directory
    @param extension: file-extension filter passed to get_files_in
    @param find: substring to search for
    @param new: replacement substring; the sentinel '[@EMPTY]' means empty string
    @return:
    """
    if directory == '.':
        directory = os.getcwd()
    if new == '[@EMPTY]':  # sentinel meaning "replace with nothing"
        new = ''
    click.echo('Starting files content replacement...')
    click.echo(f'Directory: {directory}')
    click.echo(f'File Filtered by: {extension}')
    click.echo(f'Value to replace: \'{find}\'')
    click.echo(f'New value: \'{new}\'')
    # NOTE(review): the original defined an unused `custom_mutator` closure here
    # (filtering lines containing "from"/"src."); it was never passed to
    # file_content_updater, so it has been removed as dead code.
    for file in get_files_in(directory, extension, True):
        file_content_updater(file, find=find, replaced=new)
# Code-injection templates keyed by feature name. Each entry describes a target
# file ("path") and the import/block lines the CLI appends to it; "@"-prefixed
# tokens (e.g. @context_name) are substituted by zpy.cli.scaffolding.place_holders.
#
# NOTE(review): several adjacent string literals below have no separating comma,
# so Python concatenates them into a single list element. writelines() output is
# unaffected, so the original grouping is preserved byte-for-byte. Some emoji in
# the template comments are mojibake from a past encoding round-trip; they are
# kept as-is to avoid changing generated output.
adds = {
    "di.py": {
        "type": "file",
        "module": "db-oracle",
        "path": "src/di.py",
        "code": {
            "imports": [
                "from zpy.utils import get_env_or_throw as var\n",
                "from zdb.oracle import ZOracle, ZDBConfig\n"
            ],
            "blocks": [
                "\n",
                "db_config: ZDBConfig = ZDBConfig(\"DB_USER\", \"DB_PASSWORD\", \"DB_NAME\",\"DB_HOST\", 1521,service=\"XE\")\n"
                "db_mngr: ZOracle = ZOracle.setup_of(db_config)\n"
                "\n",
                "if var('ENVIRONMENT') == 'local':\n",
                " # Setup only the environment is local.\n",
                " db_mngr.init_local_client(path= var('ORACLE_CLIENT_PATH'))\n",
                "\n"
            ]
        }
    },
    "context_imports": {
        "type": "file",
        "module": "db-oracle",
        "path": "src/di.py",
        "code": {
            "imports": [
                "\n"
                "# Imports of @context_name context π¦\n"
                "from contexts.@context_name import @usecase_class, @repository_name, @repository_impl\n"
            ],
            "blocks": [
                "\n# Creation of instances of use cases, repositories etc. of the context: @context_name π¦\n",
                "@context_name_repository: @repository_name = @repository_impl()\n"
                "@usecase_var_uc: UseCase[Any, Any] = @usecase_class(@context_name_repository)\n"
                "\n"
            ]
        }
    },
    "use-cases": {
        "type": "file",
        "module": "cli",
        "path": "src/di.py",
        "code": {
            "imports": [
                "\n"
                "# Import use case of @context_name context π¦\n"
                "from src.contexts.@context_name.application.@usecase_file import @usecase_class\n"
            ],
            "blocks": [
                "\n# Creation instance of use cases of the context: @context_name π¦\n",
                "@usecase_var: UseCase[Any, Any] = @usecase_class(@context_name_repository)\n"
                "\n"
            ]
        }
    },
    "routes-event-mappers": {
        "type": "file",
        "module": "cli",
        "path": "src/api/config.py",
        "code": {
            "imports": [
                "# πΈ Generated by zPy"
                "\n"
                "from zpy.api.flask.cloud_handlers import RouteEventMapperManager, RouteEventMapper as Route\n",
                "from src.contexts.users.infrastructure.mutators import RatingDetailResponseMutator # β οΈ You need to define this\n"
            ],
            "blocks": [
                "\n# Event Route Manager configurations",
                """\n# π‘ Remove any configuration you don't need for each route
# π‘ If you don't use initializer remove param
event_mapper_manager = RouteEventMapperManager(initializer=lambda e, c: e) \\
.add_meta('your_shared_dependency', instance_or_value) \\
.add(Route(route="/users/{id}/ratings/{rating-id}")
.for_params(ParamsDecrypterMapper(path_params_keys=['id', 'rating-id']))
.for_request(BodyDecrypterMapper())
.for_response(BodyEncrypterMapper())
.with_meta('request_model', UpdateRatingRequestMutator)
.with_meta('response_model', RatingDetailResponseMutator)
.with_patch()) \\
.add(Route(route="/users/{id}/calls")
.for_params(ParamsDecrypterMapper(path_params_keys=['id']))
.with_get())"""
                "\n"
            ]
        }
    }
}
import logging
import os
from typing import Any, List, Optional
from .agregations import adds
from zpy.cli.scaffolding import creator, place_holders
from zpy.cli.scaffolding.boilerplates import basic
from zpy.cli.utils import exist_folder, join_path, if_exist_move, camel_case
def get_last_import(lines):
    """Return the insertion index just after the last import-like line.

    Scans *lines* for the final line containing the substring "import" or
    "from" and returns the position right after it. When nothing matches the
    result is 1 (index 0 plus one), mirroring the historical behaviour.
    """
    last_hit = 0
    for position, line in enumerate(lines):
        if "import" in line or "from" in line:
            last_hit = position
    return last_hit + 1
def add_context(name: str, use_case: str, force: bool):
    """Generate a new bounded context under src/contexts and wire it into src/di.py.

    @param name: context name; lower-cased for the package, title-cased for the repository class
    @param use_case: name of the initial use case created inside the context
    @param force: generate even when the src/contexts layout is not found
    """
    current_directory = os.getcwd()
    initial_dir = os.getcwd()
    # if_exist_move returns (found, directory); presumably it also descends into
    # the sub-folder when found -- confirm against zpy.cli.utils.
    exist, current_directory = if_exist_move(current_directory, 'src')
    exist, current_directory = if_exist_move(current_directory, 'contexts')
    if exist is False and force is False:
        print("Oops!! It seems you are not in a project generated by zpy...")
        print("Can't find the src or contexts directory")
        print("\nIf you want generate the context outside of project use the -f flag when add the context.\n")
        print(f'\nzpy add context -n {name} -uc {use_case} -f\n')
        return
    # Positional scaffolding data: only the context/use-case/repository slots
    # and the trailing base path are consumed by the placeholder builders here.
    in_data = [{"value": "", "title": "Project name:"},
               {"value": "", "title": "Project description:"},
               {"value": "", "title": "Author:"},
               {"value": "", "title": "Url Author:"},
               {"value": name.lower(), "title": "Context:"},
               {"value": use_case, "title": "Use Case:"},
               {"value": name.title(), "title": "Repository"},
               {"value": "", "title": "Open Project?:"},
               {"value": "", "title": "Only Content?:"},
               current_directory
               ]
    try:
        creator(current_directory, basic['src']['child']['contexts']['child'], in_data, False, force)
    except Exception as e:
        print(e.args)
        print("Can't create context...")
        return
    try:
        file_writer(join_path(initial_dir, "src/di.py"), adds.get("context_imports").get("code"), in_data)
    except:  # best-effort: di.py may be missing when generating outside a project
        print("Can't update dependencies file... Not Found!")
    print(f"\nContext with name: {name} created successfully!\n")
def add_use_case(name: str, context: str):
    """Generate a new use case inside an existing context and wire it into src/di.py.

    @param name: use-case name; camel-cased for the class by camel_case()
    @param context: existing context whose application layer receives the use case
    """
    current_directory = os.getcwd()
    initial_dir = os.getcwd()
    exist, current_directory = if_exist_move(current_directory, 'src')
    exist, current_directory = if_exist_move(current_directory, 'contexts')
    exist, current_directory = if_exist_move(current_directory, context)
    exist, current_directory = if_exist_move(current_directory, 'application')
    if exist is False:
        print("Oops!! It seems you are not in a project generated by zpy...")
        print(f"Can't find the src/contexts/{context}/application directory")
        print("\nRemember that you need execute command in the root path of the project.")
        return
    # Positional scaffolding data consumed by the placeholder builders.
    in_data = [{"value": "", "title": "Project name:"},
               {"value": "", "title": "Project description:"},
               {"value": "", "title": "Author:"},
               {"value": "", "title": "Url Author:"},
               {"value": context.lower(), "title": "Context:"},
               {"value": camel_case(name), "title": "Use Case:"},
               {"value": context.title(), "title": "Repository"},
               {"value": "", "title": "Open Project?:"},
               {"value": "", "title": "Only Content?:"},
               current_directory
               ]
    try:
        creator(current_directory,
                basic['src']['child']['contexts']['child']['@context_name']['child']['application']['child'], in_data,
                False, False)
    except Exception as e:
        print(e.args)
        print("Can't create the use case...")
        # FIX: previously execution fell through after a failure and still
        # printed the success message; bail out like add_context does.
        return
    try:
        file_writer(join_path(initial_dir, "src/di.py"), adds.get("use-cases").get("code"), in_data)
    except Exception:  # best-effort: di.py may be missing (was a bare except)
        print("Can't update dependencies file... Not Found!")
    print(f"\nUseCase with name: {name} created successfully!\n")
def add_config(db):
    """Append the oracle database wiring template (adds["di.py"]) to ./src/di.py.

    @param db: database flag forwarded from the CLI; currently not inspected --
        the database template is always applied (NOTE(review): confirm intent).
    """
    print("Add database configuration...")
    # Path is relative to the invocation directory (project root expected).
    di_path = "./src/di.py"
    codes = adds.get("di.py").get("code")
    file_writer(di_path, codes, None)
    print(
        "Execute: \n\n\tvirtualenv venv\n\t./venv/Scripts/activate\n\tpip install -r requirements.txt\n\tConfigure environment variables\n\tpython ./src/local_deploy.py")
def add_route_event_manager(force: bool):
    """Write the route/event-mapper template (adds["routes-event-mappers"]) to config.py.

    @param force: overwrite an existing config.py instead of aborting
    """
    print("Exploring project and preparing source...")
    to_add = adds.get("routes-event-mappers")
    current_directory = os.getcwd()
    exist, current_directory = if_exist_move(current_directory, 'src')
    exist, current_directory = if_exist_move(current_directory, 'api')
    if exist is False:
        print("Oops!! It seems you are not in a project generated by zpy...")
        print(f"Can't find the src/api/ directory")
        print("\nRemember that you need execute command in the root path of the project.")
        return
    # NOTE(review): './config.py' resolves against the process cwd -- this
    # assumes if_exist_move chdirs into src/api; confirm in zpy.cli.utils.
    if not force and os.path.exists('./config.py'):
        print("Oops!! Configuration file already exists...")
        print("If you need replace execute the command with -f flag")
        print("zpy add event-manager -f")
        return
    codes = to_add.get('code')
    with open('./config.py', "w+", encoding="utf-8") as f:
        f.writelines(codes['imports'])
        f.writelines(codes['blocks'])
    print("Route config file created successfully...")
def file_writer(di_path: str, codes: dict, data: Optional[List[Any]]):
    """Inject template imports and code blocks into an existing python file.

    Imports from codes["imports"] that are not already present are inserted
    right after the file's last import line; codes["blocks"] are appended at
    the end. Placeholders in both are resolved with *data*.

    @param di_path: path of the file to rewrite in place
    @param codes: template dict with "imports" and "blocks" line lists
    @param data: positional scaffolding data for place_holders (None = verbatim)
    """
    print("Searching for dependencies file...")
    try:
        with open(di_path, "r+", encoding="utf-8") as f:
            lines = f.readlines()
            imports_to_add = []
            # Skip template imports already present (compared ignoring EOL chars).
            for line_toad in codes["imports"]:
                to_add = True
                for line in lines:
                    if line.strip("\n\r\t") == line_toad.strip("\n\r\t"):
                        to_add = False
                        break
                if to_add is True:
                    imports_to_add.append(place_holders(line_toad, data))
            final_lines = [place_holders(line, data) for line in codes.get("blocks")]
            index = get_last_import(lines)
            head = lines[:index]
            # FIX: the original sliced lines[index + 1:], silently dropping the
            # first line after the imports on every injection.
            tail = lines[index:]
            f.seek(0)
            f.truncate(0)
            f.writelines(head)
            f.writelines(imports_to_add)
            f.writelines(tail)
            f.writelines(final_lines)
            print("Configuration added into di.py...")
    except Exception as e:
        logging.exception(e)
from datetime import datetime
import re
from typing import List, Optional
def to_snake(text: str) -> str:
    """Convert a CamelCase identifier to snake_case (e.g. "GetUser" -> "get_user")."""
    # Insert an underscore before every capital that is not at the start,
    # then lower-case the whole string.
    boundary = re.compile(r'(?<!^)(?=[A-Z])')
    return boundary.sub('_', text).lower()
# Placeholder substitution table used by place_holders(). Each entry pairs an
# "@token" found in template text with a builder deriving the replacement from
# the positional scaffolding data list `x`:
#   x[0]=project, x[1]=description, x[2]=author, x[3]=author url, x[4]=context,
#   x[5]=use case, x[6]=repository, x[9]=base path (a plain string, not a dict).
# The optional "ex" key documents an example input value for the builder.
_places_holders = [
    {
        "place": "@project_name",
        "builder": lambda x: x[0]["value"]
    },
    {
        "place": "@project_description",
        "builder": lambda x: x[1]["value"]
    },
    {
        "place": "@repository_name",
        "builder": lambda x: f"{x[6]['value'].title()}Repository"
    },
    {
        "place": "@repository_impl",
        "builder": lambda x: f"Awesome{x[6]['value'].title()}Repository"
    },
    {
        "place": "@repository_action",
        "builder": lambda x: f"{to_snake(x[5]['value']).lower()}"
    },
    {
        "place": "@repository_file",
        "builder": lambda x: f"{x[6]['value'].lower()}_repository"
    },
    {
        "place": "@usecase_file",
        "ex": "GetUser",
        "builder": lambda x: f"{to_snake(x[5]['value'])}"
    },
    {
        "place": "@usecase_file_test",
        "ex": "GetUser",
        "builder": lambda x: f"{to_snake(x[5]['value'])}_test"
    },
    {
        "place": "@usecase_class",
        "ex": "GetUser",
        "builder": lambda x: f"{x[5]['value']}"
    },
    {
        "place": "@usecase_var",
        "ex": "GetUser",
        "builder": lambda x: f"{to_snake(x[5]['value'])}_uc"
    },
    {
        "place": "@developer",
        "builder": lambda x: f"{x[2]['value']}"
    },
    {
        "place": "@dev_url",
        "builder": lambda x: f"{x[3]['value']}"
    },
    {
        "place": "@context_name",
        "builder": lambda x: f"{x[4]['value']}"
    },
    {
        # Generation date stamped into file headers.
        "place": "@date",
        "builder": lambda x: datetime.today().strftime('%Y-%m-%d')
    },
    {
        # Escape backslashes so Windows paths survive being written into code.
        "place": "@base_path",
        "builder": lambda x: str(x[9]).replace("\\", "\\\\")
    },
]
def place_holders(raw: str, data: Optional[List]) -> str:
    """Substitute every known "@token" placeholder in *raw*.

    @param raw: template text possibly containing @tokens
    @param data: positional scaffolding data consumed by the builders;
        when None the text is returned untouched
    @return: text with every resolvable placeholder replaced
    """
    if data is None:
        return raw
    for holder in _places_holders:
        try:
            raw = raw.replace(holder["place"], holder["builder"](data))
        # FIX: was a bare `except:` (also swallowed SystemExit/KeyboardInterrupt).
        # Best-effort on purpose: a builder may index fields this caller did not
        # populate; leave that placeholder untouched and continue.
        except Exception:
            continue
    return raw
ignore: str = """# OS file control
**desktop.ini
**.DS_Store
.vscode
install_zpy.bat
run.bat
run.py
tests.py
resources
src/local_deploy.py
# AWS
/.aws-sam/
# Compiled class file
*.class
# Log file
*.log
# BlueJ files
*.ctxt
# Mobile Tools for Java (J2ME)
.mtj.tmp/
# Package Files #
*.jar
*.war
*.nar
*.ear
*.zip
*.tar.gz
*.rar
# virtual machine crash logs, see http://www.java.com/en/download/help/error_hotspot.xml
hs_err_pid*
# Xcode
#
# gitignore contributors: remember to update Global/Xcode.gitignore, Objective-C.gitignore & Swift.gitignore
## User settings
xcuserdata/
## compatibility with Xcode 8 and earlier (ignoring not required starting Xcode 9)
*.xcscmblueprint
*.xccheckout
## compatibility with Xcode 3 and earlier (ignoring not required starting Xcode 4)
build/
DerivedData/
*.moved-aside
*.pbxuser
!default.pbxuser
*.mode1v3
!default.mode1v3
*.mode2v3
!default.mode2v3
*.perspectivev3
!default.perspectivev3
## Obj-C/Swift specific
*.hmap
## App packaging
*.ipa
*.dSYM.zip
*.dSYM
## Playgrounds
timeline.xctimeline
playground.xcworkspace
# Swift Package Manager
#
# Add this line if you want to avoid checking in source code from Swift Package Manager dependencies.
# Packages/
# Package.pins
# Package.resolved
# *.xcodeproj
#
# Xcode automatically generates this directory with a .xcworkspacedata file and xcuserdata
# hence it is not needed unless you have added a package configuration file to your project
# .swiftpm
.build/
# CocoaPods
#
# We recommend against adding the Pods directory to your .gitignore. However
# you should judge for yourself, the pros and cons are mentioned at:
# https://guides.cocoapods.org/using/using-cocoapods.html#should-i-check-the-pods-directory-into-source-control
#
# Pods/
#
# Add this line if you want to avoid checking in source code from the Xcode workspace
# *.xcworkspace
# Carthage
#
# Add this line if you want to avoid checking in source code from Carthage dependencies.
# Carthage/Checkouts
Carthage/Build/
# Accio dependency management
Dependencies/
.accio/
# fastlane
#
# It is recommended to not store the screenshots in the git repo.
# Instead, use fastlane to re-generate the screenshots whenever they are needed.
# For more information about the recommended setup visit:
# https://docs.fastlane.tools/best-practices/source-control/#source-control
fastlane/report.xml
fastlane/Preview.html
fastlane/screenshots/**/*.png
fastlane/test_output
# Code Injection
#
# After new code Injection tools there's a generated folder /iOSInjectionProject
# https://github.com/johnno1962/injectionforxcode
iOSInjectionProject/
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit tests / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
.pybuilder/
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# PEP 582; used by e.g. github.com/David-OConnor/pyflow
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# pytype static type analyzer
.pytype/
# Cython debug symbols
cython_debug/
# Logs
logs
*.log
npm-debug.log*
yarn-debug.log*
yarn-error.log*
lerna-debug.log*
# Diagnostic reports (https://nodejs.org/api/report.html)
report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json
# Runtime data
pids
*.pid
*.seed
*.pid.lock
# Directory for instrumented libs generated by jscoverage/JSCover
lib-cov
# Coverage directory used by tools like istanbul
coverage
*.lcov
# nyc tests coverage
.nyc_output
# Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files)
.grunt
# Bower dependency directory (https://bower.io/)
bower_components
# node-waf configuration
.lock-wscript
# Compiled binary addons (https://nodejs.org/api/addons.html)
build/Release
# Dependency directories
node_modules/
jspm_packages/
# Snowpack dependency directory (https://snowpack.dev/)
web_modules/
# TypeScript cache
*.tsbuildinfo
# Optional npm cache directory
.npm
# Optional eslint cache
.eslintcache
# Microbundle cache
.rpt2_cache/
.rts2_cache_cjs/
.rts2_cache_es/
.rts2_cache_umd/
# Optional REPL history
.node_repl_history
# Output of 'npm pack'
*.tgz
# Yarn Integrity file
.yarn-integrity
# dotenv environment variables file
.env
.env.test
# parcel-bundler cache (https://parceljs.org/)
.cache
.parcel-cache
# Next.js build output
.next
out
# Nuxt.js build / generate output
.nuxt
dist
# Gatsby files
.cache/
# Comment in the public line in if your project uses Gatsby and not Next.js
# https://nextjs.org/blog/next-9-1#public-directory-support
# public
# vuepress build output
.vuepress/dist
# Serverless directories
.serverless/
# FuseBox cache
.fusebox/
# DynamoDBService Local files
.dynamodb/
# TernJS port file
.tern-port
# Stores VSCode versions used for testing VSCode extensions
.vscode-test
# yarn v2
.yarn/cache
.yarn/unplugged
.yarn/build-state.yml
.yarn/install-state.gz
.pnp.*
target/
pom.xml.tag
pom.xml.releaseBackup
pom.xml.versionsBackup
pom.xml.next
release.properties
dependency-reduced-pom.xml
buildNumber.properties
.mvn/timing.properties
# https://github.com/takari/maven-wrapper#usage-without-binary-jar
.mvn/wrapper/maven-wrapper.jar
.gradle
**/build/
!src/**/build/
# Ignore Gradle GUI config
gradle-app.setting
# Avoid ignoring Gradle wrapper jar file (.jar files are usually ignored)
!gradle-wrapper.jar
# Cache of project
.gradletasknamecache
# # Work around https://youtrack.jetbrains.com/issue/IDEA-116898
# gradle/wrapper/gradle-wrapper.properties
# Built application files
*.apk
*.aar
*.ap_
*.aab
# Files for the ART/Dalvik VM
*.dex
# Java class files
*.class
# Generated files
bin/
gen/
out/
# Uncomment the following line in case you need and you don't have the release build type files in your app
# release/
# Gradle files
.gradle/
build/
# Local configuration file (sdk path, etc)
local.properties
# Proguard folder generated by Eclipse
proguard/
# Log Files
*.log
# Android Studio Navigation editor temp files
.navigation/
# Android Studio captures folder
captures/
# IntelliJ
*.iml
.idea/
.idea/workspace.xml
.idea/tasks.xml
.idea/gradle.xml
.idea/assetWizardSettings.xml
.idea/dictionaries
.idea/libraries
# Android Studio 3 in .gitignore file.
.idea/caches
.idea/modules.xml
# Comment next line if keeping position of elements in Navigation Editor is relevant for you
.idea/navEditor.xml
# Keystore files
# Uncomment the following lines if you do not want to check your keystore files in.
#*.jks
#*.keystore
# External native build folder generated in Android Studio 2.2 and later
.externalNativeBuild
.cxx/
# Google Services (e.g. APIs or Firebase)
# google-services.json
# Freeline
freeline.py
freeline/
freeline_project_description.json
# fastlane
fastlane/report.xml
fastlane/Preview.html
fastlane/screenshots
fastlane/test_output
fastlane/readme.md
# Version control
vcs.xml
# lint
lint/intermediates/
lint/generated/
lint/outputs/
lint/tmp/
# lint/reports/
# Android Profiling
*.hprof
""" | zpy-api-core | /zpy-api-core-1.8.0.tar.gz/zpy-api-core-1.8.0/zpy/cli/scaffolding/ignore.py | ignore.py |
import logging
from typing import List, Dict
from .boilerplates import basic
from .placeholders import place_holders
import os
from .ignore import ignore
def scaffolding_dispatcher(in_data: List[Dict[str, str]], verbose=False, click=None):
    """Entry point used by the CLI to generate a project skeleton.

    @param in_data: ordered scaffolding data (project, description, author, ...)
    @param verbose: when True, print progress details
    @param click: accepted for interface compatibility with the CLI caller;
        currently unused in this dispatcher
    """
    basic_project(in_data, verbose)
def basic_project(in_data: List[Dict[str, str]], verbose=False):
    """Generate the basic project boilerplate described by `basic`.

    @param in_data: ordered scaffolding data; slot 0 = project name,
        slot 7 = open-with-VS-Code flag, slot 8 = only-content flag,
        slot 9 receives the generation path
    @param verbose: when True, print progress details
    """
    try:
        current_directory = os.getcwd()
        name = in_data[0]["value"]
        only_content = in_data[8]["value"]
        force = True
        # Path
        path = current_directory
        if only_content is False:
            # Full mode: create a fresh root folder named after the project.
            force = False
            path = os.path.join(current_directory, name)
            if os.path.isdir(path) is True:
                print(f"Project {name} already exist!")
                return
            # mode
            mode = 0o666
            os.mkdir(path, mode)
            if verbose is True:
                print("Directory '% s' created" % path)
            os.chdir(path)
            if verbose is True:
                print("Working in '% s'" % path)
        # Expose the generation path to the placeholder builders (@base_path).
        in_data[9] = path
        creator(path, basic, in_data, verbose, force)
        if in_data[7]["value"] is True:
            # Best-effort: open the generated project with VS Code.
            try:
                os.system(f"code {path}")
            except Exception as ex:
                ...
    except Exception as e:
        print("An critical error occurred while try to generate project...")
        if verbose is True:
            logging.exception(e)
def create_dir_and_move(current_directory, name, verbose, force) -> str:
    """Create directory *name* under *current_directory* and chdir into it.

    @param force: accepted for signature parity with creator(); unused here
    @return: the new directory path, or implicitly None when it already exists
        (NOTE(review): creator() feeds the result to os.chdir, which would fail
        on None -- confirm intended handling of pre-existing directories)
    """
    path = os.path.join(current_directory, name)
    if os.path.isdir(path) is True:
        print(f"Project {name} already exist!")
        return
    mode = 0o666
    os.mkdir(path, mode)
    if verbose is True:
        print("Directory '% s' created" % path)
    os.chdir(path)
    return path
def creator(c_dir: str, context: dict, info: List, verbose: bool, force: bool):
    """Recursively materialize a scaffolding tree on disk.

    Each entry in *context* is a node dict whose "type" is one of:
      - "file": write "content" (with placeholders resolved) to "name"
      - "raw-file": write the shared .gitignore payload (`ignore`) to "name"
      - "dir": create the directory and recurse into its "child" nodes

    @param c_dir: directory in which this level's nodes are created
    @param context: mapping of node-key -> node dict
    @param info: positional scaffolding data for place_holders
    @param verbose: forwarded to create_dir_and_move for progress logs
    @param force: forwarded through the recursion
    """
    for key in context.keys():
        # Re-anchor on every iteration: child recursion chdirs away.
        os.chdir(c_dir)
        node = context.get(key)
        if node["type"] == 'file':
            with open(place_holders(node["name"], info), "w+", encoding="utf-8") as f:
                f.write(place_holders(node["content"], info))
        elif node["type"] == "raw-file":
            with open(place_holders(node["name"], info), "w+", encoding="utf-8") as f:
                f.write(ignore)
        elif node["type"] == "dir":
            creator(create_dir_and_move(c_dir, place_holders(node["name"], info), verbose, force),
                    node["child"], info, verbose, force)
from logging import INFO, WARN, ERROR
from typing import Optional
from zpy.logger import ZLogger
from zpy.app import zapp_context as ctx, ZContext
from zpy.utils.values import if_null_get
from zpy.logger.structured import StructuredLog
class Loggable:
    """Mixin exposing the zpy context logger with plain and structured helpers."""

    def __init__(self, logger: Optional[ZLogger] = None):
        # Fall back to the ambient application context when no logger is given.
        self.logger: ZLogger = if_null_get(logger, ctx().logger)
        self.app = ctx().app_name
        self.context: ZContext = ctx()

    # NOTE(review): the `any` annotations below are the builtin, not typing.Any;
    # kept to avoid widening this file's typing imports.
    def log_exception(self, value: any, shippable: bool = False, **kwargs):
        """Log *value* with the active exception traceback attached."""
        self.logger.exception(value, shippable, **kwargs)

    def log_error(self, value: any, exc_info: any, shippable: bool = False):
        """Log *value* at ERROR level with an explicit exc_info payload."""
        self.logger.err(value, shippable, exc_info=exc_info)

    def log_info(self, value: any, shippable: bool = False, **kwargs):
        """Log *value* at INFO level."""
        self.logger.info(value, shippable, **kwargs)

    def log_warn(self, value: any, shippable: bool = False, **kwargs):
        """Log *value* at WARNING level."""
        self.logger.warn(value, shippable, **kwargs)

    def log_struct_info(self, message: str, flow: str = None, description: str = None, meta: Optional[dict] = None,
                        **kwargs) -> StructuredLog:
        """Emit a structured INFO log; *flow* defaults to the context's current flow."""
        flow_name = if_null_get(flow, self.context.get_flow())
        log = StructuredLog(flow_name, message, self.app, description, INFO, meta, **kwargs)
        self.logger.info(log)
        return log

    def log_struct_warn(self, message: str, flow: str = None, description: str = None, meta: Optional[dict] = None,
                        **kwargs) -> 'StructuredLog':
        """Emit a structured WARNING log; *flow* defaults to the context's current flow."""
        flow_name = if_null_get(flow, self.context.get_flow())
        log = StructuredLog(flow_name, message, self.app, description, WARN, meta, **kwargs)
        self.logger.warn(log)
        return log

    def log_struct_error(self, message: str, flow: str = None, description: str = None, meta: Optional[dict] = None,
                         **kwargs) -> 'StructuredLog':
        """Emit a structured ERROR log; *flow* defaults to the context's current flow."""
        flow_name = if_null_get(flow, self.context.get_flow())
        log = StructuredLog(flow_name, message, self.app, description, ERROR, meta, **kwargs)
        self.logger.err(log)
        return log
from typing import Optional, Any, List
from zpy.logger import ZLogger
from zpy.utils.values import if_null_get
from zpy.utils import get_env
from logging import getLevelName, INFO, WARN, ERROR, DEBUG
from zpy.utils.collections import walk_and_apply
from copy import copy
from zpy.app import zapp_context as ctx
# Filters applied, in registration order, to every StructuredLog payload.
sensitive_data_filters = []
# Process-wide logger/app-name used by the StructuredLog.log_* helpers;
# configured via StructuredLog.configure_logger().
main_logger: Optional[ZLogger] = None
main_logger_app: Optional[str] = None
def init_mask_meta_keys_filter(keys: List[str] = None):
    """Build a StructuredLog filter that masks sensitive values in log['meta'].

    @param keys: lower-case substrings identifying sensitive key names; when
        None a built-in default list is used. FIX: the original overwrote this
        parameter unconditionally, silently ignoring any caller-supplied keys.
    @return: a filter callable (dict -> dict) suitable for
        StructuredLog.add_sensitive_filter
    """
    if keys is None:
        # Default sensitive-key markers (English + Spanish).
        # FIX: 'contraseΓ±a' was mojibake for 'contraseña' and could never match.
        keys = ['pass', 'password', 'bank', 'pwd', 'contraseña', 'contra', 'priv', 'banc', 'secret', 'clave']

    def mask_meta_keys_filter(log: dict) -> dict:
        def sensitive_data_masker(key, value) -> Any:
            # Mask the value, keeping only its last two characters visible.
            for sk in keys:
                if isinstance(key, str) and sk in copy(key).lower():
                    return f"******{str(value)[-2:]}"
            return value

        if 'meta' in log and isinstance(log['meta'], dict):
            walk_and_apply(log['meta'], sensitive_data_masker)
        return log

    return mask_meta_keys_filter
class StructuredLog(object):
    """Dict-shaped log record: application, flow, message, description, level, meta."""

    @staticmethod
    def add_sensitive_filter(log_filter):
        """Register a filter applied to every new log payload (e.g. a masker)."""
        global sensitive_data_filters
        sensitive_data_filters.append(log_filter)

    @staticmethod
    def configure_logger(logger, app_name: str):
        """Set the process-wide logger and app name used by the log_* helpers."""
        global main_logger
        main_logger = logger
        global main_logger_app
        main_logger_app = app_name

    def __init__(self, flow: str, message: str, app: str = None, description: str = None, level: int = INFO,
                 meta: Optional[dict] = None, **kwargs) -> None:
        # Extra keyword arguments are folded into the meta payload.
        metadata = if_null_get(meta, {})
        metadata.update(kwargs)
        self.log: dict = {
            'application': if_null_get(app, get_env("APP_NAME", "Unknown")),
            'flow': flow,
            'message': message,
            'description': description,
            'level': getLevelName(level),
            'meta': metadata
        }
        # FIX: apply every registered filter in order, chaining the result.
        # The original `[sf(self.log) for sf in sensitive_data_filters][0]`
        # kept only the first filter's output and discarded the rest.
        for log_filter in sensitive_data_filters:
            self.log = log_filter(self.log)

    def __str__(self):
        return f'{self.log}'

    @staticmethod
    def log_info(flow: str, message: str, description: str = None, meta: Optional[dict] = None,
                 **kwargs) -> 'StructuredLog':
        """Build and emit a structured INFO log via the configured/context logger."""
        log = StructuredLog(flow, message, main_logger_app, description, INFO, meta, **kwargs)
        if_null_get(main_logger, ctx().logger).info(log)
        return log

    @staticmethod
    def log_warn(flow: str, message: str, description: str = None, meta: Optional[dict] = None,
                 **kwargs) -> 'StructuredLog':
        """Build and emit a structured WARNING log via the configured/context logger."""
        log = StructuredLog(flow, message, main_logger_app, description, WARN, meta, **kwargs)
        if_null_get(main_logger, ctx().logger).warn(log)
        return log

    @staticmethod
    def log_error(flow: str, message: str, description: str = None, meta: Optional[dict] = None,
                  **kwargs) -> 'StructuredLog':
        """Build and emit a structured ERROR log via the configured/context logger."""
        log = StructuredLog(flow, message, main_logger_app, description, ERROR, meta, **kwargs)
        if_null_get(main_logger, ctx().logger).err(log)
        return log
import logging
import traceback
from abc import ABC, abstractmethod
from enum import Enum
from io import StringIO
from typing import Union, Any, Optional, Callable
class ZLFormat(Enum):
    """Common logging format presets (M=message, N=name, L=level, T=asctime)."""
    M = "%(message)s"
    NM = "%(name)s %(message)s"
    LNM = "%(name)s %(levelname)s %(message)s"
    TM = "%(asctime)s %(message)s"
    LM = "%(levelname)s - %(message)s"
    TLM = "%(asctime)s - %(levelname)s - %(message)s"
    TNLM = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"


def setup_global_logger(log_format: str = ZLFormat.LM.value, level: int = logging.INFO, reset: Optional[bool] = False):
    """Configure the root logger via logging.basicConfig.

    @param log_format: format string applied to the root handler
    @param level: minimum level for the root logger
    @param reset: when True, first remove every existing root handler so
        basicConfig can attach a fresh one
    """
    if reset is True:
        logger = logging.getLogger()
        # FIX: iterate over a copy -- removing handlers while iterating the
        # live list skipped every other handler, so some survived the reset.
        for h in list(logger.handlers):
            logger.removeHandler(h)
    logging.basicConfig(format=log_format, level=level)
class ZLogger(ABC):
    """
    Simple Logger

    Abstract contract for zpy loggers: leveled logging plus lifecycle hooks.
    The ``shippable`` flag marks a record for forwarding to external senders.
    """

    @classmethod
    @abstractmethod
    def create(cls, name: str):
        """Factory: build a logger with default configuration for *name*."""
        ...

    @abstractmethod
    def raw(self, value: Any, shippable: Optional[bool] = False, **kwargs) -> None:
        """Log *value* at the logger's own configured level."""
        ...

    @abstractmethod
    def info(self, value: Any, shippable: Optional[bool] = False, **kwargs) -> None:
        """Log *value* at INFO level."""
        ...

    @abstractmethod
    def warn(self, value: Any, shippable: Optional[bool] = False, **kwargs) -> None:
        """Log *value* at WARNING level."""
        ...

    @abstractmethod
    def err(self, value: Any, shippable: Optional[bool] = False, **kwargs) -> None:
        """Log *value* at ERROR level."""
        ...

    @abstractmethod
    def ex(self, value: Any, shippable: Optional[bool] = False, **kwargs) -> None:
        """Log *value* with exception information (short alias of exception)."""
        ...

    @abstractmethod
    def exception(self, value: Any, shippable: Optional[bool] = False, **kwargs) -> None:
        """Log *value* with exception information."""
        ...

    @abstractmethod
    def release(self, name: Optional[str] = None) -> None:
        """Re-initialize the underlying logger, optionally under a new name."""
        ...

    @abstractmethod
    def close(self) -> None:
        """Dispose of the underlying logging resources."""
        ...
class Shippable:
    """Mixin that fans emitted values out to registered sender callables."""

    def __init__(self):
        # Callables invoked with each emitted value, in registration order.
        self.senders = []

    def emit_if(self, value: Any, predicate):
        """Emit *value* only when *predicate* is truthy."""
        if predicate:
            self.emit(value)

    def emit(self, value: Any):
        """Forward *value* to every registered sender."""
        for dispatch in self.senders:
            dispatch(value)

    def add_sender(self, sender: Callable[[Any], None]) -> None:
        """Register a callable that will receive emitted values."""
        self.senders.append(sender)
class ZStreamLogger(ZLogger, Shippable):
    """
    Stream Logger

    Writes records into an in-memory ``StringIO`` buffer. The buffered
    text can be read with :meth:`str_value` or forwarded to registered
    senders with :meth:`send`.
    """

    def __init__(self, name: str, level: int, log_format: Union[str, ZLFormat]) -> None:
        super().__init__()
        self._level = level
        self._name = name
        # BUGFIX: the original tested the *builtin* ``format`` instead of the
        # ``log_format`` argument, so plain-string formats always raised
        # AttributeError on ``.value``.
        self._log_format = log_format if isinstance(log_format, str) else log_format.value
        self._formatter = logging.Formatter(self._log_format)
        self._logger = None
        self._stream = None
        self._handler = None
        self.release(name)

    @classmethod
    def create(cls, name: str):
        """Build an INFO-level stream logger using the TNLM format."""
        return cls(name, logging.INFO, ZLFormat.TNLM)

    def raw(self, value: Any, shippable: Optional[bool] = False, **kwargs) -> None:
        self._logger.log(self._level, value, **kwargs)

    def info(self, value: Any, shippable: Optional[bool] = False, **kwargs) -> None:
        self._logger.info(value, **kwargs)

    def warn(self, value: Any, shippable: Optional[bool] = False, **kwargs) -> None:
        self._logger.warning(value, **kwargs)

    def err(self, value: Any, shippable: Optional[bool] = False, **kwargs) -> None:
        # BUGFIX: ``*kwargs`` unpacked keyword names as positional args;
        # keyword arguments are now forwarded correctly.
        self._logger.error(value, **kwargs)

    def ex(self, value: Any, shippable: Optional[bool] = False, **kwargs) -> None:
        self._logger.exception(msg=value, **kwargs)

    def exception(self, value: Any, shippable: Optional[bool] = False, **kwargs) -> None:
        self._logger.exception(msg=value, **kwargs)

    def release(self, name: Optional[str] = None) -> None:
        """Close current resources and attach a fresh in-memory handler."""
        self.close()
        self._logger = logging.getLogger(self._name if not name else name)
        # BUGFIX: iterate over a copy — removing handlers while iterating the
        # live list skips every other handler.
        for handler in list(self._logger.handlers):
            self._logger.removeHandler(handler)
        self._logger.setLevel(self._level)
        self._stream = StringIO()
        self._handler = logging.StreamHandler(stream=self._stream)
        self._handler.setLevel(self._level)
        self._handler.setFormatter(self._formatter)
        self._logger.addHandler(self._handler)
        self._logger.propagate = False

    def close(self) -> None:
        """Flush/close the handler and release the buffer."""
        if self._handler:
            self._handler.flush()
            self._handler.close()
        if self._logger:
            self._logger.removeHandler(self._handler)
        if self._stream:
            self._stream.close()
            self._stream = None

    def str_value(self) -> str:
        """Return everything written to the buffer so far."""
        return self._stream.getvalue()

    def send(self):
        """Emit the buffered text to all registered senders."""
        if self._stream:
            self.emit(self._stream.getvalue())
class zL(ZLogger, Shippable):
    """
    Logger Wrapper

    Instance methods log through a dedicated named logger and mirror
    values to registered senders when ``shippable`` is True; the static
    helpers log through the root logger.
    """

    def __init__(self, name: str, level: int, log_format: Union[str, ZLFormat]) -> None:
        super().__init__()
        self._level = level
        self._name = name
        # BUGFIX: the original tested the *builtin* ``format`` instead of the
        # ``log_format`` argument, so plain-string formats always raised
        # AttributeError on ``.value``.
        self._log_format = log_format if isinstance(log_format, str) else log_format.value
        self._formatter = logging.Formatter(self._log_format)
        self._logger = None
        self._handler = None
        self.release(name)

    def release(self, name: Optional[str] = None):
        """Attach a fresh stream handler, optionally under a new logger name."""
        self._logger = logging.getLogger(self._name if not name else name)
        # BUGFIX: iterate over a copy — removing handlers while iterating the
        # live list skips every other handler.
        for handler in list(self._logger.handlers):
            self._logger.removeHandler(handler)
        self._logger.setLevel(self._level)
        self._handler = logging.StreamHandler()
        self._handler.setLevel(self._level)
        self._handler.setFormatter(self._formatter)
        self._logger.addHandler(self._handler)
        self._logger.propagate = False

    @classmethod
    def create(cls, name: str):
        """Create an INFO-level logger with the TNLM format."""
        return cls(name, logging.INFO, ZLFormat.TNLM)

    @classmethod
    def create_for_cloud(cls, name: str):
        """Create an INFO-level logger with the compact LM format."""
        return cls(name, logging.INFO, ZLFormat.LM)

    def raw(self, value: Any, shippable: bool = False, *args, **kwargs):
        self.emit_if(value, shippable)
        self._logger.log(self._level, value, *args, **kwargs)

    def info(self, value: Any, shippable: bool = False, **kwargs) -> None:
        self.emit_if(value, shippable)
        self._logger.info(value, **kwargs)

    def warn(self, value: Any, shippable: bool = False, **kwargs) -> None:
        self.emit_if(value, shippable)
        self._logger.warning(value, **kwargs)

    def err(self, value: Any, shippable: bool = False, **kwargs) -> None:
        self.emit_if(value, shippable)
        self.emit_if(traceback.format_exc(), shippable)
        # BUGFIX: ``*kwargs`` unpacked keyword names as positional args;
        # keyword arguments are now forwarded correctly.
        self._logger.error(value, **kwargs)

    def exception(self, value: Any, shippable: bool = False, **kwargs) -> None:
        self.emit_if(value, shippable)
        self.emit_if(traceback.format_exc(), shippable)
        self._logger.exception(msg=value, **kwargs)

    def close(self):
        """Flush and detach the stream handler."""
        if self._handler:
            self._handler.flush()
            self._handler.close()
        if self._logger:
            self._logger.removeHandler(self._handler)

    @staticmethod
    def w(msg: object, *args):
        """Warning Level Log (root logger)."""
        # BUGFIX: ``logging.warning(msg=msg, *args)`` raised TypeError
        # (duplicate ``msg``) whenever positional args were supplied.
        logging.warning(msg, *args)

    @staticmethod
    def i(msg: object, *args):
        """Information Level Log

        Args:
            msg (object): value
        """
        logging.info(msg, *args)

    @staticmethod
    def e(msg: object, exc_info=None, *args):
        """Error Level Log

        Args:
            @param msg:
            @param exc_info:
        """
        logging.error(msg, *args, exc_info=exc_info)

    @staticmethod
    def ex(msg: object, **kwargs):
        """Exception Level Log

        Args:
            @param msg:
        """
        logging.exception(msg=msg, **kwargs)

    @staticmethod
    def d(msg: object, *args):
        """Debug Level Log

        Args:
            msg (object): value
        """
        logging.debug(msg, *args)
from abc import ABC, abstractmethod
from typing import Generic, TypeVar, List, Union, Any, Dict, Callable, Optional
from copy import copy
from zpy.api.http.errors import ZHttpError, BadRequest
from zpy.api.http.status_codes import HttpStatus
from zpy.utils.funcs import Maybe, if_get
from zpy.utils.values import if_null_get
from zpy.app import zapp_context as ctx
T = TypeVar("T")
S = TypeVar("S")
class UCMeta:
    """Identification metadata for a use case.

    Stores *identifier* as ``id`` and, when *key_identifier* is given,
    additionally under that attribute name.
    """

    def __init__(self, identifier: str, key_identifier: str = None) -> None:
        self.id = identifier
        if key_identifier:
            setattr(self, key_identifier, identifier)
class Selectable:
    """Selection metadata for a use case.

    Maintains ``identifiers``: a mapping ``key -> {"value": ..., "weight": ...}``
    describing which events the case responds to; ``weight`` orders
    execution among cases bound to the same event.
    """

    def __init__(self, identifier: str, key_identifier: str = None, weight: float = 1.0) -> None:
        """
        @param identifier: value of selector based. E.g. UserCreator
        @param key_identifier: key of selector based. E.g. action
        """
        selector_key = key_identifier if key_identifier else 'id'
        self.weight = weight
        self.name = identifier
        self.identifiers: Dict[str, Dict[str, str]] = {
            selector_key: {"value": identifier, "weight": weight}
        }

    def configure_for_all(self, weight: float = 2.0):
        """Register the '*' wildcard so this case runs for every event."""
        self.identifiers['*'] = {"value": '*', "weight": weight}

    def configure(self, uc_identifier: Any, weight: float, key_identifier: Any = 'id'):
        """Register (or replace) a selector entry under *key_identifier*."""
        self.identifiers[key_identifier] = {
            "value": uc_identifier,
            "weight": weight
        }

    def execute_when(self, event: str, sort_weight: float, key_identifier: Any = 'id'):
        """
        The use case will be executed when event arrive to selector.
        @param event: Event name to associate
        @param sort_weight: weight for sort case execution
        @param key_identifier:
        @return:
        """
        self.configure(event, sort_weight, key_identifier)

    def execute_always(self, sort_weight: float = 2.0):
        """
        The use case will be executed on all events that arrive and are executed.
        @param sort_weight:
        @return: None
        """
        self.configure_for_all(sort_weight)
class UseCase(ABC, Generic[T, S]):
    """Base class for application use cases (payload ``T`` in, ``S`` out)."""

    def __init__(self, name: Any = None):
        self.name = name

    def before(self, *args, **kwargs):
        """Hook invoked before :meth:`execute`; no-op by default."""
        pass

    @abstractmethod
    def execute(self, payload: T, *args, **kwargs) -> S:
        """Execute use case"""

    def after(self, *args, **kwargs):
        """Hook invoked after :meth:`execute`; no-op by default."""
        pass
class UseCaseSelector(UseCase, UCMeta):
    """Dispatches an incoming event dict to one registered use case.

    The target case is looked up by the value stored under the first
    matching action key; the payload is taken from the first matching
    payload key, falling back to the whole event.
    """

    def __init__(self, use_cases: List[Union[UseCase, UCMeta, Any]], action_keys: List[str] = None,
                 key_uc_identifier: str = 'id', selector_id='default', payload_keys: List[str] = None):
        UCMeta.__init__(self, identifier=selector_id)
        # Index cases by their identifier attribute (e.g. ``id``).
        self.cases = {getattr(x, key_uc_identifier): x for x in use_cases}
        self.action_keys = if_null_get(action_keys, ['action'])
        self.key_identifier = key_uc_identifier
        self.payload_keys = if_null_get(payload_keys, ['payload'])

    def execute(self, data: dict, context: Any = None, *args, **kwargs) -> dict:
        action = next((k for k in self.action_keys if k in data), None)
        if action is None:
            # BUGFIX: the original interpolated ``action`` (always None on
            # this path); report the keys that were actually searched.
            raise ValueError(f'Request provided is malformed. Missing any of {self.action_keys} keys!')
        operation: Union[UseCase, UCMeta] = self.cases.get(data[action], None)
        if not operation:
            # BUGFIX: index with the detected action key — the literal
            # 'action' key raised KeyError when custom action_keys were used.
            raise ValueError(f"Use case for action: {data[action]} not registered in selector.")
        payload_key = next((k for k in self.payload_keys if k in data), None)
        payload = data.get(payload_key, data)
        return operation.execute(payload, context=context)
class CaseSelector(UseCase, Selectable):
    """Event dispatcher that routes events to one or more registered cases.

    Cases are grouped by event identifier and ordered by weight; cases
    registered under '*' run for every event. :meth:`execute` dispatches a
    single event, :meth:`handle` a batch found under ``multi_cases_key``.
    """

    def __init__(self, use_cases: List[Union[UseCase, Selectable, Any]], action_keys: List[str] = None,
                 key_uc_identifier: str = 'id', selector_id='default', payload_keys: List[str] = None,
                 payload_mutation: bool = False, safe_execution: bool = False):
        Selectable.__init__(self, identifier=selector_id, key_identifier=key_uc_identifier)
        self.action_keys = if_null_get(action_keys, ['action'])
        self.payload_keys = if_null_get(payload_keys, ['payload'])
        self.multi_cases_key = 'cases'
        # When False (default) each case receives a shallow copy of the payload.
        self.allow_payload_mutation = payload_mutation
        self.on_before = None
        self.on_after = None
        self.on_error = None
        # Optional payload transformer applied before each case: (name, payload) -> payload
        self.on_case: Optional[Callable[[str, Dict], Dict]] = None
        # Optional hook invoked after each successful case execution.
        self.before_continue: Optional[Callable[[str, Dict, Dict, Dict], Dict]] = None
        # When True, a failing case aborts the whole dispatch (re-raise/break).
        self.safe_execution = safe_execution
        self.cases = Maybe(use_cases) \
            .bind(self.__group_cases) \
            .bind(self.__sort_cases) \
            .value

    def configure_for_multiple(self, multiple_main_key: str) -> 'CaseSelector':
        """Set the event key that holds the case list for :meth:`handle`."""
        self.multi_cases_key = multiple_main_key
        return self

    def configure_error_notifier(self, error_notifier: Callable[[Any, Any, Any], Any]):
        """Register a callback notified when a dispatch fails."""
        self.on_error = error_notifier

    @staticmethod
    def __group_cases(cases: List[Union[UseCase, Selectable, Any]]):
        # Build {event_value: {weight: case}} from each case's identifiers.
        group_to_sort = {}
        for case in cases:
            for k, v in case.identifiers.items():
                if v['value'] not in group_to_sort:
                    group_to_sort[v['value']] = {
                        v['weight']: case
                    }
                    continue
                group_to_sort[v['value']][v['weight']] = case
        return group_to_sort

    @staticmethod
    def __sort_cases(cases: Dict[str, Dict[Any, Union[UseCase, Selectable, Any]]]):
        # Flatten {event: {weight: case}} into {event: [cases sorted by weight]}.
        return {c: [x[1] for x in sorted(cases[c].items())] for c in cases}

    def handle(self, data: dict, context: Any = None, *args, **kwargs) -> dict:
        """
        Handle multiple cases
        @param data: event
        @param context:
        @param args:
        @param kwargs:
        @return:
        """

        def build_result(x_status: tuple, content_result: Any, metadata: Any, details: Any) -> dict:
            return {
                'status': {
                    "code": x_status[0],
                    "status": x_status[1],
                    "message": x_status[2],
                    'details': details
                },
                'body': content_result,
                'metadata': metadata,
            }

        def find_identifier(raw_payload: dict, identifiers: List[str]) -> str:
            # Return the first key of *identifiers* present in the payload.
            action = None
            for key_action in identifiers:
                if key_action in raw_payload:
                    action = key_action
                    break
            if action is None:
                raise ValueError('Request provided is malformed. Missing event identifier!')
            return action

        if self.on_before:
            self.on_before()
        current_case_execution = None
        final_results = []
        case_id_to_execute = None
        try:
            if not data or self.multi_cases_key not in data:
                raise BadRequest(f'Request provided is malformed. Missing {self.multi_cases_key} key!')
            raw_cases_to_execute = data[self.multi_cases_key]
            for case in raw_cases_to_execute:
                current_case_result = {
                    "event": None,
                    "executions": []
                }
                try:
                    case_id_to_execute = find_identifier(case, self.action_keys)
                    cases_to_execute: List[Union[UseCase, Selectable]] = self.cases.get(case[case_id_to_execute], [])
                    # Merge the wildcard cases and re-sort by weight.
                    cases_to_execute = sorted(cases_to_execute + self.cases.get('*', []), key=lambda x: x.weight)
                    current_case_result['event'] = case.get(case_id_to_execute, None)
                    current_case_execution = current_case_result['event']
                    if not cases_to_execute:
                        raise ValueError(f"Use case for event: {case[case_id_to_execute]} not registered in selector.")
                    payload_key = find_identifier(case, self.payload_keys)
                    payload = case.get(payload_key, None)
                    for x_case in cases_to_execute:
                        ctx().logger.info(f'β‘ Running case: {x_case.name}...')
                        # Copy unless mutation between chained cases is allowed.
                        payload = if_get(self.allow_payload_mutation, payload, copy(payload))
                        meta = case.get('metadata', None)
                        if self.on_case:
                            payload = self.on_case(x_case.name, payload)
                        x_case.before(payload=payload, context=context, metadata=meta)
                        result = x_case.execute(payload, context=context, metadata=meta)
                        x_case.after(payload=payload, context=context, metadata=meta, result=result)
                        current_case_result['executions'].append({
                            "event": case[case_id_to_execute],
                            "body": result
                        })
                        # NOTE(review): appended once per executed case, so the
                        # same result dict appears multiple times when several
                        # cases run for one event — confirm intended.
                        final_results.append(current_case_result)
                        if self.before_continue:
                            self.before_continue(x_case.name, payload, meta, result)
                except Exception as e:
                    ctx().logger.exception(f"An error occurred while processing task: {case[case_id_to_execute]} ",
                                           exc_info=e)
                    current_case_result['error'] = str(e)
                    current_case_result['executions'].append({
                        "event": case[case_id_to_execute],
                        "body": None
                    })
                    final_results.append(current_case_result)
                    if self.safe_execution is True:
                        raise
            return build_result(HttpStatus.SUCCESS.value, final_results, None, None)
        except ZHttpError as e:
            ctx().logger.exception("An error occurred while processing task ", exc_info=e)
            status = e.status.value
            if self.on_error:
                self.on_error(data, context, e, status, if_null_get(e.reason, status[2]), current_case_execution)
            details = if_null_get(e.details, [])
            if e.message:
                status = (status[0], status[1], e.message)
            if e.reason:
                details.append(e.reason)
            if self.on_after:
                self.on_after()
            # BUGFIX: return the accumulated ``details`` list; the original
            # returned ``e.details`` and silently dropped the appended reason.
            return build_result(status, final_results, e.metadata, details)
        except Exception as e:
            ctx().logger.exception("An error occurred while processing case ", exc_info=e)
            status = HttpStatus.INTERNAL_SERVER_ERROR.value
            if self.on_error:
                self.on_error(data, context, e, status, status[2], current_case_execution)
            if self.on_after:
                self.on_after()
            return build_result(status, final_results, None, [str(e)])

    def execute(self, data: dict, context: Any = None, *args, **kwargs) -> List[Any]:
        """Dispatch a single event to every matching case; returns results."""
        action = None
        for key_action in self.action_keys:
            if key_action in data:
                action = key_action
                break
        if action is None:
            # BUGFIX: the original interpolated ``action`` (always None on
            # this path); report the keys that were actually searched.
            raise ValueError(f'Request provided is malformed. Missing any of {self.action_keys} keys!')
        cases_to_execute: List[Union[UseCase, Selectable]] = self.cases.get(data[action], [])
        cases_to_execute = sorted(cases_to_execute + self.cases.get('*', []), key=lambda x: x.weight)
        if not cases_to_execute:
            raise ValueError(f"Use case for action: {data[action]} not registered in selector.")
        payload_key = None
        for pk in self.payload_keys:
            if pk in data:
                payload_key = pk
                break
        payload = data.get(payload_key, data)
        results = []
        if self.on_before:
            self.on_before()
        for x_case in cases_to_execute:
            ctx().logger.info(f'β‘ Running case: {x_case.name}...')
            try:
                _payload = if_get(self.allow_payload_mutation, payload, copy(payload))
                x_case.before(payload=_payload, context=context)
                result = x_case.execute(_payload, context=context)
                if isinstance(result, dict):
                    result['event'] = data[action]
                results.append(result)
                x_case.after(payload=_payload, context=context, result=result)
                if self.before_continue:
                    self.before_continue(x_case.name, payload, context, result)
            except Exception as e:
                ctx().logger.exception(f"An error occurred while processing case: {x_case.name} ", exc_info=e)
                if self.safe_execution is True:
                    break
        if self.on_after:
            self.on_after()
        return results
from dataclasses import dataclass
from typing import Any, Callable, Optional
from zpy.app import zapp_context as ctx
from zpy.utils.funcs import safe_exec_wrapper
from zpy.utils.values import if_null_get
@dataclass
class SQSResource:
    """Descriptor of an SQS destination associated with an event."""
    # Event name tied to this queue.
    event: str
    # Queue name.
    name: str
    # Extra attributes for the queue/messages; semantics defined by callers.
    attributes: Optional[dict]
def get_sqs_url_from(sqs_client: Any, sqs_name: str) -> str:
    """
    Resolve an SQS queue URL from its name.
    @param sqs_client: SQS client built from boto3
    @param sqs_name: queue name
    @return: queue URL
    """
    response = sqs_client.get_queue_url(QueueName=sqs_name)
    return response['QueueUrl']
def send_secure_message_to_sqs(sqs_client: Any, payload: str, origin: str, attributes=None, sqs_name: str = None,
                               sqs_url: str = None, delay: int = 0,
                               error_notifier: Optional[Callable[[str], None]] = None) -> Optional[dict]:
    """
    Exception-safe variant of :func:`send_message_to_sqs`.

    Delegates through ``safe_exec_wrapper``: on failure the error is
    reported via *error_notifier* (when given) and ``None`` is returned
    instead of raising.

    @param sqs_client: SQS Client built from boto3
    @param payload: Message to send
    @param attributes: Custom attributes to send
    @param origin: Origin of emitted message
    @param sqs_name: SQS Name
    @param sqs_url: SQS Url
    @param delay: Delay seconds
    @param error_notifier: Error reporting function
    @note If sqs name is null, sqs url is required or if sqs_url is null, sqs_name is required
    @return: sqs response or None on failure
    """
    destination = if_null_get(sqs_name, sqs_url)
    failure_message = f"An error occurred while try to send message to: {destination}"
    return safe_exec_wrapper(
        target=send_message_to_sqs,
        args=[sqs_client, payload, origin, attributes, sqs_name, sqs_url, delay],
        kwargs=None,
        msg=failure_message,
        notifier=error_notifier,
        default_ret=None
    )
def send_message_to_sqs(sqs_client: Any, payload: str, origin: str, attributes=None, sqs_name: str = None,
                        sqs_url: str = None, delay: int = 0) -> dict:
    """
    Helper declarative function to send sqs message
    @param sqs_client: SQS Client built from boto3
    @param payload: Message to send
    @param attributes: Custom attributes to send
    @param origin: Origin of emitted message; 'Unknown' is used in the
        ZOrigin attribute when falsy
    @param sqs_name: SQS Name
    @param sqs_url: SQS Url
    @param delay: Delay seconds
    @note If sqs name is null, sqs url is required or if sqs_url is null, sqs_name is required
    @return: sqs response
    """
    if not sqs_name and not sqs_url:
        raise ValueError("SQS Name or SQS Url is required")
    # BUGFIX: work on a copy so the caller's ``attributes`` dict is not
    # mutated by injecting the ZOrigin attribute.
    attributes = dict(attributes) if attributes else {}
    if 'ZOrigin' not in attributes:
        attributes['ZOrigin'] = {
            'DataType': 'String',
            'StringValue': origin if origin else 'Unknown'
        }
    return sqs_client.send_message(
        QueueUrl=get_sqs_url_from(sqs_client, sqs_name) if sqs_name else sqs_url,
        DelaySeconds=delay,
        MessageAttributes=attributes,
        MessageBody=payload
    )
def send_message_to_fifo_sqs(sqs_client: Any, payload: str, origin: str, attributes=None, sqs_name: str = None,
                             sqs_url: str = None, delay: int = 0, group_id: str = None,
                             deduplication_id: str = None) -> dict:
    """
    Helper declarative function to send sqs message
    @param deduplication_id: Only for fifo
    @param group_id: Only for Fifo
    @param sqs_client: SQS Client built from boto3
    @param payload: Message to send
    @param attributes: Custom attributes to send
    @param origin: Origin of emitted message; 'Unknown' is used in the
        ZOrigin attribute when falsy
    @param sqs_name: SQS Name
    @param sqs_url: SQS Url
    @param delay: Delay seconds
    @note If sqs name is null, sqs url is required or if sqs_url is null, sqs_name is required
    @return: sqs response
    """
    if not sqs_name and not sqs_url:
        raise ValueError("SQS Name or SQS Url is required")
    # BUGFIX: work on a copy so the caller's ``attributes`` dict is not
    # mutated by injecting the ZOrigin attribute.
    attributes = dict(attributes) if attributes else {}
    if 'ZOrigin' not in attributes:
        attributes['ZOrigin'] = {
            'DataType': 'String',
            'StringValue': origin if origin else 'Unknown'
        }
    return sqs_client.send_message(
        QueueUrl=get_sqs_url_from(sqs_client, sqs_name) if sqs_name else sqs_url,
        DelaySeconds=delay,
        MessageAttributes=attributes,
        MessageBody=payload,
        MessageDeduplicationId=deduplication_id,
        MessageGroupId=group_id
    )
def send_wake_up_signal(sqs_client: Any, metadata: str, sqs_name: str = None, sqs_url: str = None,
                        origin: str = "Unknown"):
    """
    Send a 'WakeUpSignal' control message to the given queue.

    Failures are logged through the application context logger instead
    of being raised.
    """
    logger = ctx().logger
    destination = if_null_get(sqs_name, sqs_url)
    signal_attributes = {
        'ZType': {
            'DataType': 'String',
            'StringValue': 'WakeUpSignal'
        }
    }
    try:
        response = send_message_to_sqs(
            sqs_client=sqs_client,
            payload=metadata,
            origin=origin,
            attributes=signal_attributes,
            sqs_name=sqs_name,
            sqs_url=sqs_url)
        logger.info(f"Wake up signal was sent successfully to {destination}, id: {response['MessageId']}")
    except Exception as e:
        logger.err(f"An error occurred while try to send Wake up signal to: {destination}", exc_info=e)
import dataclasses
import json
from typing import Callable, Optional, Any, Union, Tuple, Dict
from zpy.api.http.errors import ZHttpError
from zpy.api.http.status_codes import HttpStatus
from zpy.app import zapp_context as ctx
class LambdaEventHandler(object):
    """
    Lambda event handler

    Wraps a processor callable with optional request parsing, error
    notification, wake-up-signal handling and before/after hooks.
    """
    _DEFAULT_ERR_MSG = 'An error occurred while processing event data'
    CORS_HEADERS = {
        "Access-Control-Allow-Headers": "Content-Type",
        "Access-Control-Allow-Origin": "*",
        "Access-Control-Allow-Methods": "POST"
    }

    def __init__(self, processor: Optional[Callable[[dict, Any], Any]], strict=False, verbose: bool = False,
                 jsonfy=True, signal_processor: Optional[Callable[[dict, Any], Any]] = None,
                 on_before: Optional[Callable[[dict, Any], Tuple[Dict, Any]]] = None,
                 on_after: Optional[Callable[[dict], Dict]] = None):
        self.processor = processor
        self.strict = strict
        self.verbose = verbose
        # When True, non-dict events are parsed with json.loads.
        self.jsonfy = jsonfy
        self.request_parser = None
        self.response_builder = None
        self.error_notifier = None
        self.send_str_exception = True
        self.starting_process_logger = None
        self.common_fatal_error_msg = f'Fatal :: {self._DEFAULT_ERR_MSG}'
        # Optional handler for 'WakeUpSignal' control events.
        self.signal_processor = signal_processor
        self.signal_event_key = 'zpy_event_action'
        # Hook (event, context) -> (event, context) applied before processing.
        self.on_before = on_before
        # Hook (result) -> result applied after processing.
        self.on_after = on_after
        self.before: Optional[Callable[[], None]] = None
        self.after: Optional[Callable[[], None]] = None

    def configure(self, response_builder: Callable[[Any, Any], Any] = None,
                  request_parser: Callable[[Any, Any], Any] = None,
                  error_notifier: Callable[[Any, Any], None] = None, send_str_exception: bool = True,
                  starting_process_logger: Callable[[Any, Any], None] = None,
                  common_error_msg: str = None) -> 'LambdaEventHandler':
        """Fluent configuration of the optional collaborators."""
        self.response_builder = response_builder
        self.request_parser = request_parser
        self.error_notifier = error_notifier
        self.send_str_exception = send_str_exception
        self.starting_process_logger = starting_process_logger
        self.configure_error_msg(self._DEFAULT_ERR_MSG if common_error_msg is None else common_error_msg)
        return self

    def configure_error_msg(self, msg: str) -> None:
        """Set the message used when processing fails fatally."""
        self.common_fatal_error_msg = f'Fatal :: {msg}'

    @classmethod
    def of(cls, processor: Optional[Callable[[dict, dict], Any]], strict=False, verbose=False, jsonfy=True):
        """Alternate constructor mirroring __init__'s main arguments."""
        return cls(processor, strict, verbose, jsonfy)

    @staticmethod
    def proxy(event, context, handler):
        """Entry-point helper: run *handler* for the given lambda event."""
        return handler.run(event, context)

    def __notify_error(self, record, msg):
        # Forward failures to the configured notifier, if any.
        if self.error_notifier:
            self.error_notifier(record, msg)

    def is_wake_up_signal(self, record: dict):
        """True when the event carries the 'WakeUpSignal' marker."""
        if not record:
            return False
        prop_key = record.get(self.signal_event_key, None)
        return prop_key == 'WakeUpSignal'

    def run(self, event, context) -> Any:
        """Process one invocation; never raises, returns {'results': ...}."""
        logger = ctx().logger
        result = None
        try:
            if self.on_before:
                e, c = self.on_before(event, context)
                event = e
                # BUGFIX: the context returned by the hook was previously
                # discarded (``context = context``); use the returned value.
                context = c
            if self.before:
                self.before()
            if self.request_parser:
                event = self.request_parser(event, context)
            if not self.processor:
                msg = "The lambda event processor cant be null."
                self.__notify_error(event, msg)
                return msg
            if not isinstance(event, dict):
                if self.jsonfy:
                    event = json.loads(event)
            if self.verbose:
                logger.info(f"Starting event processing with data: {event}")
            if self.starting_process_logger:
                self.starting_process_logger(event, context)
            if self.is_wake_up_signal(event):
                result = "Wake up signal received successfully"
                if self.signal_processor:
                    result = self.signal_processor(event, context)
            else:
                result = self.processor(event, context)
            if self.verbose:
                logger.info(result)
        except Exception as e:
            logger.exception("An error occurred processing event", exc_info=e)
            self.__notify_error(event, str(e))
        if self.on_after:
            try:
                result = self.on_after(result)
            except Exception as e:
                logger.exception("An error occurred execute on after hook", exc_info=e)
        if self.after:
            try:
                self.after()
            except Exception as e:
                logger.exception("An error occurred execute on simple after hook", exc_info=e)
        return {"results": result}
@dataclasses.dataclass
class LambdaEventResult(object):
    """Result envelope for a lambda event execution."""
    status: HttpStatus
    payload: dict
    success: bool
    event: str

    # NOTE: explicit __init__ (instead of the dataclass-generated one) keeps
    # the historical argument name ``code`` for the status field.
    def __init__(self, code: HttpStatus, payload: dict, success: bool, event: str = None):
        self.status = code
        self.payload = payload
        self.success = success
        self.event = event
@dataclasses.dataclass
class SuccessEventResult(LambdaEventResult):
    """Convenience result marked successful (default ``HttpStatus.SUCCESS``)."""
    def __init__(self, payload: Union[dict, Any], status: HttpStatus = HttpStatus.SUCCESS):
        LambdaEventResult.__init__(self, status, payload, True, None)
def build_response(status: HttpStatus, payload: dict, success: bool, message: str = None, reason: str = None):
    """Assemble the standard response envelope.

    *status* is expected to carry a ``(code, status_name, message)`` tuple
    in ``.value``; *message* and *reason* override/extend the defaults.
    """
    code, status_name, default_message = status.value[0], status.value[1], status.value[2]
    details = [reason] if reason else None
    return {
        "status": {
            "code": code,
            "message": message if message else default_message,
            "details": details,
            "status": status_name
        },
        "payload": json.dumps(payload),
        "success": success
    }
def lambda_response(default_status: HttpStatus = HttpStatus.SUCCESS, notifier: Callable[[str], None] = None,
                    raise_exc=False):
    """
    HTTP Response builder, build response from data provided
    @param default_status: status used when the handler returns a raw payload
    @param notifier: optional callback receiving the error description
    @param raise_exc: when True, re-raise a generic exception after notifying
        instead of returning an error envelope
    @return: decorator
    @contact https://www.linkedin.com/in/zurckz
    @author NoΓ© Cruz | Zurck'z 20
    @since 16-05-2020
    """

    def lambda_inner_builder(invoker: Callable):
        def wrapper_lambda_handler(*args, **kwargs):
            try:
                response: Any = invoker(*args, **kwargs)
                # SuccessEventResult subclasses LambdaEventResult; one check covers both.
                if isinstance(response, (LambdaEventResult, SuccessEventResult)):
                    return build_response(response.status, response.payload, response.success)
                return build_response(default_status, response, True)
            except ZHttpError as e:
                msg = f"An error occurred while processing event: Reason: {e.reason}. Message: {e.message}"
                ctx().logger.exception(msg, exc_info=e)
                if notifier:
                    notifier(msg)
                if raise_exc is True:
                    # BUGFIX: chain the original exception for diagnosability.
                    raise Exception("An error occurred while processing main process") from e
                return build_response(e.status, e.metadata, False, e.message, e.reason)
            except Exception as e:
                msg = f"An error occurred while processing event. {str(e)}"
                ctx().logger.exception(msg, exc_info=e)
                if notifier:
                    notifier(msg)
                if raise_exc is True:
                    # BUGFIX: chain the original exception for diagnosability.
                    raise Exception("An error occurred while processing main process") from e
                return build_response(HttpStatus.INTERNAL_SERVER_ERROR, None, False)

        wrapper_lambda_handler.__name__ = invoker.__name__
        return wrapper_lambda_handler

    return lambda_inner_builder
def lambda_api_response(content_type: Optional[str] = 'application/json', default_code: int = 200,
                        cors: bool = True):
    """
    HTTP Response builder, build response from data provided
    @param cors: when True, CORS headers are merged into the response headers
    @param default_code: status code used when the handler returns a raw payload
    @param content_type: Response content type. Default: application/json
    @return: decorator
    @contact https://www.linkedin.com/in/zurckz
    @author NoΓ© Cruz | Zurck'z 20
    @since 16-05-2020
    """
    extra_headers = {'Content-Type': content_type}

    def lambda_inner_builder(invoker: Callable):
        def wrapper_lambda_handler(*args, **kwargs):
            response: Any = invoker(*args, **kwargs)
            if isinstance(response, tuple) and len(response) == 3:
                data, code, header = response
                # BUGFIX: always build a fresh dict — the original aliased the
                # shared ``extra_headers`` closure dict, so CORS headers leaked
                # into it and callers could mutate state shared across calls.
                header = dict(header) if header else {}
                header.update(extra_headers)
            else:
                data = response
                code = default_code
                header = dict(extra_headers)
            if cors:
                header.update(LambdaEventHandler.CORS_HEADERS)
            return {
                "statusCode": code,
                "body": json.dumps(data),
                "headers": header
            }

        wrapper_lambda_handler.__name__ = invoker.__name__
        return wrapper_lambda_handler

    return lambda_inner_builder
from typing import Callable, Optional, Any, Union, Dict
from zpy.app import zapp_context as ctx
from zpy.containers import shared_container
import json
import copy
class SQSHandler(object):
    """Processes AWS SQS batch events record-by-record.

    Wraps a ``processor(body, record)`` callable with optional parsing,
    error notification, wake-up-signal handling and before/after hooks.
    """
    _DEFAULT_ERR_MSG = 'An error occurred while processing record data'

    def __init__(self, processor: Optional[Callable[[dict, dict], Any]], single: Optional[bool] = False,
                 strict=False, verbose: bool = False, jsonfy=True, re_enqueue: bool = False,
                 signal_processor: Optional[Callable[[dict, dict], dict]] = None):
        # Callable invoked per record as processor(body, record).
        self.processor = processor
        # When True, stop after the first processed record.
        self.single_item = single
        # When True, malformed events/records abort processing.
        self.strict = strict
        self.verbose = verbose
        self.response_builder = None
        self.request_parser = None
        self.error_notifier = None
        self.send_str_exception = True
        self.starting_process_logger = None
        # When True, non-dict bodies are parsed with json.loads.
        self.jsonfy = jsonfy
        # When True, a failing record raises so SQS re-delivers the batch.
        self.re_enqueue = re_enqueue
        self.common_fatal_error_msg = f'Fatal :: {self._DEFAULT_ERR_MSG}'
        # Optional handler for 'WakeUpSignal' control messages.
        self.signal_processor = signal_processor
        self.before: Optional[Callable[[Dict], None]] = None
        self.after: Optional[Callable[[Dict], None]] = None
        # Optional body transformer: (body, record, record_id) -> body.
        self.on_body: Optional[Callable[[Dict, Dict, str], Dict]] = None

    def configure(self, response_builder: Callable[[Any], Any] = None,
                  request_parser: Callable[[Any], Any] = None,
                  error_notifier: Callable[[Any, Any], None] = None, send_str_exception: bool = True,
                  starting_process_logger: Callable[[Any, Any], None] = None,
                  common_error_msg: str = None) -> 'SQSHandler':
        """Fluent configuration of the optional collaborators."""
        self.response_builder = response_builder
        self.request_parser = request_parser
        self.error_notifier = error_notifier
        self.send_str_exception = send_str_exception
        self.starting_process_logger = starting_process_logger
        self.configure_error_msg(self._DEFAULT_ERR_MSG if common_error_msg is None else common_error_msg)
        return self

    def configure_error_msg(self, msg: str) -> None:
        """Set the message logged/notified when a record fails."""
        self.common_fatal_error_msg = f'Fatal :: {msg}'

    @classmethod
    def of(cls, processor: Optional[Callable[[dict, dict], Any]], single: Optional[bool] = False, strict=False,
           verbose=False, jsonfy=True):
        """Alternate constructor mirroring __init__'s main arguments."""
        return cls(processor, single, strict, verbose, jsonfy)

    @staticmethod
    def proxy(event, handler):
        """Entry-point helper: run *handler* for the given SQS event."""
        return handler.run(event)

    def __notify_error(self, record, msg):
        # Forward failures to the configured notifier, if any.
        if self.error_notifier:
            self.error_notifier(record, msg)

    @staticmethod
    def is_wake_up_signal(record: dict):
        """True when the record carries the ZType='WakeUpSignal' attribute."""
        if not record:
            return False
        msg_attrs = record.get('messageAttributes', None)
        if msg_attrs:
            attrs: dict = msg_attrs.get('ZType', None)
            if not attrs:
                return False
            value = attrs.get('stringValue', None)
            return value == 'WakeUpSignal'
        return False

    def run(self, event):
        """Process every record in *event*; returns a summary dict."""
        logger = ctx().logger
        failed = False
        if self.before:
            try:
                self.before(event)
            except Exception as e:
                logger.exception("An error occurred execute on simple before hook", exc_info=e)
        if self.request_parser:
            event = self.request_parser(event)
        if not self.processor:
            msg = "The sqs record processor cant be null."
            self.__notify_error(None, msg)
            return msg
        if 'Records' not in event:
            if self.strict:
                msg = "Fatal :: Could not continue processing, invalid event. Missing {Records} key."
                self.__notify_error(event, msg)
                logger.err(msg)
                return msg
            # Non-strict mode: treat the bare event as a single record.
            event = {'Records': [event]}
        batch_results = []
        for index, record in enumerate(event["Records"]):
            body: Optional[Union[dict, str]] = None
            # Keep an untouched copy for error reporting (body is popped below).
            x_record = copy.copy(record)
            record_id = index if 'messageId' not in record else record['messageId']
            shared_container["aws_sqs_message_id"] = record_id
            try:
                if 'body' not in record:
                    if self.strict:
                        msg = "Fatal :: Invalid item, could not continue processing. Missing {body} key."
                        self.__notify_error(x_record, msg)
                        if self.single_item:
                            return msg
                        continue
                    body = record
                else:
                    body = record.pop('body')
                if type(body) is not dict:
                    if self.jsonfy:
                        body = json.loads(body)
                if self.verbose:
                    logger.info(f"Starting record processing: {record_id} with data: {body}")
                if self.starting_process_logger:
                    self.starting_process_logger(body, record)
                result = None
                if self.on_body:
                    body = self.on_body(body, record, record_id)
                if SQSHandler.is_wake_up_signal(record):
                    result = "Wake up signal received successfully"
                    if self.signal_processor:
                        result = self.signal_processor(body, record)
                else:
                    result = self.processor(body, record)
                if self.verbose:
                    logger.info(result)
                batch_results.append({
                    "result": result,
                    "message_id": record_id
                })
                if self.single_item:
                    break
            except Exception as e:
                failed = True
                msg_err = f'{self.common_fatal_error_msg}... {str(e) if self.send_str_exception else ""}'
                self.__notify_error(x_record, msg_err)
                logger.exception(f"{self.common_fatal_error_msg}: {body}\n", exc_info=e)
                if self.re_enqueue:
                    # Raising makes the invocation fail so SQS re-delivers the message.
                    raise Exception("Exception autogenerated for re enqueue record...")
        response = {
            'message': "Finished Process." if not failed else "Finished process with errors.",
            "results": batch_results
        }
        if self.after:
            try:
                self.after(response)
            except Exception as e:
                logger.exception("An error occurred execute on simple after hook", exc_info=e)
        if self.response_builder:
            response = self.response_builder(response)
        return response

    def single(self, body, context):
        # Placeholder; not implemented.
        ...
import json
import typing
from abc import ABC, abstractmethod
from copy import copy
from datetime import datetime, date
from decimal import Decimal
from enum import Enum
from typing import Any, Callable, Dict, List, Optional, NamedTuple, Union
from marshmallow import utils
from marshmallow.fields import Field
from marshmallow_objects import models
from zpy.api.http.errors import BadRequest, ZHttpError
from zpy.utils.funcs import safely_exec
from zpy.utils.values import if_null_get
from dataclasses import dataclass, fields
__author__ = "NoΓ© Cruz | [email protected]"
__copyright__ = "Copyright 2021, Small APi Project"
__credits__ = ["NoΓ© Cruz", "Zurck'z"]
__license__ = "MIT"
__version__ = "0.0.1"
__maintainer__ = "NoΓ© Cruz"
__email__ = "[email protected]"
__status__ = "Dev"
# Value Objects
class IntID(NamedTuple):
"""
Value object to represent an integer id
Must be an integer value.
Must be greater than or equal to 0
"""
value: int
@staticmethod
def of(value: int, entity_name: str = '', validator: Optional[Callable[[int], int]] = None,
allow_zero: bool = False):
"""
Create new id value object
@param allow_zero:
@param value:
@param entity_name: Entity to which the id belongs
@param validator: Custom validator
@return: IntID instance
"""
if validator is not None:
return IntID(validator(value))
value = safely_exec(lambda x: int(value), [value])
if isinstance(value, int) is False:
raise BadRequest(f"Invalid value for {entity_name} identifier provided.",
"The value of id must be integer.",
meta={"value": value})
if value == 0 and not allow_zero:
raise BadRequest(f"Invalid value for {entity_name} identifier provided.",
"The value of id must be greater than to 0.",
meta={"value": value})
if value < 0:
raise BadRequest(f"Invalid value for {entity_name} identifier provided.",
"The value of id must be greater than or equal to 0.",
meta={"value": value})
return IntID(value)
@property
def val(self):
"""
id value
@return: int value
"""
return self.value
class IntValue(NamedTuple):
"""
Value object to represent an integer id
Must be an integer value.
Must be greater than or equal to 0
"""
value: int
@staticmethod
def of(value: Union[int, str], entity_name: str = '', validator: Optional[Callable[[int], int]] = None,
allow_negative: bool = True):
"""
Create new id value object
@param allow_negative:
@param value:
@param entity_name: Entity to which the id belongs
@param validator: Custom validator
@return: IntID instance
"""
if validator is not None:
return IntValue(validator(value))
value = safely_exec(lambda x: int(value), [value])
if isinstance(value, int) is False:
raise BadRequest(f"Invalid value for '{entity_name}' provided.",
"The data type of value provided must be integer.",
meta={"value": value})
if not allow_negative and value < 0:
raise BadRequest(f"Invalid provided value for '{entity_name}'.",
"The value must be greater than or equal to 0.",
meta={"value": value})
return IntValue(value)
@property
def val(self):
"""
id value
@return: int value
"""
return self.value
def default_remove():
return [
"_http_status_",
"__dump_lock__",
"__schema__",
"__missing_fields__",
"__setattr_func__",
"_ZObjectModel__remove_keys",
"_ZObjectModel__update_items",
"__use_native_dumps__",
"dump_mode"
]
class ZObjectModel(models.Model):
"""
Zurckz Model
"""
def __init__(
self,
exclude: Optional[List[str]] = None,
include: Optional[Dict[Any, Any]] = None,
context=None,
partial=None,
use_native_dumps=False,
**kwargs
):
super().__init__(context=context, partial=partial, **kwargs)
self.__remove_keys = default_remove() + if_null_get(exclude, [])
self.__update_items = if_null_get(include, {})
self.__use_native_dumps__ = use_native_dumps
def __str__(self):
"""
Dump nested models by own properties
"""
data = copy(self.__dict__)
if self.__update_items is not None:
data.update(self.__update_items)
[data.pop(k, None) for k in self.__remove_keys]
for k in data.keys():
if isinstance(data[k], models.Model):
data[k] = json.loads(str(data[k]))
elif isinstance(data[k], list):
data[k] = [json.loads(str(it)) for it in data[k]]
elif isinstance(data[k], datetime) or isinstance(data[k], date):
data[k] = str(data[k])
elif isinstance(data[k], Decimal):
data[k] = float(data[k])
return json.dumps(data)
def nat_dump(
self,
exclude_keys: Optional[List[str]] = None,
include: Optional[Dict[Any, Any]] = None,
mutator: Optional[Callable[[Dict], Dict]] = None,
map_args: Optional[List[Any]] = None,
store_ex: bool = False,
store_in: bool = False
):
"""
Dump object using native strategy
@param exclude_keys:
@param include:
@param mutator:
@param map_args:
@param store_ex:
@param store_in:
@return:
"""
return self.sdump(exclude_keys, include, mutator, map_args, store_ex, store_in, True)
def sdump(
self,
exclude_keys: Optional[List[str]] = None,
include: Optional[Dict[Any, Any]] = None,
mutator: Optional[Callable[[Dict], Dict]] = None,
map_args: Optional[List[Any]] = None,
store_ex: bool = False,
store_in: bool = False,
use_native_dumps=False
):
"""
Model dump to json safely, checking the exclude key list
Use this function instead of zdump.
Parameters:
-----------
exclude_keys: List[str], Optional,
List of string keys of exlude in dump process
include: Dict[Any,Any], Optional,
Object to include in model object after exclude process before of dump process
mutator: Callable, Optional
Callable function to tranform object after exclude and include process
map_args: List[Any], Optional
Argument list to passed to map callable function
store_ex: bool, optional
Indicate that the exclude key added to global model exclude key array
store_in: bool, optional
Indicate that the include object added to global model object
"""
data = copy(self.__dict__)
if map_args is None:
map_args = []
native = use_native_dumps if use_native_dumps is True else self.__use_native_dumps__
if native is True:
with self.__dump_mode_on__():
data = self.__schema__.dump(self)
temp_exclude = copy(self.__remove_keys)
if exclude_keys is not None:
temp_exclude = self.__remove_keys + exclude_keys
if store_ex:
self.__remove_keys = self.__remove_keys + exclude_keys
[data.pop(k, None) for k in temp_exclude]
temp_include = copy(self.__update_items)
if include is not None:
temp_include.update(include)
data.update(temp_include)
if store_in:
self.__update_items.update(include)
else:
if temp_include is not None:
data.update(temp_include)
if mutator is not None:
data = mutator(data, *map_args)
if native is True:
return data
# TODO Verify this process when native is False
for k in data.keys():
if isinstance(data[k], models.Model):
data[k] = json.loads(str(data[k]))
elif isinstance(data[k], list):
inner_list = []
for it in data[k]:
if isinstance(it, str):
inner_list.append(it)
else:
inner_list.append(json.loads(str(it)))
data[k] = inner_list
elif isinstance(data[k], datetime) or isinstance(data[k], date):
data[k] = str(data[k])
elif isinstance(data[k], Decimal):
data[k] = float(data[k])
return data
def build(self):
data = copy(self.__dict__)
if self.__update_items is not None:
data.update(self.__update_items)
[data.pop(k, None) for k in self.__remove_keys]
return data
class MutatorMode(Enum):
DESERIALIZATION = "D"
SERIALIZATION = "S"
ALL = "*"
class FieldMutator(ABC):
@abstractmethod
def exec(self, value: Any) -> Any:
...
class ZMutator(FieldMutator):
def __init__(self, mode: MutatorMode,
raise_err: bool = True,
error_to_raise: Optional[ZHttpError] = None,
error_msg: str = None,
cause_msg: str = None,
action=None,
predicate=None):
self.mode = mode
self.error_to_raise = error_to_raise
self.error_msg = error_msg
self.cause_msg = "Mutator execution failed" if cause_msg is None else cause_msg
self.action = action
self.raise_err = raise_err
self.predicate = if_null_get(predicate, lambda x: True)
self.can_run = True
@classmethod
def with_serialize(cls, raise_err: bool = True, error_msg: str = None, cause_msg: str = None,
action=None, error_to_raise: Optional[ZHttpError] = None, predicate=None):
"""
Create mutator with serialization mode
@param predicate:
@param error_to_raise:
@param raise_err:
@param error_msg:
@param cause_msg:
@param action:
@return:
"""
return cls(MutatorMode.SERIALIZATION, raise_err, error_to_raise, error_msg, cause_msg, action, predicate)
@classmethod
def with_deserialize(cls, raise_err: bool = True, error_msg: str = None, cause_msg: str = None,
action=None, error_to_raise: Optional[ZHttpError] = None, predicate=None):
"""
Create mutator with deserialization mode
@param predicate:
@param error_to_raise:
@param raise_err:
@param error_msg:
@param cause_msg:
@param action:
@return:
"""
return cls(MutatorMode.DESERIALIZATION, raise_err, error_to_raise, error_msg, cause_msg, action, predicate)
@classmethod
def with_all(cls, raise_err: bool = True, error_msg: str = None, cause_msg: str = None,
action=None, error_to_raise: Optional[ZHttpError] = None, predicate=None):
"""
Create mutator with deserialization/serialization mode
@param predicate:
@param error_to_raise:
@param raise_err:
@param error_msg:
@param cause_msg:
@param action:
@return:
"""
return cls(MutatorMode.ALL, raise_err, error_to_raise, error_msg, cause_msg, action, predicate)
def exec(self, value: str) -> str:
if self.action is not None:
if self.predicate(value):
return self.action(value)
return value
class Str(Field):
"""A string field.
:param kwargs: The same keyword arguments that :class:`Field` receives.
"""
#: Default error messages.
default_error_messages = {
"invalid": "Not a valid string.",
"not_empty": "The string value can't be empty or null.",
"invalid_utf8": "Not a valid utf-8 string.",
}
def _serialize(self, value, attr, obj, **kwargs) -> typing.Optional[str]:
if value is None:
return None
return self.__apply_str_mappers(
utils.ensure_text_type(value), MutatorMode.SERIALIZATION
)
def _get_mutators(self, mode: MutatorMode) -> List[ZMutator]:
if 'mutators' in self.metadata:
return list(filter(lambda m: m.mode == mode, self.metadata['mutators']))
return []
def __apply_str_mappers(self, value: str, mode: MutatorMode) -> str:
mutators = self._get_mutators(mode)
mutable_value: str = value
for mutator in mutators:
try:
mutable_value = mutator.exec(mutable_value)
except Exception as e:
if mutator.raise_err is True:
if mutator.error_to_raise is not None:
mutator.error_to_raise.internal_exception = e
raise mutator.error_to_raise
raise BadRequest(message=mutator.error_msg, reason=mutator.cause_msg, parent_ex=e)
return mutable_value
def _deserialize(self, value, attr, data, **kwargs) -> typing.Any:
if not isinstance(value, (str, bytes)):
raise self.make_error("invalid")
if 'allow_empty' in self.metadata and self.metadata['allow_empty'] is False:
if not value.strip():
raise self.make_error("not_empty")
try:
return self.__apply_str_mappers(
utils.ensure_text_type(value), MutatorMode.DESERIALIZATION
)
except UnicodeDecodeError as error:
raise self.make_error("invalid_utf8") from error
_T = typing.TypeVar("_T")
class Num(Field):
"""Base class for number fields.
:param bool as_string: If `True`, format the serialized value as a string.
:param kwargs: The same keyword arguments that :class:`Field` receives.
"""
num_type = float # type: typing.Type
#: Default error messages.
default_error_messages = {
"invalid": "Not a valid number.",
"too_large": "Number too large.",
}
def __init__(self, *, as_string: bool = False, **kwargs):
self.as_string = as_string
super().__init__(**kwargs)
def _format_num(self, value) -> typing.Any:
"""Return the number value for value, given this field's `num_type`."""
return self.num_type(value)
def _validated(self, value) -> typing.Optional[_T]:
"""Format the value or raise a :exc:`ValidationError` if an error occurs."""
if value is None:
return None
# (value is True or value is False) is ~5x faster than isinstance(value, bool)
if value is True or value is False:
raise self.make_error("invalid", input=value)
try:
return self._format_num(value)
except (TypeError, ValueError) as error:
raise self.make_error("invalid", input=value) from error
except OverflowError as error:
raise self.make_error("too_large", input=value) from error
def _to_string(self, value) -> str:
return str(value)
def _serialize(
self, value, attr, obj, **kwargs
) -> typing.Optional[typing.Union[str, _T]]:
"""Return a string if `self.as_string=True`, otherwise return this field's `num_type`."""
if value is None:
return None
ret = self._format_num(value) # type: _T
return self._to_string(ret) if self.as_string else ret
def _deserialize(self, value, attr, data, **kwargs) -> typing.Optional[_T]:
return self._validated(value)
# dataclasses extensions
@dataclass
class DefaultValue:
value: Any
@dataclass
class WithDefaults:
def __post_init__(self):
for field in fields(self):
if isinstance(field.default, DefaultValue):
field_val = getattr(self, field.name)
if isinstance(field_val, DefaultValue) or field_val is None:
setattr(self, field.name, field.default.value) | zpy-api-core | /zpy-api-core-1.8.0.tar.gz/zpy-api-core-1.8.0/zpy/utils/objects.py | objects.py |
from multiprocessing import Pool, cpu_count, Process, Queue
from time import perf_counter, sleep
from zpy.logger import ZLogger
from typing import Any, Callable, Iterable, List, Optional, Tuple, Union
_func: Optional[Callable[[Any], Any]] = None
def worker_init(func):
global _func
_func = func
def worker(x):
return _func(x)
def _task_wrapper(idx, queue, task, args):
queue.put((idx, task(*args)))
def mapp(
collection: Iterable[Any],
fn: Callable[[Any], Any],
chunk_size: Optional[int] = 1,
args: Optional[Tuple[Any]] = None,
) -> List[Any]:
"""Parallel Collection Processor
Args:
collection (Iterable[Any]): Iterable
fn (Callable[[Any], Any]): Map function
chunk_size (Optional[int]): chunk size. Default 1
args: Args
Returns:
List[Any]: iterable
"""
n_cpu = cpu_count()
if args:
with Pool(processes=n_cpu) as pool:
return pool.starmap(fn, [(e,) + args for e in collection], chunk_size)
with Pool(processes=n_cpu, initializer=worker_init, initargs=(fn,)) as pool:
return pool.map(worker, collection, chunk_size)
def runp(
tasks: List[Callable[[Any], Any]], args: Optional[List[Tuple[Any]]] = None
) -> List[Any]:
"""Run tasks in parallel.
Args:
tasks (List[Callable[[Any], Any]]): Collection of tasks references
args (Optional[List[Tuple[Any]]], optional): Args of tasks. Defaults to None.
Raises:
ValueError: if the number of args and tasks aren't the same
Returns:
List[Any]: Ordered Tasks result
"""
if args is not None and len(tasks) != len(args):
raise ValueError("Number of args must be equal to number of tasks.")
queue = Queue()
processes = [
Process(
target=_task_wrapper,
args=(i, queue, task, () if not args else args[i]),
)
for i, task in enumerate(tasks)
]
for process in processes:
process.start()
for process in processes:
process.join()
return [value[1] for value in sorted([queue.get() for _ in processes])]
class TaskExecutor:
def __init__(self, task: Any, args=(), logger: ZLogger = None):
self.queue = Queue()
self.task = task
self.args = args
self.logger = logger
def terminator(self, queue: Queue, seconds):
sleep(seconds)
if queue.empty():
queue.put("TASK_FINISHED_BY_TIMEOUT")
def runner(self, queue: Queue, task, args):
queue.put(task(*args))
def run(self, timeout: float = 0.0, default_value: Any = None):
start = perf_counter()
try:
runner = Process(target=self.runner, args=(self.queue, self.task, self.args))
scheduler = Process(target=self.terminator, args=(self.queue, timeout))
scheduler.start()
runner.start()
while True:
if not self.queue.empty():
value = self.queue.get()
if value == 'TASK_FINISHED_BY_TIMEOUT':
if runner.is_alive():
runner.terminate()
if self.logger:
end = perf_counter()
self.logger.info(f"Task execution finished by timeout. {round(end - start, 2)}s.")
return default_value
if scheduler.is_alive():
scheduler.terminate()
if self.logger:
end = perf_counter()
self.logger.info(f"Task execution finished successfully. {round(end - start, 2)}s.")
return value
except Exception as e:
if self.logger:
self.logger.exception("An error occurred while executing task", exc_info=e)
else:
print("An error occurred while executing task", e)
return default_value | zpy-api-core | /zpy-api-core-1.8.0.tar.gz/zpy-api-core-1.8.0/zpy/utils/parallel.py | parallel.py |
from typing import Any, Callable, List, Optional, Tuple, Union
from functools import reduce
import logging
import time
import inspect
from zpy.logger import ZLogger
from zpy.utils.values import if_null_get
from zpy.app import zapp_context as ctx
def safely_exec(callable_fn: Callable, args=None, default_value: Any = None) -> Optional[Any]:
"""
Execute provided function in try:except block
@param default_value:
@param callable_fn:
@param args:
@return: value returned of execution or none
"""
if args is None:
args = []
try:
return callable_fn(*args)
except Exception as e:
logging.exception(e)
return default_value
def safe_exec_wrapper(target: Callable, args=None, kwargs: dict = None,
msg: str = None,
notifier: Optional[Callable] = None,
default_ret=None, throw_ex: bool = False):
logg = ctx().logger
if args is None:
args = []
if not kwargs:
kwargs = {}
try:
return target(*args, **kwargs)
except Exception as e:
msg = if_null_get(msg, f"An error occurred while try execute: {target.__name__} with: {args} and {kwargs}.")
logg.exception(msg)
if notifier:
notifier(f"Fatal: {msg}\n{str(e)}")
if throw_ex:
raise
return default_ret
def exec_if_nt_null(callable_fn: Callable, args: Optional[List[Any]] = None) -> object:
"""
Execute function if args not null
"""
if args is None:
args = []
for arg in args:
if arg is None:
return False
return callable_fn(*args)
def safely_exec_with(callable_fn: Callable, default_value: Any = None, args=None) -> Optional[Any]:
"""
Execute provided function in try:except block
@param default_value:
@param callable_fn:
@param args:
@return: value returned of execution or none
"""
if args is None:
args = []
try:
return callable_fn(*args)
except Exception as e:
logging.exception(e)
return default_value
def get_class_that_defined_method(meth):
for cls in inspect.getmro(meth.im_self):
if meth.__name__ in cls.__dict__:
return cls
return None
def timeit(msg: str = None):
"""
Time execution logger.
@param msg:
@return:
"""
def _timeit_(method):
def timed(*args, **kw):
logg = ctx().logger
ts = time.time()
result = method(*args, **kw)
te = time.time()
method_name = ('{} -> {}'.format(method.__module__, method.__name__)) if not msg else msg
if 'log_time' in kw:
name = kw.get('log_name', method.__name__.upper())
kw['log_time'][name] = int((te - ts) * 1000)
else:
logg.info(f"Time Execution: {method_name} :: {(te - ts) * 1000:2.2f} ms.")
return result
return timed
return _timeit_
def safe(default: Any = None, notifier: Any = None, logger: ZLogger = None, err_msg: str = None):
"""
Safe execution
@param logger:
@param err_msg:
@param notifier: Error notifier
@param default: if execution fail return default value
@return:
"""
def __safe_execution__(func):
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except Exception as e:
method_name = ('{} -> {}'.format(func.__module__, func.__name__)) if not err_msg else err_msg
error_message = f"An error occurred while executing {method_name}. Msg: {str(e)}"
if notifier:
notifier(error_message)
if logger:
logger.exception(error_message, exc_info=e)
return default
return wrapper
return __safe_execution__
def fn_composite(*func):
"""
Function composition
@param func: functions
@return: composition
"""
def compose(f, g):
return lambda x: f(g(x))
return reduce(compose, func, lambda x: x)
def if_then(value: bool, function: Callable, args: Union[List, Tuple] = None, default_value: Any = None):
"""
If value provided is true, then execute function provided as param.
@param value:
@param function:
@param args:
@param default_value:
@return:
"""
if value:
return function(*if_null_get(args, []))
return default_value
def if_get(value: bool, if_value: Any = None, else_value: Any = None, strict=False):
"""
If value provided is true, then return values provided as param.
@param strict: Force boolean value to compare
@param if_value: value returned if value is true
@param else_value: value returned if value is false
@param value: value to evaluate
@return: Any
"""
if strict is True and value is True:
return if_value
if strict is False and value:
return if_value
return else_value
def if_not_then(value: bool, function: Callable, args: Union[List, Tuple] = None, default_value: Any = None):
"""
If value provided is true, then execute function provided as param.
@param value:
@param function:
@param args:
@param default_value:
@return:
"""
if not value:
return function(*if_null_get(args, []))
return default_value
def if_else_then(value: bool, if_func: Callable = None, if_args: Union[List, Tuple] = None, else_func: Callable = None,
else_args: Union[List, Tuple] = None, if_value: Any = None, else_value: Any = None):
"""
If value provided is true, then execute if_function provided as param otherwise execute else_function
@param else_value:
@param if_value:
@param else_args:
@param else_func:
@param if_args:
@param value:
@param if_func:
@return:
"""
if value:
if if_func:
return if_func(*if_null_get(if_args, []))
return if_value
if else_func:
return else_func(*if_null_get(else_args, []))
return else_value
class Maybe(object):
"""
"""
def __init__(self, value) -> None:
self.value = value
self.exception = None
@classmethod
def unit(cls, value):
return cls(value)
def __getattr__(self, item):
_field = getattr(self.value, item)
if not callable(_field):
return self.bind(lambda _: _field)
return lambda *args, **kwargs: self.bind(lambda _: _field(*args, **kwargs))
def apply(self, fn):
if fn is None:
return self
fn(self.value)
return self
def bind(self, fn):
if self.value is None:
return self
result = fn(self.value)
if isinstance(result, Maybe):
return result
return Maybe.unit(result)
def safe_bind(self, fn):
try:
return self.bind(fn)
except Exception as e:
self.exception = e
return self
def safe_apply(self, fn):
try:
return self.bind(fn)
except Exception as e:
self.exception = e
return self
def optional_bind(self, fn):
return self.safe_bind(fn)
def optional_apply(self, fn):
return self.safe_apply(fn)
def unwrap(self) -> Any:
if self.exception:
raise self.exception
return self.value
def value_or(self, default_value: Any) -> Any:
return if_null_get(self.value, default_value) | zpy-api-core | /zpy-api-core-1.8.0.tar.gz/zpy-api-core-1.8.0/zpy/utils/funcs.py | funcs.py |
import json
import os
import sys
from pathlib import Path
from typing import Callable, List
def read_event(name: str = None, event_file_path: str = None):
event_path = f"{os.getcwd()}\\events\\{name}" if name else event_file_path
if not event_path:
raise ValueError("Path file or name is required...")
with open(event_path, 'r') as sm:
return json.loads(sm.read())
def add_source_to_path(src_dir: str = None, replacement: str = None):
current_dir = src_dir
if not current_dir:
current_dir = os.getcwd()
current_dir = current_dir.replace('\\tests' if not replacement else replacement, '\\src')
sys.path.append(current_dir)
path = Path(current_dir)
sys.path.append(str(path.parent.absolute()))
def get_files_in(directory: str, ext, absolute=False) -> List[str]:
path_files = []
parent = os.getcwd()
for root, dirs, files in os.walk(directory):
for filename in files:
if filename.endswith(ext):
if absolute:
path_files.append(os.path.join(parent, root, filename))
else:
path_files.append(os.path.join(root, filename))
return path_files
def file_content_updater(
file_path: str,
mutator: Callable[[str], str] = None,
find: str = None,
replaced: str = None,
) -> None:
"""
Content File Updater
@param file_path:
@param mutator:
@param find:
@param replaced:
@return:
"""
with open(file_path, "r+b") as f:
content = f.readlines()
for i, line in enumerate(content):
line = line.decode("utf-8")
if mutator:
new: str = mutator(line)
content[i] = new.encode("utf-8")
else:
if line and find in line:
print(f"Found in {file_path} line: {i}... Removed...")
content[i] = line.replace(find, replaced).encode("utf-8")
f.seek(0)
f.truncate()
f.writelines(content) | zpy-api-core | /zpy-api-core-1.8.0.tar.gz/zpy-api-core-1.8.0/zpy/utils/files.py | files.py |
from enum import Enum
from typing import Any, List, Optional, Tuple, Callable, Dict, Union
from requests import post, Response, get, patch, delete, put
from zpy.logger import zL, ZLogger
from datetime import timedelta
class HLevels(Enum):
    """Logging granularity levels for HTTP request/response tracing."""

    URL = 1
    HEADERS = 2
    PARAMS = 3
    PAYLOAD = 4

    @staticmethod
    def all():
        """Return every logging level, in declaration order."""
        return list(HLevels)

    @staticmethod
    def url_and_body():
        """Return the levels that trace only the URL and the payload."""
        return [HLevels.URL, HLevels.PAYLOAD]
def zhl_write(text: str, value: Any, logger: ZLogger = None):
    """Build a deferred log writer for ``text`` and ``value``.

    Returns a zero-argument callable that, when invoked, logs
    ``"{text} {value}"`` at info level — through ``logger`` when one is
    supplied, otherwise through the module-level ``zL`` logger. Nothing is
    logged when ``value`` is ``None``.
    """

    def wrapper():
        if value is None:
            return
        message = f"{text} {value}"
        if logger:
            logger.info(message)
        else:
            zL.i(message)

    return wrapper
class ZHttp:
    """Thin wrapper around ``requests`` with configurable request/response logging.

    A process-wide base URL and default logging levels can be registered once
    via :meth:`setup`; individual calls may override them per request.

    Raises:
        ValueError: when no URL can be resolved (neither per call nor globally).
    """

    # Process-wide defaults shared by every ZHttp call.
    #   BASE_URL: prefix used when a call supplies only ``path``.
    #   LOG_OPTIONS: default List[HLevels] controlling what gets logged.
    global_options: dict = {"BASE_URL": None, "LOG_OPTIONS": None}

    @staticmethod
    def setup(base: str, log_options: List[HLevels] = None) -> None:
        """Register the global base URL and default logging levels."""
        ZHttp.global_options["BASE_URL"] = base
        ZHttp.global_options["LOG_OPTIONS"] = log_options
@staticmethod
def __prepare_request__(
url, path, params, headers, data, json, log_options, logger: ZLogger = None, method: str = 'POST'
) -> Tuple[str, List[HLevels], dict]:
final_url: str = ZHttp.global_options["BASE_URL"]
if url is not None and path is not None:
final_url = f"{url}{path}"
if url is not None and path is None:
final_url = url
if url is None and path is not None:
final_url = f"{final_url}{path}"
if final_url is None:
raise ValueError("URL not configured!")
real_log_options = ZHttp.global_options["LOG_OPTIONS"]
metadata = {
"url": final_url,
"method": method,
"params": params,
"headers": headers,
"body": data if json is None else json
}
try:
data_to_log: Dict[HLevels, Callable] = {
HLevels.URL: zhl_write(f"Start HTTP [{method}] -", final_url, logger),
HLevels.PARAMS: zhl_write(f"Params:", params, logger),
HLevels.HEADERS: zhl_write(f"Headers:", headers, logger),
HLevels.PAYLOAD: zhl_write(f"Body:", data if json is None else json, logger)
}
if log_options is not None and len(log_options) > 0:
real_log_options = log_options
if not real_log_options:
return final_url, real_log_options, metadata
for i in real_log_options:
writer = data_to_log.get(i, None)
if writer:
writer()
except Exception as e:
ZHttp.__log_exception__(logger, "An error occurred while logging http request.")
full_metadata = {
'request': metadata,
'response': {},
'error': None
}
return final_url, real_log_options, full_metadata
@staticmethod
def __log_exception__(logger: ZLogger = None, msg: str = "The http request failed..."):
if not logger:
zL.ex(msg)
return
logger.info(msg)
@staticmethod
def __logging_response__(
result: Response, final_url, log_response_headers, real_log_options, logger: ZLogger = None
) -> dict:
if not real_log_options:
return {}
try:
zhl_write(f"Response raw body:", result.content, logger)()
except Exception as e:
zhl_write(f"ZHttp Error", f"Fail when read raw response content: {e}", logger)()
parsed = result.json()
elapsed_time = str(timedelta(seconds=result.elapsed.total_seconds()))
response_headers = dict(
zip(result.headers.keys(), result.headers.values())) if log_response_headers is True else None
metadata = {
"elapsed": elapsed_time,
"body": parsed,
"status": result.status_code,
"headers": response_headers
}
response_logs: Dict[HLevels, Callable] = {
HLevels.URL: zhl_write(f"End HTTP [POST] - {elapsed_time} - {result.status_code}", final_url, logger),
HLevels.HEADERS: zhl_write(
f"Response Headers:",
response_headers,
logger
),
HLevels.PAYLOAD: zhl_write(f"Response Body:", parsed, logger)
}
for i in real_log_options:
writer = response_logs.get(i, None)
if writer:
writer()
return metadata
@staticmethod
def get(
url: Optional[str] = None,
path: Optional[str] = None,
params: Any = None,
data: Any = None,
headers: Any = None,
cookies: Any = None,
files: Any = None,
auth: Any = None,
timeout: Any = None,
allow_redirects: bool = None,
proxies: Any = None,
hooks: Any = None,
stream: Any = None,
verify: Any = None,
cert: Any = None,
json: Any = None,
log_options: List[HLevels] = None,
log_response_headers: bool = False,
control_failure: bool = False,
logger: ZLogger = None,
wrap_in_zhttp: bool = False
) -> Optional[Union[Response, 'ZHttpResponse']]:
final_url, real_log_level, request_meta = ZHttp.__prepare_request__(
url, path, params, headers, data, json, log_options, logger
)
try:
result: Response = get(
url=final_url,
json=json,
data=data,
params=params,
headers=headers,
cookies=cookies,
files=files,
auth=auth,
timeout=timeout,
allow_redirects=allow_redirects,
proxies=proxies,
hooks=hooks,
stream=stream,
verify=verify,
cert=cert,
)
if real_log_level is not None:
response_metadata = ZHttp.__logging_response__(
result, final_url, log_response_headers, real_log_level, logger
)
request_meta['response'] = response_metadata
if wrap_in_zhttp is True:
return ZHttpResponse.of(result, request_meta)
return result
except Exception as e:
ZHttp.__log_exception__(logger)
if control_failure is False:
raise e
if wrap_in_zhttp is True:
request_meta['error'] = str(e)
return ZHttpResponse.of(None, request_meta)
return None
@staticmethod
def post(
url: Optional[str] = None,
path: Optional[str] = None,
params: Any = None,
data: Any = None,
headers: Any = None,
cookies: Any = None,
files: Any = None,
auth: Any = None,
timeout: Any = None,
allow_redirects: bool = None,
proxies: Any = None,
hooks: Any = None,
stream: Any = None,
verify: Any = None,
cert: Any = None,
json: Any = None,
log_options: List[HLevels] = None,
log_response_headers: bool = False,
control_failure: bool = False,
logger: ZLogger = None,
wrap_in_zhttp: bool = False
) -> Optional[Union[Response, 'ZHttpResponse']]:
final_url, real_log_level, request_meta = ZHttp.__prepare_request__(
url, path, params, headers, data, json, log_options, logger
)
try:
result: Response = post(
url=final_url,
json=json,
data=data,
params=params,
headers=headers,
cookies=cookies,
files=files,
auth=auth,
timeout=timeout,
allow_redirects=allow_redirects,
proxies=proxies,
hooks=hooks,
stream=stream,
verify=verify,
cert=cert,
)
if real_log_level is not None:
response_metadata = ZHttp.__logging_response__(
result, final_url, log_response_headers, real_log_level, logger
)
request_meta['response'] = response_metadata
if wrap_in_zhttp is True:
return ZHttpResponse.of(result, request_meta)
return result
except Exception as e:
ZHttp.__log_exception__(logger)
if control_failure is False:
raise e
if wrap_in_zhttp is True:
request_meta['error'] = str(e)
return ZHttpResponse.of(None, request_meta)
return None
@staticmethod
def put(
url: Optional[str] = None,
path: Optional[str] = None,
params: Any = None,
data: Any = None,
headers: Any = None,
cookies: Any = None,
files: Any = None,
auth: Any = None,
timeout: Any = None,
allow_redirects: bool = None,
proxies: Any = None,
hooks: Any = None,
stream: Any = None,
verify: Any = None,
cert: Any = None,
json: Any = None,
log_options: List[HLevels] = None,
log_response_headers: bool = False,
control_failure: bool = False,
logger: ZLogger = None,
wrap_in_zhttp: bool = False
) -> Optional[Union[Response, 'ZHttpResponse']]:
final_url, real_log_level, request_meta = ZHttp.__prepare_request__(
url, path, params, headers, data, json, log_options, logger
)
try:
result: Response = put(
url=final_url,
json=json,
data=data,
params=params,
headers=headers,
cookies=cookies,
files=files,
auth=auth,
timeout=timeout,
allow_redirects=allow_redirects,
proxies=proxies,
hooks=hooks,
stream=stream,
verify=verify,
cert=cert,
)
if real_log_level is not None:
response_metadata = ZHttp.__logging_response__(
result, final_url, log_response_headers, real_log_level, logger
)
request_meta['response'] = response_metadata
if wrap_in_zhttp is True:
return ZHttpResponse.of(result, request_meta)
return result
except Exception as e:
ZHttp.__log_exception__(logger)
if control_failure is False:
raise e
if wrap_in_zhttp is True:
request_meta['error'] = str(e)
return ZHttpResponse.of(None, request_meta)
return None
@staticmethod
def patch(
url: Optional[str] = None,
path: Optional[str] = None,
params: Any = None,
data: Any = None,
headers: Any = None,
cookies: Any = None,
files: Any = None,
auth: Any = None,
timeout: Any = None,
allow_redirects: bool = None,
proxies: Any = None,
hooks: Any = None,
stream: Any = None,
verify: Any = None,
cert: Any = None,
json: Any = None,
log_options: List[HLevels] = None,
log_response_headers: bool = False,
control_failure: bool = False,
logger: ZLogger = None,
wrap_in_zhttp: bool = False
) -> Optional[Union[Response, 'ZHttpResponse']]:
final_url, real_log_level, request_meta = ZHttp.__prepare_request__(
url, path, params, headers, data, json, log_options, logger
)
try:
result: Response = patch(
url=final_url,
json=json,
data=data,
params=params,
headers=headers,
cookies=cookies,
files=files,
auth=auth,
timeout=timeout,
allow_redirects=allow_redirects,
proxies=proxies,
hooks=hooks,
stream=stream,
verify=verify,
cert=cert,
)
if real_log_level is not None:
response_meta = ZHttp.__logging_response__(
result, final_url, log_response_headers, real_log_level, logger
)
request_meta['response'] = response_meta
if wrap_in_zhttp is True:
return ZHttpResponse.of(result, request_meta)
return result
except Exception as e:
ZHttp.__log_exception__(logger)
if control_failure is False:
raise e
if wrap_in_zhttp is True:
request_meta['error'] = str(e)
return ZHttpResponse.of(None, request_meta)
return None
@staticmethod
def delete(
url: Optional[str] = None,
path: Optional[str] = None,
params: Any = None,
data: Any = None,
headers: Any = None,
cookies: Any = None,
files: Any = None,
auth: Any = None,
timeout: Any = None,
allow_redirects: bool = None,
proxies: Any = None,
hooks: Any = None,
stream: Any = None,
verify: Any = None,
cert: Any = None,
json: Any = None,
log_options: List[HLevels] = None,
log_response_headers: bool = False,
control_failure: bool = False,
logger: ZLogger = None,
wrap_in_zhttp: bool = False,
) -> Optional[Union[Response, 'ZHttpResponse']]:
final_url, real_log_level, request_meta = ZHttp.__prepare_request__(
url, path, params, headers, data, json, log_options, logger
)
try:
result: Response = delete(
url=final_url,
json=json,
data=data,
params=params,
headers=headers,
cookies=cookies,
files=files,
auth=auth,
timeout=timeout,
allow_redirects=allow_redirects,
proxies=proxies,
hooks=hooks,
stream=stream,
verify=verify,
cert=cert,
)
if real_log_level is not None:
response_meta = ZHttp.__logging_response__(
result, final_url, log_response_headers, real_log_level, logger
)
request_meta['response'] = response_meta
if wrap_in_zhttp is True:
return ZHttpResponse.of(result, request_meta)
return result
except Exception as e:
ZHttp.__log_exception__(logger)
if control_failure is False:
raise e
if wrap_in_zhttp is True:
request_meta['error'] = str(e)
return ZHttpResponse.of(None, request_meta)
return None
class ZHttpResponse(object):
    """Thin wrapper around requests.Response plus the metadata ZHttp collected.

    The wrapped response may be None when the request failed and
    control_failure was enabled in ZHttp.
    """

    def __init__(self, response: Response, metadata: dict = None):
        self.raw = response        # underlying requests.Response (may be None)
        self.metadata = metadata   # request/response metadata from ZHttp

    @classmethod
    def of(cls, response: Response, metadata: dict = None):
        """Factory method kept for fluent construction."""
        return cls(response, metadata)

    def json(self) -> dict:
        """Decode the response body as JSON (raises when raw is None)."""
        return self.raw.json()

    def meta(self) -> dict:
        """Return the metadata captured for this request/response pair."""
        return self.metadata

    def status_is(self, status: int) -> bool:
        """True when a response exists and its status code equals *status*."""
        if self.raw is None:
            return False
        return self.raw.status_code == status

    def status_is_and(self, status: int, action: Callable[[Response], Optional[Any]]) -> Optional[Any]:
        """Run *action* on the raw response only when the status matches.

        @param status: expected HTTP status code
        @param action: callback applied to the raw response on a match
        @return: the action result, or None when there is no match
        """
        if self.raw is None or self.raw.status_code != status:
            return None
        return action(self.raw)

    def is_ok(self) -> bool:
        """
        @return: http status response is success
        """
        return False if self.raw is None else self.raw.ok
from abc import ABC
from enum import Enum
from time import sleep
from typing import Any, Callable, List
from zpy.logger import ZLogger
class ZBuilder(ABC):
    """Base class for fluent builders: a generic, chainable attribute setter."""

    def update(self, property_name: Any, value: Any):
        """Assign *value* to the attribute named ``str(property_name)``.

        Returns self so calls can be chained fluently.
        """
        setattr(self, "%s" % property_name, value)
        return self
class ZRetryPolicyProp(Enum):
    """Property keys accepted by ZRetryPolicyBuilder.update()."""

    DELAY = 'delay'  # float: Seconds delay on each attempt
    ATTEMPTS = 'attempts'  # int: Attempts number
    LOG_MSG = 'log_message'  # str: Log error message
    DEFAULT_VALUE = 'default_value'  # Any: Default value to return
    RAISE_EXCEPT = 'raise_except'  # bool: True if raise exceptions
    ENABLE_VERBOSE = 'verbose'  # bool: True if print logs

    def __str__(self) -> str:
        # String form is the raw attribute name, so members can be fed
        # straight into ZBuilder.update()/setattr.
        return self.value
class ZRetryPolicyBuilder(ZBuilder):
    """Fluent builder for retry policies consumed by ZRetryer.

    Fix: the original defined methods named ``on_excepts``/``on_results``
    that were shadowed — and therefore made uncallable (TypeError:
    'NoneType' object is not callable) — by the instance attributes of the
    same name assigned in ``__init__``. The setters are renamed to
    ``set_on_excepts``/``set_on_results``; the data attributes keep their
    original names so ZRetryer continues to read ``policy.on_excepts`` /
    ``policy.on_results`` unchanged.
    """

    def __init__(self, *args, **kwargs) -> None:
        ZBuilder.__init__(self)
        self.delay = 0.0               # seconds slept between attempts
        self.args = args
        self.kwargs = kwargs
        self.attempts = 1              # retry attempts (initial call not counted)
        self.on_excepts = None         # exception types that trigger a retry
        self.on_results = None         # result values that trigger a retry
        self.logger = None
        self.log_message = "Failed"    # prefix for the failure log line
        self.default_value = None      # returned when every attempt fails
        self.raise_except = False      # re-raise the last exception when True
        self.verbose = True            # print diagnostics when True

    def set_on_excepts(self, excepts: List[Exception]):
        """Retry only when one of these exception types is raised."""
        self.on_excepts = excepts
        return self

    def set_on_results(self, results: List[Any]):
        """Retry when the task returns one of these values."""
        self.on_results = results
        return self

    def with_logger(self, logger):
        """Attach a logger; now returns self for chaining (was missing)."""
        self.logger = logger
        return self

    def update(self, property_name: ZRetryPolicyProp, value: Any) -> 'ZRetryPolicyBuilder':
        """Generic property setter keyed by ZRetryPolicyProp."""
        super().update(property_name, value)
        return self

    def build(self) -> 'ZRetryer':
        """Materialize a ZRetryer bound to this policy."""
        return ZRetryer(self)
class ZRetryer:
    """Executes a callable with retries according to a ZRetryPolicyBuilder.

    Note: with ``policy.attempts = N`` the task is executed up to N + 1
    times (the initial call plus N retries), because the loop runs while
    ``max_attempts >= 0``.
    """

    def __init__(self, policy: ZRetryPolicyBuilder) -> None:
        self.policy = policy

    def with_default(self, value: Any) -> 'ZRetryer':
        """Set the value returned when every attempt fails; fluent."""
        self.policy.default_value = value
        return self

    def with_log_message(self, value: Any) -> 'ZRetryer':
        """Set the message prefixed to the failure log line; fluent."""
        self.policy.log_message = value
        return self

    def __log_message(self, value: Any):
        # Diagnostics go to stdout; policy.logger is not consulted here.
        if self.policy.verbose is True:
            print(value)

    def call(self, task: Callable[[Any], Any], *args, **kwargs):
        """Invoke *task* with retries.

        Retries when the result is listed in policy.on_results, or when an
        exception is raised. If policy.on_excepts is set and the raised
        exception type is not in it, policy.default_value is returned
        immediately. After exhausting attempts the last exception is
        re-raised when policy.raise_except is True; otherwise
        policy.default_value is returned.
        """
        max_attempts = self.policy.attempts
        except_detected = None
        while max_attempts >= 0:
            # First pass skips the "Retrying" message.
            if max_attempts != self.policy.attempts:
                self.__log_message(
                    "Retrying \'{}(*args)\' function invocation".format(task.__name__))
            try:
                x_result = task(*args, **kwargs)
                if self.policy.on_results and x_result in self.policy.on_results:
                    # Unwanted result: wait, consume an attempt, try again.
                    sleep(self.policy.delay)
                    max_attempts = max_attempts - 1
                    continue
                return x_result
            except Exception as e:
                except_detected = e
                if self.policy.on_excepts and type(e) not in self.policy.on_excepts:
                    # Exception is not retryable: bail out with the default.
                    return self.policy.default_value
                sleep(self.policy.delay)
                max_attempts = max_attempts - 1
        if except_detected:
            if self.policy.log_message:
                self.__log_message(
                    f"{self.policy.log_message} - [RETRYER] Failed {self.policy.attempts} times.")
            else:
                self.__log_message(
                    f"[RETRYER] Failed {self.policy.attempts} times.")
            if self.policy.raise_except is True:
                raise except_detected
        return self.policy.default_value
def retry(attempts: int = 1, delayed: float = 0.5, on_excepts: List[Exception] = None, on_results: List[Any] = None,
          default_result=None, log_message: str = None, logger: ZLogger = None, raise_except=False) -> Any:
    """
    Function decorator for retry function execution

    Note: with ``attempts = N`` the wrapped function is executed up to
    N + 1 times (initial call plus N retries), since the loop runs while
    ``max_attempts >= 0``. Failure is only logged when both log_message
    and logger are provided.

    @param attempts: Number of attempts to retry.
    @param delayed: Time delay between each attempt
    @param on_excepts: Exceptions list to retry function execution
    @param on_results: Exceptions results to retry function execution
    @param default_result: Default value returned if the function failed
    @param log_message: Message for log
    @param logger: ZLogger instance
    @param raise_except: True if you need raise exception otherwise default value returned
    @return: Function result or default value
    """

    def decorator(fun):
        def wrapper(*args, **kwargs):
            max_attempts = attempts
            except_detected = None
            while max_attempts >= 0:
                # Only announce retries, never the initial call.
                if logger and max_attempts != attempts:
                    logger.info("Retrying \'{}(*args)\' function invocation".format(fun.__name__))
                try:
                    result = fun(*args, **kwargs)
                    if on_results and result in on_results:
                        # Unwanted result: wait, consume an attempt, retry.
                        sleep(delayed)
                        max_attempts = max_attempts - 1
                        continue
                    return result
                except Exception as e:
                    except_detected = e
                    if on_excepts and type(e) not in on_excepts:
                        # Exception is not retryable: return the default.
                        return default_result
                    sleep(delayed)
                    max_attempts = max_attempts - 1
            if except_detected:
                if log_message and logger:
                    logger.err(f"{log_message} - [RETRYER] Failed {attempts} times.")
                if raise_except is True:
                    raise except_detected
            return default_result

        return wrapper

    return decorator
<p align="center">
<a href="https://github.com/NoeCruzMW/zpy-flask-msc-docs"><img width="150" src="https://lh3.googleusercontent.com/a-/AOh14GjLO5qYYR5nQl5hgavUKz4Dv3LVzWDvGtV4xNam=s600-k-no-rp-mo" alt="Zurck'z"></a>
</p>
<p align="center">
<em>ZPy Cloud Utils, Layer for build microservices</em>
</p>
<p align="center"></p>
---
# ZPy Flask Core
> Zurck'z Py Flask Micro Services Core
This package contains helper features for building Python microservices using the Flask framework
ZPy use the following packages:
- boto3
- ibm-sdk
- gcloud
## Requirements
- Python 3.6+
## Installation
Use the package manager [pip](https://pip.pypa.io/en/stable/) to install py flask micro service core .
```bash
pip install boto3
pip install zpy
```
## Features
Contains some helper features with specific integrations.
- Api
- Api Builder
- Response Builder
- Models
- Hooks
- Middlewares
- Exceptions
- Repositories
- Only oracle repository implementation for functions calling.
- Cloud Implementations
- AWS Services
- S3
- SSM
- Firehose
- SQS
- Custom
  - Plugins
- Database
- Only Oracle implementation
- Functions executor
- Logger
- Stream
- Utils
- Collections
- Cipher
- Functions
- gzip
## Basic Usage
Define restful resource
```python
from zpy.api.resource import ZResource, HTTP_METHODS
class UserResource(ZResource):
def __init__(self, **kwargs) -> None:
super().__init__()
# Receive any dependency by keywords arguments
def get(self):
l, i = super().new_operation()
try:
return self.success({"user": {"name": "Zurckz"}}, logger=l)
except Exception as e:
return self.handle_exceptions(e, l, i)
```
Setup api
```python
# Define api
@api(base='/v1', config=config)
def create_api():
# Set all supported resource for this web service.
return
[
ZResource('/', UserResource)
]
```
Local Dev Deploy
```python
from api import create_api
app = create_api()
# WARNING: Only use it for local tests
if __name__ == "__main__":
app.run(host="localhost", debug=True)
```
## Contributing
Pull requests are welcome. For major changes, please open an issue first to discuss what you would like to change.
Please make sure to update tests as appropriate.
## License
[MIT](https://choosealicense.com/licenses/mit/)
## Authors
[NoΓ© Cruz](https://www.linkedin.com/in/zurckz/)
| zpy-cloud-utils | /zpy-cloud-utils-1.1.2.tar.gz/zpy-cloud-utils-1.1.2/README.md | README.md |
from typing import Any, Dict, List
from zcloud.aws import AWSCredentials, AWS_DEFAULT_REGION
import boto3
import json
class Firehose:
    """Minimal wrapper over the boto3 Kinesis Data Firehose client.

    All operations are best-effort: errors are printed and None is
    returned instead of raising (preserved from the original behavior).
    """

    fh_client = None
    with_profile: bool = False

    def __init__(
        self,
        credentials: AWSCredentials = None,
        with_profile: bool = True,
    ) -> None:
        self.with_profile = with_profile
        if with_profile or credentials is None:
            # Rely on the ambient profile / IAM role.
            self.fh_client = boto3.client("firehose", region_name=AWS_DEFAULT_REGION)
        else:
            self.fh_client = boto3.client(
                "firehose",
                aws_access_key_id=credentials.access_key,
                aws_secret_access_key=credentials.secret_key,
                region_name=credentials.region,
            )

    def send_data_record(self, data: dict, stream_name: str):
        """
        Put one record to delivery stream.

        @param data: JSON-serializable payload
        @param stream_name: target delivery stream name
        @return: the put_record response, or None on failure
        """
        try:
            return self.fh_client.put_record(
                DeliveryStreamName=stream_name, Record=self.__prepare_record(data)
            )
        except Exception as e:
            print(e)
            return None

    def __prepare_record(self, data: Dict) -> Dict:
        # Firehose expects the payload as UTF-8 encoded bytes under "Data".
        return {"Data": json.dumps(data).encode("utf-8")}

    def send_batch_data(self, data: List[Dict[Any, Any]], stream_name: str):
        """
        Put a batch of records to the delivery stream.
        (Docstring fixed: this sends a batch, not a single record.)

        @param data: list of JSON-serializable payloads
        @param stream_name: target delivery stream name
        @return: the put_record_batch response, or None on failure
        """
        try:
            records = [self.__prepare_record(item) for item in data]
            return self.fh_client.put_record_batch(
                DeliveryStreamName=stream_name, Records=records
            )
        except Exception as e:
            print(e)
            return None

    def describe_stream(
        self, stream_name: str, limit: int = 123, start_id: str = None
    ) -> dict:
        """
        Describe a delivery stream.

        Fix: ExclusiveStartDestinationId is only included when start_id is
        provided — boto3 rejects an explicit None for this parameter with a
        ParamValidationError.
        """
        try:
            kwargs = {"DeliveryStreamName": stream_name, "Limit": limit}
            if start_id is not None:
                kwargs["ExclusiveStartDestinationId"] = start_id
            return self.fh_client.describe_delivery_stream(**kwargs)
        except Exception as e:
            print(e)
            return None
from typing import Dict, Union, Optional, Any
from zpy.utils.objects import ZObjectModel
from zcloud.aws import AWSCredentials, AWS_DEFAULT_REGION
import boto3
import json
__author__ = "NoΓ© Cruz | [email protected]"
__copyright__ = "Copyright 2021, Small APi Project"
__credits__ = ["NoΓ© Cruz", "Zurck'z"]
__license__ = "MIT"
__version__ = "0.0.1"
__maintainer__ = "NoΓ© Cruz"
__email__ = "[email protected]"
__status__ = "Dev"
class SSMParameter:
    """
    AWS Simple System Manager (Parameter Store) helper with a local cache.

    Parameter values are expected to be JSON documents; parsed values can
    optionally be cached and/or hydrated into a model class.
    """

    # NOTE(review): kept for backward compatibility — the boto3 client is
    # actually stored on ``self.ssm``; confirm nothing reads this attribute.
    ssm_client = None
    with_profile: bool = False
    store = {}
    prefix: str = None

    def __init__(
        self,
        credentials: AWSCredentials = None,
        with_profile: bool = True,
        prefix: str = None,
    ) -> None:
        self.with_profile = with_profile
        self.prefix = prefix
        # Fix: the cache used to be the class-level ``store`` dict, shared
        # (and mutated) by every instance; it is now per-instance to avoid
        # cross-instance cache bleed.
        self.store = {}
        if with_profile or credentials is None:
            self.ssm = boto3.client("ssm", region_name=AWS_DEFAULT_REGION)
        else:
            self.ssm = boto3.client(
                "ssm",
                aws_access_key_id=credentials.access_key,
                aws_secret_access_key=credentials.secret_key,
                region_name=credentials.region,
            )

    @classmethod
    def create(cls, prefix="/aws/reference/secretsmanager/"):
        """Build an instance that reads secrets through the SSM bridge path."""
        return cls(prefix=prefix)

    def get_from_cache(self, name: str, model: ZObjectModel = None) -> Optional[Union[Dict, ZObjectModel]]:
        """
        Get parameter stored in cache, optionally hydrated into *model*.
        """
        if name not in self.store:
            return None
        data = self.store[name]
        return data if model is None else model(**data)

    def get(
        self,
        prefix: str = None,
        decryption: bool = True,
        store: bool = False,
        store_name: str = "",
        model: Union[Any, ZObjectModel] = None,
        refresh: bool = False,
    ) -> Union[Dict, ZObjectModel]:
        """
        Get parameter from AWS SSM (the value must be a JSON document).

        @param prefix: parameter path, appended to the instance prefix
        @param decryption: decrypt SecureString values
        @param store: cache the parsed value under *store_name*
        @param store_name: cache key (also used for cache lookups)
        @param model: optional class hydrated with the parsed dict
        @param refresh: bypass the cache when True
        """
        if store_name in self.store and refresh is False:
            data = self.store[store_name]
            return data if model is None else model(**data)
        if prefix is None and self.prefix is None:
            raise Exception("Prefix or parameter name didn't provided.")
        real_path = prefix or ""
        if self.prefix is not None:
            real_path = f"{self.prefix}{real_path}"
        parameter = self.ssm.get_parameter(Name=real_path, WithDecryption=decryption)
        # Parse once and reuse for both caching and the return value
        # (previously the JSON was decoded up to twice).
        value = json.loads(parameter["Parameter"]["Value"])
        if store and store_name:
            self.store[store_name] = value
        return value if model is None else model(**value)
from abc import ABC, abstractmethod
from enum import Enum
__author__ = "NoΓ© Cruz | [email protected]"
__copyright__ = "Copyright 2021, Small APi Project"
__credits__ = ["NoΓ© Cruz", "Zurck'z"]
__license__ = "MIT"
__version__ = "0.0.1"
__maintainer__ = "NoΓ© Cruz"
__email__ = "[email protected]"
__status__ = "Dev"
from typing import Any, Optional
from boto3 import Session, client
AWS_DEFAULT_REGION = "us-east-1"
AWS_DNS = "https://s3.amazonaws.com/"
class AWSCredentials:
    """Simple container for AWS access credentials and region selection."""

    def __init__(self, ak: str = None, sk: str = None, st: str = None, region: str = AWS_DEFAULT_REGION,
                 profile_name: str = None) -> None:
        self.access_key = ak  # AWS access key id
        self.secret_key = sk  # AWS secret access key
        self.session_token = st  # optional STS session token
        self.profile_name = profile_name  # named profile from ~/.aws/credentials
        self.region = region  # target AWS region
class AWSSession:
    """Eagerly-built boto3 Session, from explicit credentials or the environment."""

    def __init__(self, credentials: Optional[AWSCredentials] = None, config: dict = None):
        self.config = config
        self.credentials = credentials
        self.__session = None
        self.__initialize()

    def __initialize(self):
        # With explicit credentials build a fully-specified session;
        # otherwise fall back to boto3's default resolution chain.
        if not self.credentials:
            self.__session = Session()
            return
        self.__session = Session(
            aws_access_key_id=self.credentials.access_key,
            aws_secret_access_key=self.credentials.secret_key,
            region_name=self.credentials.region,
            profile_name=self.credentials.profile_name
        )

    def client(self, name: str, **kwargs):
        """Create a boto3 client on this session."""
        return self.__session.client(name, **kwargs)

    def resource(self, name: str, **kwargs):
        """Create a boto3 resource on this session."""
        return self.__session.resource(name, **kwargs)
class AWSService(ABC):
    """Base class wiring a boto3 session/client for a concrete AWS service."""

    def __init__(self, name: str, credentials: Optional[AWSCredentials] = None, config: dict = None):
        self.name = name  # boto3 service name, e.g. "s3"
        self.config = config  # botocore Config for the default-client path
        self.credentials = credentials
        self.__client = None
        self.__session = None
        self.__initialize()

    def __initialize_client(self):
        if self.__session:
            # Fix: this used to assign ``self.client`` (a different, public
            # attribute), leaving ``self.__client`` as None — so get_client()
            # returned None whenever credentials were provided.
            self.__client = self.__session.client(
                self.name, region_name=self.credentials.region
            )
            return
        self.__client = client(self.name, config=self.config)

    def __initialize(self):
        if self.credentials:
            self.__session = Session(
                aws_access_key_id=self.credentials.access_key,
                aws_secret_access_key=self.credentials.secret_key,
                region_name=self.credentials.region,
                profile_name=self.credentials.profile_name
            )
        self.__initialize_client()

    def get_session(self) -> Session:
        """Return the underlying boto3 Session (None when no credentials)."""
        return self.__session

    def configure_session(self, session: Session) -> None:
        """Replace the session and rebuild the client from it."""
        self.__session = session
        self.__initialize_client()

    def get_client(self) -> Any:
        """Return the boto3 client for this service."""
        return self.__client

    def configure_client(self, new_client: Any) -> Any:
        """Override the boto3 client instance."""
        self.__client = new_client
class CredentialsMode(Enum):
    """How AWS credentials are resolved."""

    PROFILE = 1  # use a named/ambient profile or IAM role
    CREDENTIALS = 2  # use explicit access/secret keys
import json
from botocore.exceptions import ClientError
import boto3
from typing import Any, Dict
from . import AWSCredentials, CredentialsMode, AWS_DEFAULT_REGION
from enum import Enum
from botocore.client import Config
class ClientMethod(Enum):
    """boto3 client method names accepted by generate_presigned_url."""

    GET = 'get_object'
    PUT = 'put_object'
class HttpMethod(Enum):
    """HTTP verbs usable with a pre-signed URL."""

    GET = 'GET'
    PUT = 'PUT'
class S3:
    """Lazy helper over the boto3 S3 client.

    The client is built on first use (or eagerly with ``initialize=True``),
    either from the ambient profile/role or from explicit AWSCredentials,
    depending on ``with_profile``.
    """

    initialized: bool = False  # True once the boto3 client exists
    s3_client = None
    with_profile: bool = True  # profile/role auth vs explicit keys

    def __init__(
        self,
        credentials: AWSCredentials = None,
        bucket: str = None,
        initialize: bool = False,
        with_profile: bool = True,
    ) -> None:
        self.credentials = credentials
        self.bucket = bucket  # default bucket used when none is passed per call
        self.with_profile = with_profile
        if initialize:
            self.__init_client(credentials, with_profile)

    def set_credentials_mode(self, mode: CredentialsMode):
        """Switch between profile-based and explicit-credentials auth."""
        self.with_profile = mode != CredentialsMode.CREDENTIALS
        return True

    def __init_client(self, credentials: AWSCredentials, profile: bool = True):
        """Create the boto3 client according to the selected auth mode."""
        if credentials is None and profile is False:
            raise Exception("Credentials didn't provided")
        if credentials is not None and profile is False:
            self.s3_client = boto3.client(
                "s3",
                aws_access_key_id=credentials.access_key,
                aws_secret_access_key=credentials.secret_key,
                region_name=AWS_DEFAULT_REGION,
            )
        else:
            # Profile/role auth; sigv4 is required for pre-signed URLs in
            # several regions.
            self.s3_client = boto3.client(
                "s3",
                region_name=AWS_DEFAULT_REGION,
                config=Config(signature_version='s3v4')
            )
        self.initialized = True

    def get(
        self, full_key: str, bucket: str = None, credentials: AWSCredentials = None
    ) -> Any:
        """
        Get object content (bytes) from an S3 bucket.
        """
        real_bucket = self.__validate(bucket, credentials)
        s3_object = self.s3_client.get_object(Bucket=real_bucket, Key=full_key)
        return s3_object["Body"].read()

    def download(
        self,
        full_key: str,
        bucket: str,
        local_file: str,
        credentials: AWSCredentials = None,
    ) -> Any:
        """
        Download an object from S3 into *local_file*; returns True.
        """
        real_bucket = self.__validate(bucket, credentials)
        self.s3_client.download_file(real_bucket, full_key, local_file)
        return True

    def get_json(
        self, full_key: str, bucket: str = None, credentials: AWSCredentials = None
    ):
        """Get an object and decode it as UTF-8 JSON."""
        json_obj = self.get(full_key, bucket, credentials)
        return json.loads(json_obj.decode("utf-8"))

    def put(
        self,
        object_value: Any,
        full_key: str,
        bucket: str = None,
        credentials: AWSCredentials = None,
    ) -> Any:
        """
        Put an object into an S3 bucket; returns the put_object response.
        """
        real_bucket = self.__validate(bucket, credentials)
        return self.s3_client.put_object(Body=object_value, Bucket=real_bucket, Key=full_key)

    def put_json(
        self,
        json_object: Dict,
        full_key: str,
        bucket: str = None,
        credentials: AWSCredentials = None,
    ):
        """
        Upload JSON|DICT object to S3
        :param json_object:
        :param full_key:
        :param bucket:
        :param credentials:
        """
        json_parsed = str(json.dumps(json_object))
        return self.put(json_parsed, full_key, bucket, credentials)

    def upload(
        self,
        object_up: Any,
        object_name: str,
        bucket: str = None,
        credentials: AWSCredentials = None,
    ) -> Any:
        """Upload a file-like object to S3.

        Fix: the return annotation claimed ``bool`` but this returns the
        upload_fileobj response (None on success) or None on ClientError.
        """
        try:
            real_bucket = self.__validate(bucket, credentials)
            return self.s3_client.upload_fileobj(object_up, real_bucket, object_name)
        except ClientError as e:
            print(e)
            return None

    def __validate(self, bucket: str = None, credentials: AWSCredentials = None) -> str:
        """
        Resolve the target bucket and lazily build the client when needed.
        """
        real_bucket = self.bucket if bucket is None else bucket
        if real_bucket is None:
            raise Exception("Bucket didn't provided")
        if self.initialized is False:
            # Fix: the profile flag was not forwarded here, so the client
            # was always built in profile mode and explicit credentials
            # (set_credentials_mode(CredentialsMode.CREDENTIALS)) were
            # silently ignored.
            self.__init_client(
                self.credentials if credentials is None else credentials,
                self.with_profile,
            )
        return real_bucket

    def pre_signed_url(self,
                       key: str,
                       bucket: str = None,
                       method_method: ClientMethod = ClientMethod.GET,
                       expiration: int = 3600,
                       http_method: HttpMethod = HttpMethod.GET) -> str:
        """Generate a pre-signed URL for *key*, valid *expiration* seconds.

        NOTE(review): the ``method_method`` parameter name (sic) is kept
        for backward compatibility with keyword callers.
        """
        real_bucket = self.__validate(bucket, self.credentials)
        return self.s3_client.generate_presigned_url(
            ClientMethod=method_method.value,
            Params={'Bucket': real_bucket, 'Key': key},
            ExpiresIn=expiration,
            HttpMethod=http_method.value
        )
from enum import Enum
from typing import Any, Optional
from botocore.client import Config
from zpy.containers.MemoryCacheSystem import MemoryCacheSystem
from zcloud.aws import AWSCredentials, AWSSession
class Custom:
    """Ad-hoc service identifier mimicking the AwsServices member interface."""

    def __init__(self, name: str):
        # Stored as ``value`` to mirror Enum members.
        self.value = name

    @property
    def v(self):
        # Shorthand for the raw service name, matching AwsServices.v.
        return self.value
class AwsServices(Enum):
    """Well-known boto3 service names (arbitrary names via ``new``)."""

    COGNITO_IDP = 'cognito-idp'
    LAMBDA = 'lambda'
    DYNAMO = 'dynamodb'
    REKOGNITION = 'rekognition'
    SQS = 'sqs'
    S3 = 's3'

    @property
    def v(self):
        # Shorthand for the raw boto3 service name.
        return self.value

    @staticmethod
    def new(name: str):
        # Wrap an arbitrary service name so it can be used like a member.
        return Custom(name)
class AwsClientFactory(MemoryCacheSystem):
    """Caching factory for boto3 clients/resources built on one AWSSession."""

    Services = AwsServices

    def __init__(self, credentials: Optional[AWSCredentials] = None, config: Config = None):
        super().__init__()
        self.credentials = credentials
        self.config = config  # default botocore Config applied to every build
        self.session = AWSSession(credentials, config)

    def get_client(self, service: AwsServices, **kwargs) -> Any:
        """Return a cached client for *service*, creating it on first use."""
        cache_key = f'c{service.value}'
        cached = self.get_or(cache_key, None)
        if cached:
            return cached
        created = self.new_client(service, **kwargs)
        self.set(cache_key, created)
        return created

    def get_resource(self, service: AwsServices, **kwargs) -> Any:
        """Return a cached resource for *service*, creating it on first use."""
        cache_key = f'r{service.value}'
        cached = self.get_or(cache_key, None)
        if cached:
            return cached
        created = self.new_resource(service, **kwargs)
        self.set(cache_key, created)
        return created

    def new_client(self, service: AwsServices, **kwargs):
        """Always build a fresh client (bypasses the cache)."""
        # Fix: the factory-level config was only applied when the caller
        # already passed some kwargs ("if kwargs and ..."), so a plain
        # new_client(service) call silently ignored self.config.
        kwargs.setdefault('config', self.config)
        return self.session.client(name=service.value, **kwargs)

    def new_resource(self, service: AwsServices, **kwargs):
        """Always build a fresh resource (bypasses the cache)."""
        kwargs.setdefault('config', self.config)
        return self.session.resource(name=service.value, **kwargs)
from enum import Enum
from typing import Any
from zcloud.aws.secret_manager import SecretManager
from zcloud.aws.ssm import SSMParameter
from zpy.utils.parallel import TaskExecutor
from zpy.logger.utils import Loggable
import os
class SecretProvider(Enum):
    """Secret backends supported by SecretProviderManager."""

    SSM = 'ssm'  # AWS Systems Manager Parameter Store
    SECRET_MANAGER = 'secretsmanager'  # AWS Secrets Manager
class SecretProviderManager(Loggable):
    """Reads secrets from a primary provider with fallback to the other.

    The primary provider (``main_provider``) is tried first — optionally
    through a timed TaskExecutor — and when it yields a falsy result the
    other provider is consulted.
    """

    def __init__(self, main_provider: SecretProvider, provider_timeout: float = 2.0):
        Loggable.__init__(self)
        self.priority = main_provider
        self.ssm = SSMParameter.create()
        self.sm = SecretManager()
        self.timeout = provider_timeout  # seconds per provider attempt (parallel mode)

    def __retrieve_with_ssm(self, name: str):
        # Run the SSM lookup inside a TaskExecutor so it can be timed out.
        return TaskExecutor(task=self.ssm.get, args=(name,), logger=self.logger).run(self.timeout)

    def __retrieve_with_sm(self, name: str):
        # Run the Secrets Manager lookup inside a TaskExecutor (timed).
        return TaskExecutor(task=self.sm.get_value, args=(name,), logger=self.logger).run(self.timeout)

    def __retrieve(self, name: str, parallel: bool):
        # Deduplicated: select the primary/fallback fetchers once, then run
        # the same try-primary-else-fallback flow (behavior unchanged from
        # the original duplicated branches).
        if self.priority == SecretProvider.SECRET_MANAGER:
            primary = self.__retrieve_with_sm if parallel else self.sm.get_value
            fallback = self.__retrieve_with_ssm if parallel else self.ssm.get
        else:
            primary = self.__retrieve_with_ssm if parallel else self.ssm.get
            fallback = self.__retrieve_with_sm if parallel else self.sm.get_value
        result = primary(name)
        if not result:
            return fallback(name)
        return result

    def get_value(self, name: str, model: Any = None):
        """Fetch a secret sequentially (no subprocess/timeout)."""
        value = self.__retrieve(name, False)
        if value and model:
            return model(**value)
        return value

    def get(self, name: str, model: Any = None):
        """
        Retrieve parameter, using timed parallel execution where supported.

        Parallel mode is disabled on Windows (``os.name == 'nt'``).
        :param model: optional class hydrated with the secret dict
        :param name: parameter name
        :return:
        """
        use_parallel = os.name != 'nt'
        value = self.__retrieve(name, use_parallel)
        if value and model:
            return model(**value)
        return value

    def configure(self, ssm: SSMParameter, sm: SecretManager):
        """Inject pre-built provider clients."""
        self.sm = sm
        self.ssm = ssm

    @classmethod
    def create_with_sm(cls, provider_timeout: float = 2.0) -> 'SecretProviderManager':
        """
        Create instance using Secret Manager as main provider.
        :return:
        """
        return cls(SecretProvider.SECRET_MANAGER, provider_timeout)

    @classmethod
    def create_with_ssm(cls, provider_timeout: float = 2.0) -> 'SecretProviderManager':
        """
        Create instance using Simple System Manager as main provider.
        :return:
        """
        return cls(SecretProvider.SSM, provider_timeout)
# zpy-database v0.1.1
> [Zguillez](https://zguillez.io) | Guillermo de la Iglesia
## Pyton database wrapper
# Getting Started
## Install
```
pip install --upgrade zpy-database
```
# Usage
```
from zpy_database import database as db
db.connect({
'conn': os.environ['DB_HOST'],
'database': os.environ['DB_NAME'],
'user': os.environ['DB_USER'],
'password': os.environ['DB_PASS']
})
```
```
data = db.sql("SELECT id, name FROM my_table")
print(data[0])
```
```
data = db.dict("SELECT id, name FROM my_table", ['id', 'name'])
print(data[0]['name'])
```
```
db.close()
```
# Contributing and issues
Contributors are welcome, please fork and send pull requests! If you have any ideas on how to make this project better
then please submit an issue or [email](mailto:[email protected]) me.
# License
© 2023 Zguillez.IO
Original code licensed under [MIT](https://en.wikipedia.org/wiki/MIT_License) Open Source projects used within this
project retain their original licenses.
# Changelog
### v0.1.1 (February 13, 2023)
* Initial commit
| zpy-database | /zpy-database-0.1.1.tar.gz/zpy-database-0.1.1/README.md | README.md |
<p align="center">
<a href="https://github.com/NoeCruzMW/zpy-flask-msc-docs"><img width="150" src="https://lh3.googleusercontent.com/a-/AOh14GjLO5qYYR5nQl5hgavUKz4Dv3LVzWDvGtV4xNam=s600-k-no-rp-mo" alt="Zurck'z"></a>
</p>
<p align="center">
<em>ZDB Core, Layer for connect to mysql, postgresql or oracle from python</em>
</p>
<p align="center"></p>
---
# ZPy Database Core
> Zurck'z Py
This package contains some helpers features for call function or stored procedures from python.
ZPy use the following packages:
- mysql-connector-python
- cx-Oracle
- psycopg2
## Requirements
- Python 3.6+
## Installation
Use the package manager [pip](https://pip.pypa.io/en/stable/) to install py flask micro service core .
```bash
pip install zpy
pip install package_directory directory_to_install
```
## Features
Contains some helper features with specific integrations.
- Database
- Functions executor
- Stored Procedures executor
- Autocommit is false by default
- Utils
- Context Manager Helper
## Roadmap
- ActiveRecord implementation
- Cluster
- Parallel parsed
## Basic Usage
Basic Configuration
````python
config = {
"user": "",
"password": "",
"database": "",
"host": "",
"port": 3306
}
````
With single MySQL datasource
```python
from zdb.mysql import ZMySQL
from zdb import ZDBTransact
# Create database instance for MySQL
mysql_db = ZMySQL.from_of(user="", password="", host="", db_name="")
# If you only execute one operation you can call directly
# Connection automatically opened and commit and close
[user] = mysql_db.call("FN_GET_USER_BY_ID", list_params=[1])
# Open connection using Context Manager
with ZDBTransact(mysql_db) as tr:
payments = mysql_db.call("FN_GET_USER_PAYMENTS", list_params=[1], connection=tr.session)
for payment in payments:
mysql_db.call("FN_UPDATE_PAYMENT", list_params=[payment['id']], connection=tr.session)
```
Multiple Datasources
```python
# Define db mediator
# Setup base configuration in ZMediator()
# The base configuration will be overwritten by add common values
db_mngr = ZMediator(config, False)
.add_common("DB_NAME_1", "DB_USER", "DB_PASSWORD", True) # Mark default ds
.add_common("DB_NAME_2", "DB_USER", "DB_PASSWORD")
.add_common("DB_NAME_3", "DB_USER", "DB_PASSWORD")
db_conn1 = db_mngr.default().new_connect()
db_conn2 = db_mngr.get("DB_NAME_1").new_connect()
db_conn3 = db_mngr.get("DB_NAME_3").new_connect()
try:
# Execute function
res = db_mngr.default().exec("FN_GET_USER_BY_ID(%d)", list_params=[1], ret_type=DBTypes.cursor)
print(res)
# Execute function
res = db_mngr.get("DB_NAME_2").exec("FN_GET_USER_BY_ID(%d)", list_params=[1], ret_type=DBTypes.cursor)
print(res)
# Call sp
res = db_mngr.get("DB_NAME_3").call("SP_GET_DATA", ret_type=DBTypes.cursor)
print(res)
except Exception as e:
logging.exception(e)
finally:
    # Warning: remember to close opened connections
db_conn1.close()
db_conn2.close()
db_conn3.close()
```
````python
session = self.db.new_connect()
try:
count = self.db.call('SP_GENERIC_GET_ROWS', list_params=['TAROLES'], ret_type=DBTypes.integer,
connection=session)
return Pager.create(data.pagination, count)
finally:
session.close()
````
## Contributing
Pull requests are welcome. For major changes, please open an issue first to discuss what you would like to change.
Please make sure to update tests as appropriate.
## License
[MIT](https://choosealicense.com/licenses/mit/)
## Authors
[NoΓ© Cruz](https://www.linkedin.com/in/zurckz/)
| zpy-db-core | /zpy-db-core-1.2.2.tar.gz/zpy-db-core-1.2.2/README.md | README.md |
from zpy import EnvironmentKeys
from typing import Dict, List, Optional
from zpy.utils.collections import find
from zpy.utils import get_env as var
from re import search
import os
__author__ = "NoΓ© Cruz | [email protected]"
__copyright__ = "Copyright 2021, Small APi Project"
__credits__ = ["NoΓ© Cruz", "Zurck'z"]
__license__ = "MIT"
__version__ = "0.0.1"
__maintainer__ = "NoΓ© Cruz"
__email__ = "[email protected]"
__status__ = "Dev"
def get_schema(fn: str, env=None, schemas=None) -> str:
    """
    Resolve the fully-qualified name ``<schema>.<fn>`` for the current environment.

    PD: You should be configured schemas list in repositories

    @param fn: function/procedure name to qualify
    @param env: environment override; falls back to the ``env`` OS variable
    @param schemas: list of {"ENVIRONMENT": ..., "value": ...} mappings
    @raise Exception: when no environment or no matching schema is configured
    """
    real_env = os.getenv("env") if env is None else env
    if real_env is None:
        # Fix: ``raise "..."`` raised a TypeError in Python 3 — strings are
        # not exceptions; wrap the message in a real Exception.
        raise Exception("Environment not configured")
    schema = find(schemas, lambda sc: sc["ENVIRONMENT"] == real_env)
    if schema:
        return "{}.{}".format(schema["value"], fn)
    raise Exception("Not schema configured")
def get_schema_by(schemas: List[Dict], env=None) -> Optional[str]:
    """
    Get the schema value for the current environment, or None.

    PD: You should be configured schemas list in repositories

    @param schemas: list of {"ENVIRONMENT": ..., "value": ...} mappings
    @param env: environment override; falls back to the ``ENVIRONMENT`` OS variable
    @return: the schema value, or None when unresolved
    """
    real_env = os.getenv("ENVIRONMENT") if env is None else env
    if real_env is None:
        # Fix: this used to be ``raise None``, which itself raises a
        # TypeError; the Optional[str] contract says to return None instead.
        return None
    schema = find(schemas, lambda sc: sc["ENVIRONMENT"] == real_env)
    if schema:
        return schema["value"]
    return None
def get_current_schema(schemas: List[Dict], local_emv: str = None, global_env: str = None):
    """Resolve the schema using local, then global, then OS-level environment.

    NOTE(review): the ``local_emv`` parameter name (sic) is kept for
    backward compatibility with keyword callers.
    """
    candidate = global_env if local_emv is None else local_emv
    external = var(str(EnvironmentKeys.ENVIRONMENT))
    resolved = candidate if candidate is not None else external
    if resolved is None or schemas is None:
        return None
    return get_schema_by(schemas, resolved)
def displayable_message(raw_message: str, default: str = "") -> str:
    """
    Extract a client-displayable message wrapped in curly braces.

    Ex: ``"Failed {The customer provided doest exist.}"`` yields
    ``"The customer provided doest exist."``.

    @param raw_message: raw message possibly containing ``{...}``
    @param default: value returned when no wrapped message is found
    """
    if not raw_message:
        return default
    match = search('{(.*)}', raw_message)
    return match.group(1).strip() if match else default
from dataclasses import asdict
from typing import Optional, Any, List, Callable, TypeVar
from psycopg2 import connect, DatabaseError
from zpy.utils.funcs import safely_exec
from zdb.commons import show_info, DBTypes, get_map_type, ZDatabase, ZDBConfig, build_params, process_exception
T = TypeVar("T")
def __fn_extract_data(result_set, rows, column_names, ret_type: DBTypes, model, jsonfy=False) -> Any:
    """Shape fetched *rows* for the caller (inner data builder).

    ``DBTypes.cursor`` returns the whole row set (optionally as models or
    dicts); any other type converts only the first row/column.
    """
    if ret_type == DBTypes.cursor:
        if model is not None:
            return [model(**dict(zip(column_names, entry))) for entry in rows]
        if jsonfy is True:
            return [dict(zip(column_names, entry)) for entry in rows]
        return rows
    if not rows:
        return None
    head = rows[0]
    parser = get_map_type(ret_type)
    if model is not None:
        return model(**dict(zip(column_names, head)))
    if jsonfy is True:
        return dict(zip(column_names, head))
    if ret_type == DBTypes.single_item:
        return head
    return parser(head[0])
def extract_data(result_set, ret_type: DBTypes, cursor, model, jsonfy=False) -> Any:
    """Build stored-procedure results: fetch every row from *cursor*.

    When *ret_type* is falsy the raw row tuples are returned untouched.
    """
    columns = [description[0] for description in cursor.description]
    fetched = cursor.fetchall()
    if not ret_type:
        return fetched
    return __fn_extract_data(result_set, fetched, columns, ret_type, model, jsonfy)
def fn_extract_data(result_set, ret_type: DBTypes, cursor, model, jsonfy=False) -> Any:
    """Build results for ``call``-style executions.

    OUT-style types are fetched eagerly first; otherwise rows are fetched only
    when the cursor actually advanced (``cursor.rownumber`` is truthy).
    """
    columns = [d[0] for d in cursor.description] if cursor.description else []
    # NOTE: DBTypes.out_integer/out_bool are value aliases of integer/bool,
    # so this branch also triggers for those plain types.
    if ret_type in (DBTypes.out_integer, DBTypes.out_bool):
        out_rows = cursor.fetchall()
        if out_rows:
            return __fn_extract_data(result_set, out_rows, columns, ret_type, model, jsonfy)
    if not cursor.rownumber:
        return None
    remaining = cursor.fetchall()
    if not ret_type:
        return remaining
    return __fn_extract_data(result_set, remaining, columns, ret_type, model, jsonfy)
class ZPostgres(ZDatabase):
    """PostgreSQL implementation of :class:`ZDatabase` (psycopg2-backed).

    A fresh connection is opened per ``call``/``exec`` unless the caller
    supplies one, in which case the caller keeps ownership of it.
    """

    def __init__(self, dict_config: Optional[dict] = None, config: Optional[ZDBConfig] = None,
                 verbose: bool = False, auto_commit: bool = True):
        # The raw dict is used only when no typed config is present.
        self.dict_config = dict_config
        self.config = config
        self.verbose = verbose
        self.auto_commit: bool = auto_commit

    @classmethod
    def setup(cls, config: dict, verbose: bool = False):
        """Build an instance from a raw keyword dict for ``psycopg2.connect``."""
        return cls(config, config=None, verbose=verbose)

    @classmethod
    def setup_of(cls, config: ZDBConfig, verbose: bool = False):
        """Build an instance from a typed :class:`ZDBConfig`."""
        return cls(config=config, verbose=verbose)

    @classmethod
    def from_dict(cls, config: dict, verbose: bool = False, **kwargs):
        """
        dict should be contain the next keys:
            dbname=
            user=
            password=
            host=
            port=
        Extra keyword arguments are merged into *config*.
        """
        config.update(kwargs)
        return cls(config, verbose=verbose)

    @classmethod
    def from_of(cls, user: str, password: str, host: str, db_name: str, verbose: bool = False):
        """Build an instance from individual connection values.

        NOTE(review): ``raise_on_warnings`` is not a documented psycopg2
        connect argument — confirm it is accepted before relying on this.
        """
        return cls({
            'user': user,
            'password': password,
            'host': host,
            'database': db_name,
            'raise_on_warnings': False
        }, verbose=verbose)

    def new_connect(self) -> Any:
        """Open a brand-new psycopg2 connection honoring ``auto_commit``."""
        final_config = self.__validate_config()
        if final_config is None:
            raise ValueError("The value of db configuration can not be null.")
        connection = connect(**final_config)
        connection.autocommit = self.auto_commit
        return connection

    def exec(self, name: str, ret_type: Optional[DBTypes] = None, params: Optional[dict] = None,
             list_params: Optional[List[Any]] = None,
             model: Optional[Any] = None,
             connection=None, jsonfy: bool = False, throw: bool = True) -> Any:
        """Function executor
        Args:
            name (str): Function name
            ret_type (DBTypes): Type of data returned from the function
            params (dict, optional): params for the function. Defaults to None.
            list_params (List[Any], optional): positional list params to the function. Defaults to None.
            model (Any, optional): model for build returned data. Defaults to None.
            connection ([type], optional): connection database. Defaults to None.
            jsonfy (bool, optional): return data in dict format if model is null. Defaults to False.
            throw (bool, optional): raise exceptions
        Returns:
            Any: processed data
        """
        return self.__call(lambda c, fn, p: c.callproc(fn, p), extract_data, name, ret_type, params, list_params, model,
                           connection, jsonfy, throw)

    def __call(self, runner: Callable, extractor: Callable, name: str, ret_type: DBTypes, params: dict = None,
               list_params: List[Any] = None,
               model: Any = None,
               connection=None, jsonfy: bool = False, throw: bool = True) -> Any:
        """Shared execution path: acquire connection/cursor, run, extract, clean up."""
        cn = connection
        connection_passed = True
        if connection is None:
            connection_passed = False
            cn = self.new_connect()  # we own this connection and must close it
        cursor = None
        if cn is not None:
            cursor = cn.cursor()
        if cn is None or cursor is None:
            raise Exception("Can't get db connection")
        if self.verbose:
            show_info(name, params, ret_type, model)
        try:
            result_set = runner(cursor, name, build_params(dict_params=params, list_params=list_params))
            return extractor(result_set, ret_type, cursor, model, jsonfy)
        except DatabaseError as error:
            process_exception(throw, error)
        except Exception as e:
            process_exception(throw, e)
        finally:
            # Only tear down resources we created ourselves.
            if not connection_passed:
                safely_exec(lambda c: c.close(), args=[cursor])
                if cn.closed == 0:  # psycopg2: 0 means the connection is open
                    safely_exec(lambda c: c.commit(), args=[cn])
                    safely_exec(lambda c: c.close(), args=[cn])
        return None

    def call(self, fn_name: str, ret_type: DBTypes = None, params: dict = None, list_params: List[Any] = None,
             model: Any = None,
             connection=None, jsonfy: bool = False, throw: bool = True) -> Any:
        """for stored procedure
        Args:
            fn_name (str): Stored procedure name
            ret_type (DBTypes): Type of data returned from stored procedure
            params (dict, optional): params for the procedure. Defaults to None.
            list_params (List[Any], optional): positional list params to the procedure. Defaults to None.
            model (Any, optional): model for build returned data. Defaults to None.
            connection ([type], optional): connection database. Defaults to None.
            jsonfy (bool, optional): return data in dict format if model is null. Defaults to False.
            throw: (bool, optional): raise exceptions
        Returns:
            Any: processed data
        """
        return self.__call(lambda c, fn, p: c.execute(f'call {fn}', p), fn_extract_data, fn_name, ret_type, params,
                           list_params, model,
                           connection, jsonfy, throw)

    def __validate_config(self) -> dict:
        """Prefer the typed config; fall back to the raw dict."""
        if self.config is not None:
            return asdict(self.config)
        return self.dict_config

    # BUG FIX: the messages below previously said "MySQL" — a copy/paste
    # leftover inside the PostgreSQL implementation.
    def init_local_client(self, path: str):
        raise NotImplementedError("Not implemented for PostgreSQL!")

    def connect(self):
        raise NotImplementedError("Not implemented for PostgreSQL!")

    def close(self) -> None:
        raise NotImplementedError("Not implemented for PostgreSQL!")

    def is_connected(self) -> bool:
        raise NotImplementedError("Not implemented for PostgreSQL!")

    def get_connection(self) -> Any:
        raise NotImplementedError("Not implemented for PostgreSQL!")
import copy
from dataclasses import asdict
from typing import Optional, Any, List, Callable, TypeVar, Generic
from mysql.connector import MySQLConnection
from mysql import connector
import mysql
from zdb.commons import show_info, DBTypes, get_map_type, ZDatabase, ZDBConfig, build_params, process_exception
from zpy.utils.funcs import safely_exec
T = TypeVar("T")
def __fn_extract_data(result_set, rows, column_names, ret_type: DBTypes, model, jsonfy=False) -> Any:
    """Shape fetched *rows* for the caller (inner data builder).

    ``DBTypes.cursor`` shapes the whole row set; scalar types convert only
    the first row/column.
    """
    if ret_type == DBTypes.cursor:
        if model is not None:
            return [model(**dict(zip(column_names, entry))) for entry in rows]
        return [dict(zip(column_names, entry)) for entry in rows] if jsonfy is True else rows
    if len(rows) == 0:
        return None
    parser = get_map_type(ret_type)
    head = rows[0]
    if model is not None:
        return model(**dict(zip(column_names, head)))
    if jsonfy is True:
        return dict(zip(column_names, head))
    return head if ret_type == DBTypes.single_item else parser(head[0])
def extract_data(result_set, ret_type: DBTypes, cursor, model, jsonfy=False) -> Any:
    """Build stored-procedure results from the first result set.

    Falls back to the raw *result_set* when the procedure produced no result
    sets at all.
    """
    for stored in cursor.stored_results():
        cols = stored.column_names
        fetched = stored.fetchall()
        if not ret_type:
            return fetched
        return __fn_extract_data(result_set, fetched, cols, ret_type, model, jsonfy)
    return result_set
def fn_extract_data(result_set, ret_type: DBTypes, cursor, model, jsonfy=False) -> Any:
    """Build results for plain ``SELECT function(...)`` executions."""
    cols = cursor.column_names
    fetched = cursor.fetchall()
    if not ret_type:
        return fetched
    return __fn_extract_data(result_set, fetched, cols, ret_type, model, jsonfy)
class ZMySQL(ZDatabase):
    """MySQL implementation of :class:`ZDatabase` (mysql-connector backed).

    A fresh connection is opened per ``call``/``exec`` unless the caller
    supplies one, in which case the caller keeps ownership of it.
    """

    def __init__(self, dict_config: Optional[dict] = None, config: Optional[ZDBConfig] = None,
                 verbose: bool = False):
        # The raw dict is used only when no typed config is present.
        self.dict_config = dict_config
        self.config = config
        self.verbose = verbose

    @classmethod
    def setup(cls, config: dict, verbose: bool = False):
        """Build an instance from a raw ``mysql.connector.connect`` dict."""
        return cls(config, config=None, verbose=verbose)

    @classmethod
    def setup_of(cls, config: ZDBConfig, verbose: bool = False):
        """Build an instance from a typed :class:`ZDBConfig`."""
        return cls(config=config, verbose=verbose)

    @classmethod
    def from_of(cls, user: str, password: str, host: str, db_name: str, verbose: bool = False):
        """Build an instance from individual connection values."""
        return cls({
            'user': user,
            'password': password,
            'host': host,
            'database': db_name,
            'raise_on_warnings': False
        }, verbose=verbose)

    def new_connect(self) -> MySQLConnection:
        """Open and return a brand-new MySQL connection."""
        final_config = self.__validate_config()
        if final_config is None:
            raise ValueError("The value of db configuration can not be null.")
        return connector.connect(**final_config)

    def call(self, name: str, ret_type: Optional[DBTypes] = None, params: Optional[dict] = None,
             list_params: Optional[List[Any]] = None,
             model: Optional[Any] = None,
             connection=None, jsonfy: bool = False, throw: bool = True) -> Any:
        """Stored procedure caller
        Args:
            name (str): Stored procedure name
            ret_type (DBTypes): Type of data returned from stored procedure
            params (dict, optional): params for the procedure. Defaults to None.
            list_params (List[Any], optional): positional list params to the procedure. Defaults to None.
            model (Any, optional): model for build returned data. Defaults to None.
            connection ([type], optional): connection database. Defaults to None.
            jsonfy (bool, optional): return data in dict format if model is null. Defaults to False.
            throw (bool, optional): raise exceptions
        Returns:
            Any: processed data
        """
        return self.__call(lambda c, fn, p: c.callproc(fn, p), extract_data, name, ret_type, params, list_params, model,
                           connection, jsonfy, throw)

    def __call(self, runner: Callable, extractor: Callable, name: str, ret_type: DBTypes, params: dict = None,
               list_params: List[Any] = None,
               model: Any = None,
               connection=None, jsonfy: bool = False, throw: bool = True) -> Any:
        """Shared execution path: acquire connection/cursor, run, extract, clean up."""
        cn = connection
        connection_passed = True
        if connection is None:
            connection_passed = False
            # We created this connection, so we are responsible for closing it.
            cn = self.new_connect()
        cursor = None
        if cn is not None:
            cursor = cn.cursor()
        if cn is None or cursor is None:
            raise Exception("Can't get db connection")
        if self.verbose:
            show_info(name, params, ret_type, model)
        try:
            result_set = runner(cursor, name, build_params(dict_params=params, list_params=list_params))
            return extractor(result_set, ret_type, cursor, model, jsonfy)
        except mysql.connector.Error as error:
            process_exception(throw, error)
        except Exception as e:
            process_exception(throw, e)
        finally:
            # Only tear down resources we created ourselves.
            if not connection_passed:
                safely_exec(lambda c: c.close(), args=[cursor])
                if cn.is_connected():
                    safely_exec(lambda c: c.commit(), args=[cn])
                    safely_exec(lambda c: c.close(), args=[cn])
        return None

    def exec(self, fn_name: str, ret_type: DBTypes = None, params: dict = None, list_params: List[Any] = None,
             model: Any = None,
             connection=None, jsonfy: bool = False, throw: bool = True) -> Any:
        """Function executor
        Args:
            fn_name (str): Function name
            ret_type (DBTypes): Type of data returned from the function
            params (dict, optional): params for the function. Defaults to None.
            list_params (List[Any], optional): positional list params to the function. Defaults to None.
            model (Any, optional): model for build returned data. Defaults to None.
            connection ([type], optional): connection database. Defaults to None.
            jsonfy (bool, optional): return data in dict format if model is null. Defaults to False.
            throw: (bool, optional): raise exceptions
        Returns:
            Any: processed data
        """
        return self.__call(lambda c, fn, p: c.execute(f'SELECT {fn}', p), fn_extract_data, fn_name, ret_type, params,
                           list_params, model,
                           connection, jsonfy, throw)

    def __validate_config(self) -> dict:
        """Prefer the typed config; fall back to the raw dict."""
        if self.config is not None:
            return asdict(self.config)
        return self.dict_config

    def init_local_client(self, path: str):
        raise NotImplementedError("Not implemented for MySQL!")

    def connect(self):
        raise NotImplementedError("Not implemented for MySQL!")

    def close(self) -> None:
        raise NotImplementedError("Not implemented for MySQL!")

    def is_connected(self) -> bool:
        raise NotImplementedError("Not implemented for MySQL!")

    def get_connection(self) -> Any:
        raise NotImplementedError("Not implemented for MySQL!")
class ZDBMediator(Generic[T]):
    """Registry/mediator for multiple named :class:`ZMySQL` data sources.

    Names must be unique and at most one data source holds the default slot;
    the first one registered as default wins. ``common_config`` holds the
    connection values shared by data sources added via :meth:`add_common_with`.
    """
    SINGLE_NAME_DS: str = 'MAIN'

    def __init__(self, common_config: dict, verbose: bool = False):
        self.common_config = common_config
        self.verbose = verbose
        self.data_sources = []

    def __default_configured(self) -> bool:
        # True when some data source already claimed the default slot.
        return len(list(filter(lambda x: x['default'] is True, self.data_sources))) > 0

    def __name_configured(self, name: str) -> bool:
        # True when *name* is already registered.
        return len(list(filter(lambda x: x['name'] == name, self.data_sources))) > 0

    @classmethod
    def of(cls, common_config: dict, verbose=False):
        """Build an empty mediator with shared connection values."""
        return cls(common_config, verbose)

    @classmethod
    def single(cls, config: dict, verbose=False):
        """Build a mediator holding one default data source named 'MAIN'."""
        instance = cls(config, verbose)
        instance.add_ds(config, True, verbose, name=cls.SINGLE_NAME_DS)
        return instance

    def add_ds(self, config: dict, default: bool = False, verbose=False, name: str = None):
        """Register a data source built from a full config dict.

        @raise ValueError: when the (explicit or derived) name is taken.
        """
        _name = config["database"]
        if name is not None:
            _name = name
        if self.__name_configured(_name) is True:
            # Include the offending name (the original f-string had no placeholder).
            raise ValueError(f'{_name} already configured')
        if default is True and self.__default_configured() is True:
            default = False  # first registered default wins; demote this one
        self.data_sources.append(
            {
                "instance": ZMySQL.setup(config, verbose),
                "name": _name,
                "config": config,
                "default": default
            }
        )

    def add_common_with(self, database: str, user: str, password: str, default: bool = False, name: str = None):
        """Register a data source from ``common_config`` plus credentials.

        @raise ValueError: when the (explicit or derived) name is taken.
        """
        _name = database
        if name is not None:
            _name = name
        if self.__name_configured(_name) is True:
            raise ValueError(f'{_name} already configured')
        if default is True and self.__default_configured() is True:
            default = False
        _config = copy.copy(self.common_config)
        _config.update({"user": user, "password": password, "database": database})
        self.data_sources.append(
            {
                "instance": ZMySQL.setup(_config, self.verbose),
                "name": _name,
                "config": _config,
                "default": default
            }
        )

    def get(self, name: str) -> ZMySQL:
        """Return the data source registered under *name*.

        @raise ValueError: when no such data source exists.
        """
        ds = list(filter(lambda x: x['name'] == name, self.data_sources))
        # BUG FIX: ``list(...)`` is never None — an unknown name used to fall
        # through and crash with IndexError instead of the intended ValueError.
        if not ds:
            raise ValueError(f"Datasource with name: {name} is not configured")
        return ds[0]["instance"]

    def default(self) -> ZMySQL:
        """Return the data source flagged as default.

        @raise ValueError: when no default was configured.
        """
        ds = list(filter(lambda x: x['default'] is True, self.data_sources))
        # BUG FIX: same empty-list check as in ``get``.
        if not ds:
            raise ValueError("Datasource by default not configured")
        return ds[0]["instance"]

    def notify(self):
        ...

    def add(self, config: dict, is_default: bool = False, name: str = None):
        """Fluent alias of :meth:`add_ds`."""
        self.add_ds(config, is_default, self.verbose, name)
        return self

    def add_common(self, database: str, user: str, password: str, is_default: bool = False, name: str = None):
        """Fluent alias of :meth:`add_common_with`."""
        self.add_common_with(database, user, password, is_default, name)
        return self

    def build(self):
        """
        Will be removed
        """
        return self
class ZMySQLCluster:
    """Placeholder for a reader/writer MySQL cluster topology.

    NOTE(review): ``reader``/``writer`` are class-level defaults; instances
    are expected to assign their own data sources — confirm intended usage.
    """
    # Read-replica data source (not yet wired up).
    reader: ZMySQL = None
    # Primary/writer data source (not yet wired up).
    writer: ZMySQL = None

    def __init__(self):
        ...
from abc import ABC, abstractmethod
from decimal import Decimal
from enum import Enum
from typing import Union, Any, List, Dict, Callable, Optional, TypeVar
from dataclasses import dataclass
from zpy.logger import zL
from zpy.app import zapp_context as ctx
T = TypeVar('T')
def show_info(fn: str, params: Union[List[Any], Dict[Any, Any]], ret_type: Enum, v_model):
    """Print a verbose banner describing a db function/procedure invocation."""
    banner = "\n|-------------------------------------------------|\n"
    print(banner)
    print(f" Function Called: {fn} ")
    print(f" Params: {params}")
    print(f" Return Type: {ret_type.name}")
    print(f" Ref Model: {v_model}")
    print(banner)
@dataclass
class ZDBConfig:
    """Connection settings shared by the MySQL/PostgreSQL/Oracle wrappers."""
    user: str
    password: str
    database: str
    host: str
    port: int = 3306  # MySQL default; override for other engines
    autocommit: bool = False
    # NOTE(review): no annotation, so this is a plain class attribute rather
    # than a dataclass field — ``asdict`` will NOT include it. Confirm that
    # this exclusion is intentional before annotating it.
    raise_on_warnings = True
    service: str = None  # Use for Oracle
# Scalar value parsers indexed by ``DBTypes.value - 1`` (see ``get_map_type``).
# Slot 0 (cursor) has no scalar parser; the last slot is the identity.
parser_types = [
    None,
    lambda x: float(x),
    lambda x: str(x),
    lambda x: int(x),
    lambda x: Decimal(str(x).strip(' "')),  # tolerate quoted numeric text
    lambda x: bool(x),
    lambda x: x
]
class DBTypes(Enum):
    """Expected result shapes for db calls.

    NOTE: ``out_integer``/``out_bool`` reuse the values of ``integer``/``bool``
    and therefore become enum *aliases* of those members (comparisons against
    the OUT variants also match the plain ones).
    """
    cursor = 1
    float = 2
    string = 3
    integer = 4
    decimal = 5
    bool = 6
    single_item = 7
    out_integer = 4  # alias of ``integer``
    out_bool = 6  # alias of ``bool``
def get_map_type(r_type: DBTypes) -> Callable:
    """Return the scalar parser registered for *r_type* in ``parser_types``."""
    index = r_type.value - 1
    return parser_types[index]
def build_params(dict_params: dict, list_params: List[Any]) -> List[Any]:
    """Normalize call parameters to a positional list.

    Dict params take precedence (values in insertion order); otherwise fall
    back to *list_params*, then to an empty list.
    """
    if dict_params is not None:
        return list(dict_params.values())
    return list_params if list_params is not None else []
def process_exception(throw: bool, e: Exception):
    """Either propagate *e* (when *throw* is exactly True) or log and swallow it."""
    if throw is not True:
        zL.e("Failed when try to call function or stored procedure.", exc_info=e)
        return
    raise e
class ZDatabase(ABC):
    """Abstract contract implemented by the MySQL/PostgreSQL/Oracle wrappers."""

    @classmethod
    @abstractmethod
    def setup(cls, config: dict, verbose: bool = False):
        """
        Setup connection arguments using dictionary
        """
        ...

    @classmethod
    @abstractmethod
    def setup_of(cls, config: ZDBConfig, verbose: bool = False):
        """
        Setup connection data from a typed :class:`ZDBConfig`.
        """
        ...

    @classmethod
    @abstractmethod
    def from_of(cls, user: str, password: str, host: str, db_name: str, verbose: bool = False):
        """
        Setup connection data from individual values.
        """
        ...

    def new_connect(self) -> Any:
        """Open and return a brand-new database connection."""
        ...

    def __validate_config(self) -> Union[dict, str]:
        # Resolve the stored configuration into connect() arguments.
        ...

    def call(self, name: str, ret_type: DBTypes, params: dict = None, list_params: List[Any] = None, model: Any = None,
             connection=None, jsonfy: bool = False, throw=False) -> Any:
        """Stored procedure caller
        Args:
            name (str): Stored procedure name
            ret_type (DBTypes): Type of data returned from stored procedure
            params (dict, optional): params for the procedure. Defaults to None.
            list_params (List[Any], optional): positional list params to the procedure. Defaults to None.
            model (Any, optional): model for build returned data. Defaults to None.
            connection ([type], optional): connection database. Defaults to None.
            jsonfy (bool, optional): return data in dict format if model is null. Defaults to False.
            throw (bool,optional)
        Returns:
            Any: processed data
        """
        ...

    def exec(self, fn_name: str, ret_type: DBTypes, params: dict = None, list_params: List[Any] = None,
             model: Any = None,
             connection=None, jsonfy: bool = False, throw=False) -> Any:
        """Function executor
        Args:
            fn_name (str): Stored procedure name
            ret_type (DBTypes): Type of data returned from stored procedure
            params (dict, optional): params for the procedure. Defaults to None.
            list_params (List[Any], optional): positional list params to the procedure. Defaults to None.
            model (Any, optional): model for build returned data. Defaults to None.
            connection ([type], optional): connection database. Defaults to None.
            jsonfy (bool, optional): return data in dict format if model is null. Defaults to False.
            throw (bool,optional)
        Returns:
            Any: processed data
        """
        ...

    @abstractmethod
    def init_local_client(self, path: str):
        """
        Initialize local client
        """
        ...

    @abstractmethod
    def connect(self):
        """Open the instance-held connection."""
        ...

    @abstractmethod
    def close(self) -> None:
        """Close the instance-held connection."""
        ...

    @abstractmethod
    def is_connected(self) -> bool:
        """Whether the instance currently holds an open connection."""
        ...

    @abstractmethod
    def get_connection(self) -> Any:
        """Return the instance-held connection (may be None)."""
        ...
class ZDBPool(ABC):
    """Abstract contract for a database connection pool."""

    @abstractmethod
    def setup_db(self, db: ZDatabase) -> None:
        """Attach the database wrapper the pool should use."""
        ...

    @abstractmethod
    def get_db(self):
        """Return the attached database wrapper."""
        ...

    @abstractmethod
    def setup_extras(self, config: dict) -> None:
        """Apply extra pool options (sizes, threading, ...)."""
        ...

    @abstractmethod
    def get_pool_connection(self) -> Any:
        """Acquire a connection from the pool."""
        ...

    @abstractmethod
    def release_connection(self, connection) -> bool:
        """Return *connection* to the pool; True on success."""
        ...

    @abstractmethod
    def close_pool(self):
        """Shut the pool down."""
        pass

    @abstractmethod
    def initialize_pool(
        self,
        max_connections: int = 5,
        min_connections: int = 1,
    ) -> bool:
        """Create the underlying pool; True on success."""
        ...
@dataclass
class ZDBWrapper:
    """Pairs a :class:`ZDatabase` config with its (optional) live connection."""
    connection: Any
    db: ZDatabase
    name: str
    verbose: bool

    def configure(self, connection: Any):
        """Adopt *connection*, releasing any previously held one first."""
        if self.connection:
            self.release()
        self.connection = connection

    def has_connection(self) -> bool:
        """Whether a live connection is currently held."""
        return self.connection is not None

    def release(self):
        """Close and drop the held connection (best effort, never raises)."""
        if not self.connection:
            return
        try:
            self.connection.close()
            self.connection = None
            if self.verbose:
                ctx().logger.info(f"Connection of {self.name} closed")
        except Exception as e:
            # NOTE(review): assumes the zpy logger exposes ``err`` — confirm.
            ctx().logger.err("An error occurred while releasing connections", exc_info=e)
class ZDBConnectionManager:
    """Tracks named databases and the lifecycle of their open connections."""

    def __init__(self, verbose: bool = False, auto_commit: bool = True):
        # name -> wrapper (db config + optional live connection)
        self.dbs: Dict[str, ZDBWrapper] = {}
        # Running count of connections opened by this manager.
        self.count = 0
        self.verbose = verbose
        # When True, commits are issued before connections are released.
        self.auto_commit = auto_commit

    def add(self, name: str, db: ZDatabase) -> 'ZDBConnectionManager':
        """
        Add new database configuration.
        @param name: database identifier name
        @param db: database configuration instance
        @return: manager
        """
        self.dbs[name] = ZDBWrapper(None, db, name, self.verbose)
        return self

    def __configure(self, name: str):
        # Open at most one connection for *name*; unknown names are a no-op.
        if name not in self.dbs:
            return
        if self.dbs.get(name).has_connection():
            ctx().logger.warn(f"Warning: Already exist a connection opened for {name} database.")
            return
        self.dbs.get(name).configure(
            self.dbs.get(name).db.new_connect()
        )
        self.count = self.count + 1
        if self.verbose:
            ctx().logger.info(f"New connection opened for {name} database.")

    def open_for(self, dbs: List[str]) -> None:
        """
        Open db connections for specified database configurations.
        @param dbs: Db names to open connections.
        @return: None
        """
        for name in dbs:
            self.__configure(name)

    def open_single(self, name: str) -> None:
        """
        Open a db connection for the named database configuration.
        @return: None
        """
        self.__configure(name)

    def open(self) -> None:
        """
        Open db connections for all database configurations.
        @return: None
        """
        for name in self.dbs.keys():
            self.__configure(name)

    def release_for(self, name: str) -> None:
        """
        Release specific opened connections
        @return: None
        """
        db = self.dbs.get(name, None)
        if db and db.has_connection():
            if self.auto_commit:
                self.commit_for(name)
            db.release()

    def release(self) -> None:
        """
        Release all opened connections
        @return: None
        """
        for name in self.dbs.keys():
            if self.auto_commit:
                self.commit_for(name)
            self.dbs.get(name).release()

    def count_connections(self) -> int:
        """
        Count all opened connections
        @return: total of open connections
        """
        return self.count

    def has_connections(self, name: str) -> bool:
        """
        Verify if database provided has opened connection.
        @param name: database name
        @return: true if it has connection
        """
        if name in self.dbs:
            return self.dbs.get(name).has_connection()
        return False

    def connection(self, name: str) -> Optional[Any]:
        """
        Retrieve connection for database name provided.
        @param name: database name
        @return: connection, or None when unknown/closed
        """
        if name in self.dbs:
            if self.dbs.get(name).has_connection():
                return self.dbs.get(name).connection
        return None

    def commit_for(self, name: str) -> None:
        """
        Commit to specified database
        @param name: database name
        @return: None
        """
        if name in self.dbs:
            if self.dbs.get(name).has_connection():
                self.dbs.get(name).connection.commit()

    def commit(self) -> None:
        """
        Commit all connections
        @return: None
        """
        for db_name in self.dbs.keys():
            self.commit_for(db_name)

    def force_connection(self, name: str) -> Optional[Any]:
        """
        Force connection retrieve.
        If not existing opened connection for database provided,
        will try to open and return connection
        @param name: database name
        @return: connection
        """
        if name in self.dbs:
            if self.dbs.get(name).has_connection():
                return self.dbs.get(name).connection
            self.open_single(name)
            if self.dbs.get(name).has_connection():
                return self.dbs.get(name).connection
        return None

    def database(self, name: str, db_type: T = None) -> Union[T, Any]:
        """
        Retrieve database by name
        @param name: database name
        @param db_type: Database type (used only to drive the return type)
        @return: the registered database wrapper, or None when unknown
        """
        if name in self.dbs:
            return self.dbs.get(name).db
        return None

    def __del__(self):
        # Best-effort cleanup when the manager is garbage-collected.
        self.release()
from dataclasses import asdict
from typing import TypeVar
from marshmallow_objects import models
from typing import Any, Dict, List, Optional, Union
from zpy.logger import zL
from zpy.utils.funcs import safely_exec, safely_exec_with
from zdb.commons import process_exception
from enum import Enum
import cx_Oracle
import logging
__author__ = "NoΓ© Cruz | [email protected]"
__copyright__ = "Copyright 2021, Small APi Project"
__credits__ = ["NoΓ© Cruz", "Zurck'z"]
__license__ = "MIT"
__version__ = "0.0.1"
__maintainer__ = "NoΓ© Cruz"
__email__ = "[email protected]"
__status__ = "Dev"
from zdb.commons import ZDatabase, ZDBConfig, ZDBPool, show_info, parser_types
from zdb.utils import get_current_schema
T = TypeVar("T")
class OracleType(Enum):
    """cx_Oracle type handles used as function return types.

    NOTE: members sharing the same underlying handle become enum aliases —
    ``integer`` and ``decimal`` both map to ``cx_Oracle.NUMBER`` and are
    therefore aliases of ``number``.
    """
    cursor = cx_Oracle.CURSOR
    number = cx_Oracle.NUMBER
    string = cx_Oracle.STRING
    integer = cx_Oracle.NUMBER  # alias of ``number``
    decimal = cx_Oracle.NUMBER  # alias of ``number``
    bool = cx_Oracle.BOOLEAN
class OracleParam(Enum):
    """Db-side names of the custom Oracle collection types for list params."""
    LIST_INTEGER = "LIST_INTEGER"
    LIST_STR = "LIST_VARCHAR"
    LIST_CLOB = "LIST_CLOB"
class ZParam:
    """Wrapper describing a custom (collection-typed) Oracle parameter."""

    def __init__(
        self,
        value: Union[List[int], List[float], List[str], List[Any]],
        param_type: OracleParam,
        key: str,
        origin: str = None,
    ) -> None:
        # Raw Python values converted later to the db collection type.
        self.value = value
        # Which custom Oracle collection type to instantiate.
        self.param_type = param_type
        # Keyword name the stored function expects.
        self.key = key
        # Optional schema/origin owning the collection type.
        self.origin = origin
class IntList(ZParam):
    """Integer-list parameter (``LIST_INTEGER`` custom type)."""

    def __init__(self, value: List[int], key: str, origin: str = None) -> None:
        super().__init__(value, OracleParam.LIST_INTEGER, key, origin)
class StrList(ZParam):
    """String-list parameter (``LIST_VARCHAR`` custom type)."""

    def __init__(self, value: List[str], key: str, origin: str = None) -> None:
        super().__init__(value, OracleParam.LIST_STR, key, origin)
class ClobList(ZParam):
    """CLOB-list parameter (``LIST_CLOB`` custom type).

    ``origin`` now defaults to ``None`` for consistency with the sibling
    ``IntList``/``StrList`` wrappers (backward compatible).
    """

    def __init__(self, value: List[Any], key: str, origin: str = None) -> None:
        super().__init__(value, OracleParam.LIST_CLOB, key, origin)
def get_str_connection(config: dict = None, mode="TSN") -> Union[dict, str]:
    """Build a cx_Oracle connection string from *config*.

    ``mode == "DSN"`` yields ``host:port/service``; any other mode yields the
    full ``user/password@host:port/service`` form.

    @raise Exception: when *config* is None.
    """
    if config is None:
        raise Exception("The data for the connection was not provided")
    host = config["host"]
    port = config["port"]
    service = config["service"]
    user = config["user"]
    password = config["password"]
    if mode == "DSN":
        return f"{host}:{port}/{service}"
    return f"{user}/{password}@{host}:{port}/{service}"
def extract_of_result_data(
    result_set, ret_type: OracleType, model=None, jsonfy=False
):
    """Shape a cx_Oracle result (cursor or scalar) for the caller.

    BUG FIX: the original chain read ``elif OracleType.number:`` — a truthy
    enum member, so every non-cursor type fell into the float branch and the
    remaining branches were unreachable. The conditions now compare
    ``ret_type`` explicitly. Note that ``OracleType.integer``/``decimal`` are
    value aliases of ``OracleType.number``, so all three match the first
    scalar branch.
    """
    if ret_type == OracleType.cursor:
        columns = [field[0] for field in result_set.description]
        if model is not None:
            return [model(**dict(zip(columns, r))) for r in result_set]
        if jsonfy is True:
            return [dict(zip(columns, row)) for row in result_set]
        return result_set.fetchall()
    if ret_type == OracleType.number:
        return safely_exec_with(lambda x: float(result_set), result_set, args=[result_set])
    if ret_type == OracleType.integer:
        return safely_exec_with(lambda x: int(result_set), result_set, args=[result_set])
    if ret_type == OracleType.decimal:
        return safely_exec_with(lambda x: parser_types[4](x), result_set, args=[result_set])
    return parser_types[2](result_set)
def make_custom_param(
    connection: Any,
    param_type: OracleParam,
    value: Union[List[int], List[float], List[str], List[Any]],
    schema: str = None,
):
    """Instantiate a custom Oracle collection object carrying *value*.

    When *schema* is non-blank it prefixes the type lookup, e.g.
    ``MYSCHEMA.LIST_INTEGER``.
    """
    prefix = "" if (schema is None or schema.replace(" ", "") == "") else f"{schema}."
    collection_type = connection.gettype(f"{prefix}{param_type.value}")
    return collection_type.newobject(value)
class ZOracle(ZDatabase):
    """Oracle implementation of :class:`ZDatabase` built on cx_Oracle."""

    # Whether the Oracle instant-client libraries were initialized.
    __local_client_initialized: bool = False
    __local_client_path: str = None
    __config_connection: dict = None
    __connection = None
    __is_connected: bool = False
    # Optional per-environment schema catalog (see zdb.utils helpers).
    __schemas: List[Dict] = None
    __env: str = None
    __verbose: bool = False

    def __init__(
        self,
        config: dict = None,
        local_client_path: str = None,
        schemas: List[Dict] = None,
        env: str = None,
        verbose: bool = False,
    ) -> None:
        self.__local_client_path = local_client_path
        self.__config_connection = config
        self.__schemas = schemas
        self.__env = env
        self.__verbose = verbose

    @classmethod
    def setup(cls, config: dict,
              local_client_path: str = None,
              schemas: List[Dict] = None,
              env: str = None,
              verbose: bool = False):
        """Build an instance from a raw connection dict."""
        return cls(config, local_client_path, schemas, env, verbose)

    @classmethod
    def setup_of(cls, config: ZDBConfig, local_client_path: str = None,
                 schemas: List[Dict] = None,
                 env: str = None,
                 verbose: bool = False, ):
        """Build an instance from a typed :class:`ZDBConfig`."""
        return cls(asdict(config), local_client_path, schemas, env, verbose)

    @classmethod
    def from_of(cls, user: str, password: str, host: str, db_name: str, verbose: bool = False):
        raise NotImplementedError("Not implemented for Oracle!")

    def new_connect(self) -> Any:
        """Open and return a brand-new cx_Oracle connection."""
        str_connection = self.__validate_config(self.__config_connection)
        return cx_Oracle.connect(str_connection)

    def init_local_client(self, path: str = None):
        """Initialize the Oracle instant-client libraries once per process.

        Failures are logged and swallowed; the flag stays False so a later
        retry is possible.
        """
        if self.__local_client_initialized:
            return
        value = path if self.__local_client_path is None else self.__local_client_path
        try:
            if value is None:
                raise Exception("Local client path not provided.")
            cx_Oracle.init_oracle_client(lib_dir=value)
            self.__local_client_initialized = True
        except Exception as e:
            self.__local_client_initialized = False
            logging.exception(e)

    def __validate_config(self, config: dict = None, mode="TSN") -> Union[dict, str]:
        # Instance-level config (when present) takes precedence over the
        # argument, then everything is rendered as a connection string.
        values = (
            config if self.__config_connection is None else self.__config_connection
        )
        return get_str_connection(values, mode)

    def connect(self, config: dict = None):
        """
        Start oracle connection
        """
        if self.__is_connected:
            return True
        try:
            str_connection = self.__validate_config(config)
            self.__connection = cx_Oracle.connect(str_connection)
            self.__is_connected = True
            return True
        except Exception as e:
            raise e

    def close(self):
        """Close the shared connection opened by :meth:`connect`, if any."""
        if self.__is_connected:
            self.__connection.close()
            self.__is_connected = False

    def is_connected(self) -> bool:
        """Whether :meth:`connect` currently holds an open connection."""
        return self.__is_connected

    def get_connection(self):
        """Return the shared connection held by :meth:`connect` (may be None)."""
        return self.__connection

    def call(
        self,
        fn: str,
        ret_type: OracleType,
        params: Optional[Dict] = None,
        custom_params: Optional[List[ZParam]] = None,
        model: Optional[models.Model] = None,
        connection=None,
        jsonfy=False,
        throw=False
    ) -> Optional[Any]:
        """
        Execute or call oracle functions - FN v0.0.1 | Core v0.0.7
        New feature for call oracle db functions
        Use this function instead function 'call'
        Parameters
        ----------
        fn : str | required
            Function name with package name: PO_LINES_PKG.FN_GET_LINE
        ret_type : OracleType | required
            The return type of oracle db function
        params : Dict | Optional
            Set parameter that the oracle function expects
        custom_params : Optional[List[ZParam, IntList, StrList, ClobList]] | Optional
            Custom Set parameter that the oracle function expects, see available custom types
        model : marshmallow_objects.models.Model | Optional
            Model specification where the db data will be dumped
        connection : DB Connection | Optional
            The db connection object, if it is not passed by params, it tries to get a global instance
        jsonfy : bool | Optional
            Return data in dict format
        throw : bool | Optional
            raise exception or not
        Raises
        ------
        NotValueProvided
            Connection
        Returns
        -------
        result set : Union[List[Model],int,float,str]
            The result set of oracle db function
        """
        cursor = None
        connection_provided = True
        db_schema = None
        if connection is not None:
            cursor = connection.cursor()
        else:
            # We own this connection and must commit/close it in ``finally``.
            connection_provided = False
            connection = self.new_connect()
            cursor = connection.cursor()
        if connection is None:
            raise Exception("Can't get db connection")
        if self.__schemas is not None:
            # NOTE(review): ``__env`` is passed as both local and global env —
            # confirm that is intentional.
            db_schema = get_current_schema(self.__schemas, self.__env, self.__env)
        if custom_params is not None and len(custom_params) > 0:
            if params is None:
                params = {}
            # * Find the current env for extract the schema
            for custom in custom_params:
                params[custom.key] = make_custom_param(
                    connection,
                    param_type=custom.param_type,
                    value=custom.value,
                    schema=db_schema,
                )
        # Qualify the function name with the schema when one was resolved.
        fn = (
            fn
            if db_schema is None or db_schema.replace(" ", "") == ""
            else f"{db_schema}.{fn}"
        )
        if self.__verbose:
            show_info(fn, params, ret_type, model)
        try:
            result_set = (
                cursor.callfunc(fn, ret_type.value, keywordParameters=params)
                if params is not None
                else cursor.callfunc(fn, ret_type.value)
            )
            return extract_of_result_data(result_set, ret_type, model, jsonfy)
        except Exception as e:
            process_exception(throw, e)
        finally:
            # Cursor is always ours; the connection only when we created it.
            safely_exec(lambda l: l.close(), args=[cursor])
            if connection_provided is False:
                safely_exec(lambda l: l.commit(), args=[connection])
                safely_exec(lambda l: l.close(), args=[connection])
        return None

    def exec(self, fn_name: str, ret_type: OracleType, params: dict = None, list_params: List[Any] = None,
             model: Any = None,
             connection=None, jsonfy: bool = False, throw=False) -> Any:
        raise NotImplementedError("")
class ZDBOraclePool(ZDBPool):
    """cx_Oracle session-pool manager implementing the ZDBPool contract.

    Wraps a ``cx_Oracle.SessionPool`` configured from a plain dict and keeps
    track of whether the pool has been created so connections can be acquired
    lazily.
    """

    def __init__(self, db: ZDatabase = None, config: dict = None):
        self.db = db
        if db is None:
            # No client supplied: build one from the raw configuration.
            self.db = ZOracle.setup(config)
        self.config = config
        self.__pool = None
        self.__max = 5
        self.__min = 1
        self.__threading = False
        self.__homogeneous = True
        self.__pool_created: bool = False
        if config is not None:
            self.setup_extras(config)

    def setup_db(self, db: ZDatabase) -> None:
        """Replace the underlying database client."""
        self.db = db

    def setup_extras(self, config: dict) -> None:
        """Read optional pool tuning values from *config*, keeping defaults on error."""
        self.config = config
        try:
            self.__max = config.get("max_connections", 5)
            self.__min = config.get("min_connections", 1)
            self.__threading = config.get("threading", False)
            self.__homogeneous = config.get("homogeneous", True)
        except Exception as e:
            zL.e("An error occurred when setup config", exc_info=e)

    def initialize_pool(
        self,
        max_connections: int = None,
        min_connections: int = None
    ) -> bool:
        """Create the session pool.

        Returns
        -------
        bool
            True when the pool was created, False when it already existed.
        """
        if self.__pool_created:
            return False
        zL.i("Initializing Pool...")
        self.__pool = cx_Oracle.SessionPool(
            user=self.config.get("user"),
            password=self.config.get("password"),
            dsn=get_str_connection(self.config, mode="DSN"),
            homogeneous=self.__homogeneous,
            encoding="UTF-8",
            max=self.__max if max_connections is None else max_connections,
            min=self.__min if min_connections is None else min_connections,
            threaded=self.__threading,
        )
        self.__pool_created = True
        zL.i("Pool Started Successfully...")
        return True

    def close_pool(self, force: bool = False):
        """Close the pool (optionally forcing busy sessions); errors are logged."""
        try:
            if self.__pool_created:
                self.__pool.close(force=force)
                self.__pool_created = False
                zL.i("Pool Closed Successfully")
        except Exception as e:
            zL.e("An error occurred when try close pool", exc_info=e)

    def get_pool_connection(self):
        """Acquire a connection, lazily initializing the pool when needed.

        Returns
        -------
        connection or None
            None when the pool could not be initialized; callers must check.
        """
        if self.__pool_created:
            return self.__pool.acquire()
        zL.w("DB Pool not initialized, try to initialize pool connection...")
        try:
            # BUG FIX: previously __pool_created was forced to True *before*
            # acquire() succeeded; initialize_pool() already sets the flag,
            # so a failed acquire no longer leaves the pool marked as ready.
            self.initialize_pool()
            return self.__pool.acquire()
        except Exception as e:
            zL.e("Unable to initialize the connections pool...", exc_info=e)
            self.__pool_created = False
            return None  # explicit failure result instead of implicit None

    def release_connection(self, connection) -> bool:
        """Return *connection* to the pool; False when not released."""
        try:
            if self.__pool_created:
                self.__pool.release(connection)
                return True
        except Exception as e:
            zL.e("An error occurred when try to release connection", exc_info=e)
        return False

    def get_db(self):
        """Return the wrapped database client."""
        return self.db
from abc import ABC, abstractmethod
from typing import Any, List
from zpy.custom.models import DBSSMCredentialsModel, SSMConstants
from zpy.cloud.aws.ssm import SSMParameter
from zpy.api import ApiConfiguration
from zpy.db import DBConnection
from zpy.db.oracle import OracleDB
import logging
import os
class Plugin(ABC):
    """Contract for pluggable components loaded at service startup."""

    @abstractmethod
    def initialize(self, config: ApiConfiguration, ssm: SSMParameter, *args):
        """Bootstrap the plugin with the API configuration and an SSM client."""
        ...
# Receive this by parameter where use
# class TradeConstants(Enum):
# SSM_PREFIX = "/aws/reference/secretsmanager/com/upax/trade"
# LOCAL_DB_SSM = "/db/oracle/zeuststtrade"
# DB_SSM = "/db/oracle/usrtrade"
# AES_SK_SSM = "/security/encryption"
# AWS_SSM = "/aws/services"
# Database Schemas for Trade Project
DB_SCHEMES = [
    {"env": "prod", "value": "TRADE"},
    {"env": "qa", "value": "TRADE"},
    {"env": "dev", "value": "TRADE"},
    {"env": "localdev", "value": "ZEUSTSTTRADE"},
]


# Response modifier according to the zcommons processor contract.
def custom_response_mapper(r):
    """Wrap a raw handler result in the envelope the zcommons processor expects."""
    # PEP 8 (E731): a def instead of a lambda bound to a name; same callable
    # interface, so existing callers are unaffected.
    return {"message": r}
def pool_initialize(config: ApiConfiguration):
    """Create the Oracle client and its connection pool for *config*.

    In ``localdev`` the local Oracle instant client is initialized first
    (path read from the ``LOCAL_CLIENT`` env var). Pool-creation failures
    are logged and swallowed; the (possibly pool-less) client is returned
    either way so callers can decide how to proceed.
    """
    db: DBConnection = OracleDB(
        config=config.to_dict(), schemas=DB_SCHEMES, env=config.ENVIRONMENT
    )
    try:
        if config.ENVIRONMENT == "localdev":
            db.init_local_client(os.getenv("LOCAL_CLIENT"))
        db.initialize_pool(
            dns=db.get_tsn_dsn_conenction(config.to_dict(), "DSN"),
            homogeneous=True,
            user=config.DB_USER,
            pwd=config.DB_PASSWORD,
        )
    except Exception as e:
        # Log with a traceback instead of a bare print so failures reach
        # structured logs (the module already imports logging).
        logging.exception(e)
    return db
def setup_env(vars: dict):
    """Copy every key/value pair of *vars* into the process environment.

    Note: the parameter name shadows the ``vars`` builtin; it is kept for
    backward compatibility with keyword callers.
    """
    # os.environ.update performs the same per-key assignment as the old
    # ``for k in vars.keys(): os.environ[k] = vars[k]`` loop, in one call.
    os.environ.update(vars)
def load_plugins(
    plugins: List[Plugin] = None,
    args_plugs: List[List[Any]] = None,
    env: str = None,
    ssm_const: SSMConstants = None,
):
    """Build the API configuration from SSM secrets and initialize plugins.

    Parameters
    ----------
    plugins : List[Plugin], optional
        Plugin classes to instantiate and initialize.
    args_plugs : List[List[Any]], optional
        Per-plugin keyword-argument mappings, matched by index.
    env : str, optional
        Environment name; falls back to the ``env`` environment variable.
    ssm_const : SSMConstants
        SSM prefixes/keys; required.

    Returns
    -------
    (config, plugins_result) or None
        None when any step fails (the exception is logged).
    """
    # Mutable-default fix: fresh lists per call instead of shared [] defaults.
    plugins = [] if plugins is None else plugins
    args_plugs = [] if args_plugs is None else args_plugs
    try:
        environment = os.getenv("env") if env is None else env
        if environment is None:
            raise Exception(
                "Environment not found. set environment variable with name: 'env' and value: 'prod|qa|dev|localdev' "
            )
        if ssm_const is None:
            raise Exception("SSM Constants cannot be null, set ssm constants: see: ")
        ssm = SSMParameter(with_profile=True, prefix=ssm_const.smm_prefix)
        db_prefix = (
            ssm_const.local_db_ssm if environment == "localdev" else ssm_const.db_smm
        )
        db_credentials: DBSSMCredentialsModel = ssm.get(prefix=db_prefix, model=DBSSMCredentialsModel)
        aes_credentials = ssm.get(prefix=ssm_const.aes_sk)
        config: ApiConfiguration = ApiConfiguration()
        config.DB_URI = db_credentials.uri
        config.DB_PASSWORD = db_credentials.password
        config.DB_PORT = db_credentials.port
        config.DB_SERVICE = db_credentials.service
        config.DB_USER = db_credentials.user
        # BUG FIX: use the resolved environment (honors the ``env`` argument)
        # instead of re-reading os.environ, which may be unset when env was
        # passed explicitly.
        config.ENVIRONMENT = environment
        config.SECRET = os.getenv("API_SECRET")
        config.CUSTOM = aes_credentials
        plugins_result = []
        for i, plugin in enumerate(plugins):
            # Missing arg mappings default to no kwargs for that plugin.
            args = args_plugs[i] if i < len(args_plugs) else {}
            plugins_result.append(plugin().initialize(config=config, ssm=ssm, **args))
        return (config, plugins_result)
    except Exception as e:
        logging.exception(e)
def setup_microservice(
    plugins: List[Plugin] = None,
    args_plugs: List[List[Any]] = None,
    env: str = None,
    ssm_const: SSMConstants = None
):
    """Deprecated: will be removed in core version 1.0.0.

    Use :func:`load_plugins` instead. Builds the API configuration from SSM,
    initializes the DB pool and the given plugins.

    Returns ``(config, db, plugins_result)`` or None on failure (logged).
    """
    # Mutable-default fix: fresh lists per call instead of shared [] defaults.
    plugins = [] if plugins is None else plugins
    args_plugs = [] if args_plugs is None else args_plugs
    try:
        environment = os.getenv("env") if env is None else env
        if environment is None:
            raise Exception(
                "Environment not found. set environment variable with name: 'env' and value: 'prod|qa|dev|localdev' "
            )
        if ssm_const is None:
            raise Exception("SSM Constants cannot be null, set ssm constants: see: ")
        ssm = SSMParameter(with_profile=True, prefix=ssm_const.smm_prefix)
        db_prefix = (
            ssm_const.local_db_ssm
            if environment == "localdev"
            else ssm_const.db_smm
        )
        db_credentials: DBSSMCredentialsModel = ssm.get(prefix=db_prefix, model=DBSSMCredentialsModel)
        aes_credentials = ssm.get(prefix=ssm_const.aes_sk)
        setup_env(aes_credentials)
        config: ApiConfiguration = ApiConfiguration()
        config.DB_URI = db_credentials.uri
        config.DB_PASSWORD = db_credentials.password
        config.DB_PORT = db_credentials.port
        config.DB_SERVICE = db_credentials.service
        config.DB_USER = db_credentials.user
        # Consistency with load_plugins: honor an explicitly passed env.
        config.ENVIRONMENT = environment
        config.SECRET = os.getenv("API_SECRET")
        db: DBConnection = pool_initialize(config)
        plugins_result = []
        for i, plugin in enumerate(plugins):
            # BUG FIX: guard the index so fewer arg lists than plugins no
            # longer raises IndexError (mirrors load_plugins).
            args = args_plugs[i] if i < len(args_plugs) else []
            plugins_result.append(
                plugin().initialize(config=config, ssm=ssm, *args)
            )
        return (config, db, plugins_result)
    except Exception as e:
        logging.exception(e)
from abc import ABC, abstractmethod
from zpy.api.reponse import Builder
from zpy.api.stages import Decrypt
from zpy.logger import g_log
from zpy.api.errors import ErrorBuilder
from flask import Flask
from typing import Any
from flask.wrappers import Request, Response
from zpy.utils.Encryptor import AESEncryptor
import json
__author__ = "NoΓ© Cruz | [email protected]"
__copyright__ = "Copyright 2021, Small APi Project"
__credits__ = ["NoΓ© Cruz", "Zurck'z"]
__license__ = "upax"
__version__ = "0.0.1"
__maintainer__ = "NoΓ© Cruz"
__email__ = "[email protected]"
__status__ = "Dev"
# Middlewares | Zurck'z Middleware
# Base middleware for flask
class ZMiddleware(ABC):
    """Abstract WSGI middleware base.

    Keeps a reference to the wrapped flask application plus arbitrary
    keyword configuration, and delegates the WSGI call straight through.
    Concrete middlewares override ``__call__`` and may invoke
    ``super().__call__`` to forward the request.
    """

    def __init__(self, app: Flask, **kwargs) -> None:
        super().__init__()
        self.app = app
        self.kwargs = kwargs

    @abstractmethod
    def __call__(self, environ: Any, start_response: Any) -> Any:
        # Default behavior: pass the request to the wrapped application.
        return self.app(environ, start_response)
# Custom Middlewares
# Encrypt body of responses with AES algorithm
class EncryptMiddleWare(ZMiddleware):
    """Middleware intended to AES-encrypt response bodies.

    NOTE(review): encryption is not implemented yet — this middleware
    currently just forwards the WSGI call to the wrapped application.
    """

    def __init__(self, app: Flask, **kwargs) -> None:
        super().__init__(app, **kwargs)
        self.app = app

    def __call__(self, environ: Any, start_response: Any) -> Any:
        # Removed a dead ``Response(environ)`` construction that was never
        # used and only allocated an unused object per request.
        return super().__call__(environ, start_response)
# Custom Middlewares
# Decrypt body of incoming requests with AES algorithm
class DecryptMiddleWare(ZMiddleware):
    """Middleware that AES-decrypts the incoming request body.

    Expects the raw (encrypted) payload under ``environ["request"]`` —
    placed there by an upstream parser — and replaces it with the decrypted
    value. On failure a 500 error response is returned directly.
    """

    def __init__(self, app: Flask, **kwargs):
        super().__init__(app, **kwargs)
        self.app = app

    def __call__(self, environ, start_response):
        try:
            if environ["request"]:
                aes: str = None
                # The AES secret key is injected via middleware kwargs under
                # the hard-coded "aes_sk" key.
                if self.kwargs is not None and "aes_sk" in self.kwargs:
                    aes = self.kwargs["aes_sk"]
                encrypt_data = environ["request"]
                decrypt_data = AESEncryptor.decrypt_ws_response(
                    encrypt_data, secret_key=aes
                )
                environ["request"] = decrypt_data
            # BUG FIX: the forward to the wrapped app was nested inside the
            # ``if environ["request"]`` branch, so requests without a body
            # made __call__ return None and broke the WSGI chain. Forward
            # unconditionally; decryption only happens when a body exists.
            return self.app(environ, start_response)
        except Exception as e:
            stage = Decrypt()
            g_log(e, stage)
            res = Response(
                json.dumps(
                    Builder.error(
                        errors=[
                            ErrorBuilder().common(
                                "Threw exception on decrypt process",
                                "Request supplied not have a valid format",
                                stage,
                            )
                        ]
                    )
                ),
                mimetype="text/json",
                status=500,
            )
            return res(environ, start_response)
class ParserMiddleWare(ZMiddleware):
    """
    Default middleware that parses the raw request body into the WSGI
    environ (under the 'request' key) for downstream resources.
    """
    def __init__(self, app: Flask, **kwargs):
        super().__init__(app, **kwargs)
        self.app = app
    def __call__(self, environ, start_response):
        request = Request(environ)
        try:
            # Store the parsed JSON body (or None when there is no body) so
            # resources can read it via environ["request"].
            if request.data:
                environ["request"] = json.loads(request.data)
            else:
                environ["request"] = None
            return self.app(environ, start_response)
        except Exception as e:
            # NOTE(review): this error path reuses the Decrypt stage and a
            # "decrypt process" message — it looks copy-pasted from
            # DecryptMiddleWare; presumably it should report a parse error.
            # Confirm before changing the user-facing message.
            stage = Decrypt()
            g_log(e, stage)
            res = Response(
                json.dumps(
                    Builder.error(
                        errors=[
                            ErrorBuilder().common(
                                "Threw exception on decrypt process",
                                "Request supplied not have a valid format",
                                stage,
                            )
                        ]
                    )
                ),
                mimetype="text/json",
                status=500,
            )
            return res(environ, start_response)
from abc import abstractmethod
__author__ = "NoΓ© Cruz | [email protected]"
__copyright__ = "Copyright 2021, Small APi Project"
__credits__ = ["NoΓ© Cruz", "Zurck'z"]
__license__ = "upax"
__version__ = "0.0.1"
__maintainer__ = "NoΓ© Cruz"
__email__ = "[email protected]"
__status__ = "Dev"
class Stage:
    """Describes a processing stage (phase) of the request pipeline.

    Subclasses override the ``name``/``description`` class attributes with
    their own defaults; instances may still override both via ``__init__``.
    The ``@abstractmethod`` decorators previously applied here were
    ineffective (the class does not use ABCMeta and all methods have
    concrete bodies), so they have been removed to avoid misleading readers.
    """

    name = ""
    description = ""

    def __init__(self, name, description) -> None:
        self.name = name
        self.description = description

    def get(self) -> str:
        """Return the full two-line phase header (title + description)."""
        return "\n[ PH: :: {0} ]\n[ PD: :: {1} ]\n".format(self.name, self.description)

    def desc(self) -> str:
        """Return only the phase-description line."""
        return "[ PD: :: {0} ]".format(self.description)

    def title(self) -> str:
        """Return only the phase-title line."""
        return "[ PH: :: {0} ]".format(self.name)
class Unspecified(Stage):
    """Pipeline stage: phase left unspecified by the developer."""

    name = "Unspecified Stage"
    description = "Unspecified stage by developer"

    def __init__(self, name: str = None, description: str = None) -> None:
        # Explicit arguments override the class-level defaults.
        final_name = self.name if name is None else name
        final_desc = self.description if description is None else description
        super().__init__(name=final_name, description=final_desc)
class StartingCoreModule(Stage):
    """Pipeline stage: core-module initialization."""

    name = "Starting Core Module"
    description = "Initializing core modules"

    def __init__(self, name: str = None, description: str = None) -> None:
        # Explicit arguments override the class-level defaults.
        final_name = self.name if name is None else name
        final_desc = self.description if description is None else description
        super().__init__(name=final_name, description=final_desc)
class ResourceGateway(Stage):
    """Pipeline stage: HTTP resource gateway."""

    name = "Resource Gateway"
    description = "HTTP Resource Gateway"

    def __init__(self, name: str = None, description: str = None) -> None:
        # Explicit arguments override the class-level defaults.
        final_name = self.name if name is None else name
        final_desc = self.description if description is None else description
        super().__init__(name=final_name, description=final_desc)
class Decrypt(Stage):
    """Pipeline stage: HTTP request decryption."""

    name = "Decrypt Stage"
    description = "Http request decryption Stage"

    def __init__(self, name: str = None, description: str = None) -> None:
        # Explicit arguments override the class-level defaults.
        final_name = self.name if name is None else name
        final_desc = self.description if description is None else description
        super().__init__(name=final_name, description=final_desc)
class SchemaValidation(Stage):
    """Pipeline stage: request schema/model validation."""

    name = "Schema Validation Stage"
    description = "Schemes or request models are validated according to the validations given"

    def __init__(self, name: str = None, description: str = None) -> None:
        # Explicit arguments override the class-level defaults.
        final_name = self.name if name is None else name
        final_desc = self.description if description is None else description
        super().__init__(name=final_name, description=final_desc)
class ResourceLayer(Stage):
    """Pipeline stage: resource (presentation) layer."""

    name = "Resource Layer Stage"
    description = "Abstract presentation to business logic."

    def __init__(self, name: str = None, description: str = None) -> None:
        # Explicit arguments override the class-level defaults.
        final_name = self.name if name is None else name
        final_desc = self.description if description is None else description
        super().__init__(name=final_name, description=final_desc)
class BusinessLogicExecution(Stage):
    """Pipeline stage: business-logic execution."""

    name = "Business Logic Layer Stage"
    description = "Business Logic execution"

    def __init__(self, name: str = None, description: str = None) -> None:
        # Explicit arguments override the class-level defaults.
        final_name = self.name if name is None else name
        final_desc = self.description if description is None else description
        super().__init__(name=final_name, description=final_desc)
class AuthenticationCheck(Stage):
    """Pipeline stage: authentication check."""

    name = "Authentication Check Layer Stage"
    description = "Authentication Check"

    def __init__(self, name: str = None, description: str = None) -> None:
        # Explicit arguments override the class-level defaults.
        final_name = self.name if name is None else name
        final_desc = self.description if description is None else description
        super().__init__(name=final_name, description=final_desc)
class AuthorizationCheck(Stage):
    """Pipeline stage: authorization check."""

    name = "Authorization Check Layer Stage"
    description = "Authorization Check"

    def __init__(self, name: str = None, description: str = None) -> None:
        # Explicit arguments override the class-level defaults.
        final_name = self.name if name is None else name
        final_desc = self.description if description is None else description
        super().__init__(name=final_name, description=final_desc)
class ServiceLayer(Stage):
    """Pipeline stage: service layer (business logic host)."""

    name = "Service Layer Stage"
    description = "Service Layer, all buisiness logic execute"

    def __init__(self, name: str = None, description: str = None) -> None:
        # Explicit arguments override the class-level defaults.
        final_name = self.name if name is None else name
        final_desc = self.description if description is None else description
        super().__init__(name=final_name, description=final_desc)
class DataAccess(Stage):
    """Pipeline stage: data access."""

    name = "Data Access Stage"
    description = "Data Access"

    def __init__(self, name: str = None, description: str = None) -> None:
        # Explicit arguments override the class-level defaults.
        final_name = self.name if name is None else name
        final_desc = self.description if description is None else description
        super().__init__(name=final_name, description=final_desc)
class BuildingReponses(Stage):
    """Pipeline stage: response building.

    Note: class name keeps its original (misspelled) form for backward
    compatibility with existing imports.
    """

    name = "Building Response Stage"
    description = "Building response..."

    def __init__(self, name: str = None, description: str = None) -> None:
        # Explicit arguments override the class-level defaults.
        final_name = self.name if name is None else name
        final_desc = self.description if description is None else description
        super().__init__(name=final_name, description=final_desc)
class Encryption(Stage):
    """Pipeline stage: response encryption."""

    name = "Encryption Stage"
    # BUG FIX: ``description`` was the tuple
    # ("Encryption Stage", "Response encryption proccess") because of a stray
    # comma, making it a non-string unlike every other Stage subclass.
    description = "Response encryption proccess"

    def __init__(self, name: str = None, description: str = None) -> None:
        super().__init__(
            name=self.name if name is None else name,
            description=self.description if description is None else description,
        )
from zpy.constants import Messages
from typing import Any, Dict, List, Tuple, Union
from zpy.api.reponse import BResponse, Builder, Status
from zpy.api.stages import ResourceLayer
from zpy.utils import get_operation_id
from zpy.api.exceptions import ZError
from marshmallow_objects import models
from marshmallow.exceptions import ValidationError
from zpy.logger import TLogger, c_warn
from flask.views import MethodView
from flask import request
from enum import Enum
import json
__author__ = "NoΓ© Cruz | [email protected]"
__copyright__ = "Copyright 2021, Small APi Project"
__credits__ = ["NoΓ© Cruz", "Zurck'z"]
__license__ = "MIT"
__version__ = "0.0.1"
__maintainer__ = "NoΓ© Cruz"
__email__ = "[email protected]"
__status__ = "Dev"
class HTTP_METHODS(Enum):
    """HTTP verbs a ZResource can serve or blacklist.

    Note: the non-PEP8 UPPER_SNAKE class name is kept for backward
    compatibility with existing imports.
    """
    GET = 1
    POST = 2
    PUT = 3
    PATCH = 4
    DELETE = 5
class ZResource(MethodView):
    """Base HTTP resource for Z APIs built on flask's ``MethodView``.

    Provides per-operation logging, request extraction/validation helpers and
    uniform success/error response builders. Subclasses implement the HTTP
    verb handlers and may blacklist verbs via ``blocked_methods``.
    """

    __name__ = "ZResource"
    # HTTP_METHODS members this resource refuses to serve; class-level default
    # shared unless a subclass overrides it with its own list.
    blocked_methods = []

    def __init__(self) -> None:
        super().__init__()
        print("Zurck'z - Core Resource was iniitalized")

    def __method_not_allowed(self):
        # Uniform payload for blacklisted verbs.
        return Builder().add_status(Status.METHOD_NOT_ALLOWED).build()

    def __is_not_allowed(self, method: HTTP_METHODS):
        return method in self.blocked_methods

    def get(self):
        if self.__is_not_allowed(HTTP_METHODS.GET):
            return self.__method_not_allowed()

    def post(self):
        if self.__is_not_allowed(HTTP_METHODS.POST):
            return self.__method_not_allowed()

    def put(self):
        if self.__is_not_allowed(HTTP_METHODS.PUT):
            return self.__method_not_allowed()

    def patch(self):
        if self.__is_not_allowed(HTTP_METHODS.PATCH):
            return self.__method_not_allowed()

    def delete(self):
        if self.__is_not_allowed(HTTP_METHODS.DELETE):
            return self.__method_not_allowed()

    def new_operation(self):
        """Start a new logged HTTP operation.

        Returns
        -------
        (log, operation_id)
            A TLogger bound to the request path, and the generated id.
        """
        operation_id = get_operation_id()
        log = TLogger(request.path, operation_id)
        log.add_phase(phase=ResourceLayer())
        if "request" in request.environ:
            log.add_info("Request:")
            log.add_info(request.environ["request"])
        return log, operation_id

    def close_operation(
        self, l: TLogger = None, e: Exception = None, failed: bool = False
    ):
        """Dispose the operation logger, dumping its stream when *failed*.

        Parameters
        ----------
        l : TLogger
            Logger object (single-letter name kept for backward compat).
        e : Exception
            Exception appended to the stream when failed.
        failed : bool
            When True the accumulated stack trace is printed.
        """
        if l is not None:
            if failed:
                l.add_exception(e)
                l.show_stack_trace()
            l.dispose()

    def get_request(self) -> dict:
        """Return the parsed request payload stored on the WSGI environ.

        Returns
        -------
        dict
            The parsed body, or {} when absent or malformed (best effort).
        """
        try:
            if "request" in request.environ and request.environ["request"] is not None:
                return json.loads(request.environ["request"])
            return {}
        except Exception:
            # Malformed payloads deliberately degrade to an empty request.
            return {}

    def either_validation(
        self, request: dict, model: models.Model
    ) -> Tuple[models.Model, Dict]:
        """Validate *request* against the marshmallow *model*.

        Parameters
        ----------
        request : dict
            Request payload to validate.
        model : marshmallow_objects.models.Model
            Model specification.

        Returns
        -------
        (model, errors) : Tuple[models.Model, Dict]
            Loaded model (possibly partial) plus validation errors, or
            (None, errors) when no request data was supplied.
        """
        model_result: Union[List, models.Model] = None
        errors: Union[List, None] = None
        try:
            if request is None or len(request) == 0:
                return None, {
                    "request": "The request was not provided, validation request error",
                    "fields": f"{model().__missing_fields__} not provided",
                }
            model_result = model(**request)
        except ValidationError as e:
            model_result = e.valid_data
            # Normalize marshmallow's messages to a list (isinstance against
            # the builtin dict instead of the deprecated typing.Dict alias).
            errors = [e.messages] if isinstance(e.messages, dict) else e.messages
        return model_result, errors

    def success(
        self,
        payload=None,
        logger: TLogger = None,
        status_code: Status = Status.SUCCESS,
        operation_id=None,
        description: str = None,
        map=None,
    ):
        """Build a success response and dispose the logger.

        ``payload`` now defaults to a None sentinel (normalized to {}) to
        avoid the previous shared mutable default argument.
        """
        payload = {} if payload is None else payload
        self.close_operation(logger, None)
        return BResponse.success(payload, status_code, operation_id, description, map)

    def bad_request(
        self,
        payload=None,
        logger: TLogger = None,
        errors: List[Any] = None,
        status_code: Status = Status.BAD_REQUEST,
        operation_id=None,
        description: str = None,
        map=None,
    ):
        """Build a bad-request response and dispose the logger."""
        # Mutable-default fix: a fresh list per call instead of a shared [].
        errors = [] if errors is None else errors
        self.close_operation(logger, None)
        return BResponse.bad_request(
            payload, errors, status_code, operation_id, description, map
        )

    def response(
        self,
        payload=None,
        logger: TLogger = None,
        errors: List[Any] = None,
        status_code: Status = Status.SUCCESS,
        operation_id=None,
        description: str = None,
        map=None,
    ):
        """Build a generic response and dispose the logger.

        NOTE(review): despite its name this delegates to
        ``BResponse.bad_request`` — it looks copy-pasted; behavior is kept
        unchanged for backward compatibility, confirm before fixing.
        """
        payload = {} if payload is None else payload
        errors = [] if errors is None else errors
        self.close_operation(logger, None)
        return BResponse.bad_request(
            payload, errors, status_code, operation_id, description, map
        )

    def handlde_exceptions(
        self,
        exception: Union[ZError, Exception],
        logger: TLogger,
        operation_id: str,
        payload=None,
        custom_errors: List[Union[str, dict]] = None,
        custom_message: str = None,
        custom_status: Status = None,
        map=None,
    ) -> Tuple[Dict, int]:
        """Deprecated (misspelled) alias of :meth:`handle_exceptions`."""
        c_warn("handlde_exceptions() will be deprecated in version 1.0.0, use handle_exceptions instead.")
        return self.handle_exceptions(
            exception,
            logger,
            operation_id,
            payload,
            custom_errors,
            custom_message,
            custom_status,
            map,
        )

    def handle_exceptions(
        self,
        exception: Union[ZError, Exception],
        logger: TLogger,
        operation_id: str,
        payload=None,
        custom_errors: List[Union[str, dict]] = None,
        custom_message: str = None,
        custom_status: Status = None,
        map=None,
    ) -> Tuple[Dict, int]:
        """Handle an exception and build the error response.

        Parameters
        ----------
        exception : Union[ZError, Exception]
            Exception to handle; ZErrors contribute their structured error.
        logger : TLogger
            Operation logger; closed (with trace) before returning.
        operation_id : str
            Must match the id from :meth:`new_operation`.
        payload : optional
            Extra payload forwarded to the error response.
        custom_errors : Optional[List[Union[str, dict]]]
            Errors to include in the response.
        custom_message : str, optional
            Custom response description.
        custom_status : Status, optional
            Custom response status (defaults to INTERNAL_SERVER_ERROR).
        map : optional
            Response modifier function (name shadows the builtin; kept
            for backward compatibility).

        Returns
        -------
        Tuple[Dict, int]
            The HTTP error response.
        """
        errors = [] if custom_errors is None else custom_errors
        final_status = (
            Status.INTERNAL_SERVER_ERROR if custom_status is None else custom_status
        )
        # The two branches previously duplicated the whole return; only the
        # appended error/message differ, so build those and return once.
        if isinstance(exception, ZError):
            errors.append(exception.get_error())
            logger.add_info(exception.get_message())
        else:
            errors.append(str(Messages.GEN_MSG_ERROR))
            logger.add_info(str(Messages.GEN_MSG_ERROR))
        self.close_operation(logger, exception, True)
        return BResponse.err(
            errors=errors,
            operation_id=operation_id,
            description=custom_message,
            status=final_status,
            payload=payload,
            fn_transform=map,
        )
from zpy.api import ZContract, ZContractStatus
from zpy.logger import TLogger, add_log_to, c_err, c_text
from zpy.api.exceptions import ZDBIntegrityError, ZDatabaseOperationError, ZError
from typing import Any, Dict, List, Optional, TypeVar
from zpy.db import DBConnection
from zpy.api.stages import DataAccess
from zpy.db.oracle import OracleType, ZParam
from zpy.db.transaction import ZOperation, ZTranstaction
import logging
import cx_Oracle
__author__ = "NoΓ© Cruz | [email protected]"
__copyright__ = "Copyright 2021, Small APi Project"
__credits__ = ["NoΓ© Cruz", "Zurck'z"]
__license__ = "MIT"
__version__ = "0.0.1"
__maintainer__ = "NoΓ© Cruz"
__email__ = "[email protected]"
__status__ = "Dev"
# Generic payload type carried by RepositoryResult.
T = TypeVar("T")
class RepositoryResult(ZContract[T]):
    """Typed contract wrapper for repository-layer results."""

    def __init__(
        self,
        data: T = None,
        status: ZContractStatus = ZContractStatus.PENDING,
        errors: List[Any] = None,
    ) -> None:
        # Plain delegation to the generic contract container.
        super().__init__(data=data, status=status, errors=errors)
class ZRepository(object):
db: DBConnection
logger: TLogger = None
transact: ZTranstaction
    def __init__(self, db: DBConnection = None) -> None:
        """Create a repository bound to an optional DB connection manager."""
        super().__init__()
        # Connection manager used to acquire/release pooled connections.
        self.db = db
        # Transaction dispatcher; injected later via set_transaction_dispatcher.
        self.transact = None
def _check_transact(self):
if self.transact == None:
raise "Transactionable component did not initialize."
    def set_transaction_dispatcher(self, transact: ZTranstaction):
        """Inject the transaction dispatcher used by the *_txn helpers."""
        self.transact = transact
def set_logger(self, logger: TLogger = None):
self.logger = logger
if self.logger != None:
self.logger.add_phase(DataAccess())
def release_db_connection(self, connection):
if self.db != None:
return self.db.release_connection(connection)
raise ZDatabaseOperationError("DB Connection didn't provided")
def get_db_connection(self, logger: TLogger = None):
if self.db != None:
return self.db.get_pool_connection(logger=logger)
raise ZDatabaseOperationError("DB Connection didn't provided")
def __show_db_error(
self,
e: Exception,
fn: str,
type_err: str,
native_params: Dict = None,
custom_params: Optional[List[ZParam]] = None,
logger: TLogger = None,
):
"""
Show error acording logger value, if logger is null print on console
RETURNS:
-------
added into logger
"""
logger = self.logger if logger == None else logger
if logger == None:
c_err("\n\tAN ERROR OCURRED WHEN EXECUTE DB FUNCTION\n", True)
c_text(f" Function: {fn}", True)
c_text(f" Type: {type_err}", True)
c_text(" With params: {}".format(native_params), True)
c_text("Custom params: {}".format(custom_params), True)
c_text("--------------------------------------------------", True)
c_text("Details:", True)
logging.exception(e)
c_err("--------------------------------------------------", True)
return False
else:
add_log_to("\n\tAN ERROR OCURRED WHEN EXECUTE DB FUNCTION\n", logger)
add_log_to(f" Function: {fn}", logger)
add_log_to(f" Type: {type_err}", logger)
add_log_to(f" With params: {native_params}", logger)
add_log_to(f"Custom params: {custom_params}", logger)
add_log_to("--------------------------------------------------", logger)
add_log_to("Details:", logger)
add_log_to(e, logger)
add_log_to("--------------------------------------------------", logger)
return True
def __extract_custom_errors(self, current_connection):
try:
cursor = current_connection.cursor()
status = cursor.var(cx_Oracle.NUMBER)
line = cursor.var(cx_Oracle.STRING)
lines = []
while True:
cursor.callproc("DBMS_OUTPUT.GET_LINE", (line, status))
if status.getvalue() == 0:
lines.append(line.getvalue())
else:
break
cursor.close()
return lines
except Exception as e:
print(e)
return []
def __get_custom_messgae(self, e: Exception):
try:
stringfy = str(e)
stringfy = stringfy[stringfy.index("::") + 2 :]
return stringfy[: stringfy.index("::")]
except Exception as e:
return "Unspecified error."
def exec(
self,
fn_name: str,
ret_type: OracleType,
params: dict = None,
model: Any = None,
connection=None,
custom_params: Optional[List[ZParam]] = None,
logger: TLogger = None,
) -> RepositoryResult:
"""
DB Funtion executor
Parameters
----------
fn : str | required
Function name with package name: PO_LINES_PKG.FN_GET_LINE
ret_type : OracleType | required
The return type of oracle db function
params : Dict | Optional
Set parameter that the oracle funtion expects
custom_params : Optional[List[ZParam, IntList, StrList, ClobList]] | Optional
Custom Set parameter that the oracle funtion expects, see avaialble custom types
model : marshmallow_objects.models.Model | Optional
Model specification where the db data will be volcated
connection : DB Connection | Optional
The db connection object, if it is not passed by params, it tries to get a global instance
Raises
------
NotValueProvided
Connection
ZDBException
DB General Exception
ZIDBException
Vilated DB Integrity
Returns
-------
result : RespositoryResult
The result set of oracle db function
"""
try:
result = self.execute(
fn_name=fn_name,
ret_type=ret_type,
params=params,
model=model,
connection=connection,
custom_params=custom_params,
logger=logger,
)
return RepositoryResult(data=result, status=ZContractStatus.SUCCESS)
except ZError as e:
return RepositoryResult(
status=ZContractStatus.ERROR, errors=[e.get_error()]
)
def execute(
self,
fn_name: str,
ret_type: OracleType,
params: dict = None,
model: Any = None,
connection=None,
custom_params: Optional[List[ZParam]] = None,
logger: TLogger = None,
):
"""
New feature for call oracle db functions
Use this function instead function 'call'
Parameters
----------
fn : str | required
Function name with package name: PO_LINES_PKG.FN_GET_LINE
ret_type : OracleType | required
The return type of oracle db function
params : Dict | Optional
Set parameter that the oracle funtion expects
custom_params : Optional[List[ZParam, IntList, StrList, ClobList]] | Optional
Custom Set parameter that the oracle funtion expects, see avaialble custom types
model : marshmallow_objects.models.Model | Optional
Model specification where the db data will be volcated
connection : DB Connection | Optional
The db connection object, if it is not passed by params, it tries to get a global instance
Raises
------
NotValueProvided
Connection
ZDBException
DB General Exception
ZIDBException
Vilated DB Integrity
Returns
-------
result set : Union[List[Model],int,float,str]
The result set of oracle db function
"""
logger = self.logger if logger == None else logger
if self.db is None:
add_log_to("DB Connection didn't provided in execute db function", logger)
raise ZDatabaseOperationError(
message="Impossible establish a connection with data provider.",
reason="Connection object did not provided to dispatcher.",
)
cn = connection
connection_passed = True
if connection == None:
connection_passed = False
cn = self.db.get_pool_connection()
try:
result_set = self.db.exec(
fn=fn_name,
ret_type=ret_type,
params=params,
model=model,
connection=cn,
custom_params=custom_params,
)
if connection_passed == False:
self.db.release_connection(cn)
return result_set
except cx_Oracle.IntegrityError as e:
if connection_passed == False:
self.db.release_connection(cn)
if (
self.__show_db_error(
e,
fn_name,
"Integrity Database Error",
params,
custom_params,
logger,
)
== False
):
add_log_to(e, logger)
raise ZDBIntegrityError(
d_except=e,
message=self.__get_custom_messgae(e),
reason="A data schema restriction is violated when attempting to process operations on the supplied data.",
)
except Exception as e:
if connection_passed == False:
self.db.release_connection(cn)
if (
self.__show_db_error(
e,
fn_name,
"Unspecified DB Exception",
params,
custom_params,
logger,
)
== False
):
add_log_to(e, logger)
raise ZDatabaseOperationError(
d_except=e,
message=self.__get_custom_messgae(e),
reason="When trying to process an operation with the database provider.",
)
def exec_txn(
self,
fn_name: str,
ret_type: OracleType,
table_name: str = None,
operation_type=ZOperation,
transaction: str = None,
params: dict = None,
model: Any = None,
connection=None,
custom_params: Optional[List[ZParam]] = None,
logger: TLogger = None,
) -> RepositoryResult:
self._check_transact()
result = self.exec(
fn_name=fn_name,
ret_type=ret_type,
params=params,
model=model,
connection=connection,
custom_params=custom_params,
logger=logger,
)
if result.status == ZContractStatus.SUCCESS:
row = result
if "INT" in operation_type.value:
row = params
else:
if ret_type == OracleType.number or ret_type == OracleType.decimal:
row = {"id": result}
self.transact.operation(operation_type, table_name, row, transaction)
return result
def execute_txn(
self,
fn_name: str,
ret_type: OracleType,
table_name: str = None,
operation_type=ZOperation,
transaction: str = None,
params: dict = None,
model: Any = None,
connection=None,
custom_params: Optional[List[ZParam]] = None,
logger: TLogger = None,
) -> Any:
"""
Can be throw exception
"""
self._check_transact()
result = self.execute(
fn_name=fn_name,
ret_type=ret_type,
params=params,
model=model,
connection=connection,
custom_params=custom_params,
logger=logger,
)
if result != None:
row = result
if "INT" in operation_type.value:
row = params
else:
if ret_type == OracleType.number or ret_type == OracleType.decimal:
row = {"id": result}
self.transact.operation(operation_type, table_name, row, transaction)
return result
def insert(
self,
fn_name: str,
ret_type: OracleType,
table_name: str = None,
transaction: str = None,
params: dict = None,
model: Any = None,
connection=None,
custom_params: Optional[List[ZParam]] = None,
logger: TLogger = None,
) -> Any:
"""
Zurck'z
Can be throw an exception
"""
self._check_transact()
result = self.execute(
fn_name=fn_name,
ret_type=ret_type,
params=params,
model=model,
connection=connection,
custom_params=custom_params,
logger=logger,
)
if result != None:
row = result
if ret_type == OracleType.number or ret_type == OracleType.decimal:
row = {"id": result}
self.transact.insert(table=table_name, data=row, transaction=transaction)
return result
def int_insert(
self,
fn_name: str,
ret_type: OracleType,
table_name: str = None,
transaction: str = None,
params: dict = None,
model: Any = None,
connection=None,
custom_params: Optional[List[ZParam]] = None,
logger: TLogger = None,
) -> Any:
"""
Zurck'z
Can be throw an exception
"""
self._check_transact()
result = self.execute(
fn_name=fn_name,
ret_type=ret_type,
params=params,
model=model,
connection=connection,
custom_params=custom_params,
logger=logger,
)
if result != None:
self.transact.int_insert(
table=table_name, data=params, transaction=transaction
)
return result
def update(
self,
fn_name: str,
ret_type: OracleType,
table_name: str = None,
transaction: str = None,
params: dict = None,
model: Any = None,
connection=None,
custom_params: Optional[List[ZParam]] = None,
logger: TLogger = None,
) -> Any:
"""
Zurck'z
Can be throw an exception
"""
self._check_transact()
result = self.execute(
fn_name=fn_name,
ret_type=ret_type,
params=params,
model=model,
connection=connection,
custom_params=custom_params,
logger=logger,
)
if result != None:
row = result
if ret_type == OracleType.number or ret_type == OracleType.decimal:
row = {"id": result}
self.transact.update(table=table_name, data=row, transaction=transaction)
return result
def int_update(
self,
fn_name: str,
ret_type: OracleType,
table_name: str = None,
transaction: str = None,
params: dict = None,
model: Any = None,
connection=None,
custom_params: Optional[List[ZParam]] = None,
logger: TLogger = None,
) -> Any:
"""
Zurck'z
Can be throw an exception
"""
self._check_transact()
result = self.execute(
fn_name=fn_name,
ret_type=ret_type,
params=params,
model=model,
connection=connection,
custom_params=custom_params,
logger=logger,
)
if result != None:
self.transact.int_update(
table=table_name, data=params, transaction=transaction
)
return result
def delete(
self,
fn_name: str,
ret_type: OracleType,
table_name: str = None,
transaction: str = None,
params: dict = None,
model: Any = None,
connection=None,
custom_params: Optional[List[ZParam]] = None,
logger: TLogger = None,
) -> Any:
"""
Zurck'z
Can be throw an exception
"""
self._check_transact()
result = self.execute(
fn_name=fn_name,
ret_type=ret_type,
params=params,
model=model,
connection=connection,
custom_params=custom_params,
logger=logger,
)
if result != None:
row = result
if ret_type == OracleType.number or ret_type == OracleType.decimal:
row = {"id": result}
self.transact.delete(table=table_name, data=row, transaction=transaction)
return result
def int_delete(
self,
fn_name: str,
ret_type: OracleType,
table_name: str = None,
transaction: str = None,
params: dict = None,
model: Any = None,
connection=None,
custom_params: Optional[List[ZParam]] = None,
logger: TLogger = None,
) -> Any:
"""
Zurck'z
Can be throw an exception
"""
self._check_transact()
result = self.execute(
fn_name=fn_name,
ret_type=ret_type,
params=params,
model=model,
connection=connection,
custom_params=custom_params,
logger=logger,
)
if result != None:
self.transact.int_delete(
table=table_name, data=params, transaction=transaction
)
return result
def begin_txn(self, name: str):
"""
"""
self._check_transact()
self.transact.begin_txn(name)
def commit_txn(self, name: str = None, pop: bool = False):
"""
"""
self._check_transact()
return self.transact.commit(name, pop)
    def get_transaction_store(self) -> dict:
        """Return the raw transaction store dictionary (all named transactions)."""
        self._check_transact()
        return self.transact.store
from zpy.utils import get_operation_id
from enum import Enum
from typing import Any
import json
__author__ = "NoΓ© Cruz | [email protected]"
__copyright__ = "Copyright 2021, Small APi Project"
__credits__ = ["NoΓ© Cruz", "Zurck'z"]
__license__ = "MIT"
__version__ = "0.0.1"
__maintainer__ = "NoΓ© Cruz"
__email__ = "[email protected]"
__status__ = "Dev"

# Keys of the standard API response envelope built by Builder/BResponse.
PAYLOAD_KEY: str = "data"              # business payload
STATUS_KEY: str = "status"             # short status label (e.g. "SUCCEEDED")
DESCRIPTION_KEY: str = "description"   # human-readable status detail
OPERATION_ID_KEY: str = "operationId"  # correlation id for the request
ERRORS_KEY: str = "errors"             # list of error dicts (error responses only)
class Status(Enum):
    """
    Common HTTP status codes.

    Each member value is a 3-tuple:
    ``(CODE, SHORT DESCRIPTION, STATUS DETAILS)`` — consumed positionally by
    the response Builder (``value[0]`` = HTTP code, ``value[1]`` = label,
    ``value[2]`` = detail text).
    """
    SUCCESS = (200, "SUCCEEDED", "The request has succeeded")
    CREATED = (201, "CREATED","The request has been fulfilled and resulted in a new resource being created.")
    ACCEPTED = (202, "ACCEPTED","The request has been accepted for processing, but the processing has not been completed.")
    NO_CONTENT = (204, "NO CONTENT","The request has been completed successfully but your response has no content, although the headers can be useful.",)
    PARTIAL_CONTENT = (206, "PARTIAL CONTENT", "Partial content")
    BAD_REQUEST = (400, "BAD REQUEST","The request could not be understood by the server due to malformed syntax. The client SHOULD NOT repeat the request without modifications.",)
    UNAUTHORIZED = (401, "UNAUTHORIZED", "The request requires user authentication.")
    FORBIDDEN = (403, "FORBIDDEN","The server understood the request, but is refusing to fulfill it.")
    NOT_FOUND = (404, "NOT FOUND","The server has not found anything matching the Request-URI.",)
    METHOD_NOT_ALLOWED = (405, "METHOD NOT ALLOWED","The method specified in the Request-Line is not allowed for the resource identified by the Request-URI.",)
    CONTENT_NOT_ACCEPTABLE = (406, "METHOD NOT ACCEPTABLE","The resource identified by the request is only capable of generating response entities which have content characteristics not acceptable according to the accept headers sent in the request.",)
    REQUEST_TIMEOUT = (408, "REQUEST TIMEOUT", "Time out")
    PRE_CONDITION_FAILED = (412, "PRECONDITION FAILED","The client has indicated preconditions in their headers which the server does not meet.",)
    UNSUPPORTED_MEDIA_TYPE = (415, "UNSUPPORTED MEDIA TYPE","The multimedia format of the requested data is not supported by the server, therefore the server rejects the request.",)
    IM_A_TEAPOT = (418, "IM A TEAPOT","The server refuses to try to make coffee with a kettle.",)
    CONFLICT = (409, "CONFLICT", "The server found conflict with request supplied.")
    UNPROCESSABLE = (422, "UNPROCESSABLE ENTITY","The process could not be completed due to a semantics error.",)
    LOCKED = (423, "LOCKED","The source or destination resource of a method is locked.",)
    INTERNAL_SERVER_ERROR = (500, "INTERNAL SERVER ERROR","The server encountered an unexpected condition which prevented it from fulfilling the request.",)
    NOT_IMPEMENTED = (501, "NOT IMPLEMENTED","The server does not support the functionality required to fulfill the request",)
    # NOTE(review): SERVICE_UNAVAIBLE and GATEWAY_TIMEOUT both use code 503
    # (gateway timeout is conventionally 504) — confirm before changing, since
    # enum members with equal values would still be distinct (tuples differ).
    SERVICE_UNAVAIBLE = (503, "SERVICE UNAVAIBLE","The server is currently unable to handle the request due to a temporary overloading or maintenance of the server.",)
    GATEWAY_TIMEOUT = (503, "GATEWAY TIMEOUT", "Timeout")
    LOOP_DETECTED = (508, "LOOP DETECTED","The server encountered an infinite loop while processing the request. ",)
class Error:
    """Placeholder base class for API error payloads (intentionally empty)."""
class Builder:
    """
    Common response builder.

    Produces the standard API envelope::

        {"data": ..., "description": ..., "status": ..., "operationId": ...}

    Instance methods compose fluently (every ``add_*`` returns ``self``);
    the static helpers return one-shot ``(body_dict, http_status_code)``
    tuples.
    """

    def __init__(self, custom_schema=None) -> None:
        # FIX: the response dict is now created per instance. It used to be a
        # mutable class attribute, so every Builder mutated the same dict.
        if custom_schema is not None:
            self.response = custom_schema
        else:
            self.response = {
                PAYLOAD_KEY: {},
                DESCRIPTION_KEY: Status.SUCCESS.value[2],
                STATUS_KEY: Status.SUCCESS.value[1],
                OPERATION_ID_KEY: None,
            }
        self.response_code = 200

    def add_custom(self, key: str, data: Any):
        """Set an arbitrary item on the response body (replaces existing key)."""
        self.response[key] = data
        return self

    def add_error(self, error: Any):
        """Append *error* to the response's error list, creating it if needed."""
        if ERRORS_KEY in self.response:
            self.response[ERRORS_KEY].append(error)
        else:
            self.response[ERRORS_KEY] = [error]
        return self

    def add_payload(self, payload):
        """Set the data payload of the response (replaces an existing payload)."""
        self.response[PAYLOAD_KEY] = payload
        return self

    def add_description(self, description: str):
        """Set the human-readable description of the response."""
        self.response[DESCRIPTION_KEY] = description
        return self

    def add_operation_id(self, id: str):
        """Set the operation (correlation) id of the response."""
        self.response[OPERATION_ID_KEY] = id
        return self

    def add_status(self, status: Status):
        """Apply *status* — ``(code, short description, detail)`` — to the response."""
        self.response[STATUS_KEY] = status.value[1]
        self.response_code = status.value[0]
        self.response[DESCRIPTION_KEY] = status.value[2]
        return self

    @staticmethod
    def _assemble(status, payload, operation_id, description, fn_transform, errors=None):
        """Shared implementation of the one-shot static builders.

        Returns ``(body_dict, http_status_code)``. ``errors`` is only put in
        the body when provided (success responses carry no errors key).
        """
        response = {
            OPERATION_ID_KEY: get_operation_id() if operation_id is None else operation_id
        }
        if errors is not None:
            response[ERRORS_KEY] = errors
        response[PAYLOAD_KEY] = payload
        response[DESCRIPTION_KEY] = (
            status.value[2] if description is None else description
        )
        response[STATUS_KEY] = status.value[1]
        if fn_transform is not None:
            response = fn_transform(response)
        return response, status.value[0]

    @staticmethod
    def success(
        payload=None,
        status_code: Status = Status.SUCCESS,
        operation_id=None,
        description: str = None,
        fn_transform=None,
    ):
        """Build a success response tuple (mutable ``{}`` default replaced by None)."""
        return Builder._assemble(
            status_code,
            {} if payload is None else payload,
            operation_id,
            description,
            fn_transform,
        )

    @staticmethod
    def error(
        payload=None,
        errors=None,
        status=Status.INTERNAL_SERVER_ERROR,
        operation_id=None,
        description: str = None,
        fn_transform=None,
    ):
        """Build an error response tuple (HTTP 500 by default)."""
        return Builder._assemble(
            status,
            {} if payload is None else payload,
            operation_id,
            description,
            fn_transform,
            errors=[] if errors is None else errors,
        )

    @staticmethod
    def bad_request(
        payload=None,
        errors=None,
        status=Status.BAD_REQUEST,
        operation_id=None,
        description: str = None,
        fn_transform=None,
    ):
        """Build a bad-request (HTTP 400) response tuple."""
        return Builder._assemble(
            status,
            {} if payload is None else payload,
            operation_id,
            description,
            fn_transform,
            errors=[] if errors is None else errors,
        )

    @staticmethod
    def no_content(
        payload=None,
        errors=None,
        status=Status.NO_CONTENT,
        operation_id=None,
        description: str = None,
        fn_transform=None,
    ):
        """Build a no-content (HTTP 204) response tuple."""
        return Builder._assemble(
            status,
            {} if payload is None else payload,
            operation_id,
            description,
            fn_transform,
            errors=[] if errors is None else errors,
        )

    def build(self, fn_transform=None) -> Any:
        """Finalize the fluent response; fills operationId when unset."""
        if self.response[OPERATION_ID_KEY] is None:
            self.response[OPERATION_ID_KEY] = get_operation_id()
        if fn_transform is not None:
            self.response = fn_transform(self.response)
        return self.response, self.response_code

    def str_json_build(self, fn_transform=None) -> Any:
        """Like :meth:`build` but serializes the body to a JSON string.

        ``fn_transform`` is now optional, consistent with :meth:`build`.
        """
        if self.response[OPERATION_ID_KEY] is None:
            self.response[OPERATION_ID_KEY] = get_operation_id()
        if fn_transform is not None:
            self.response = fn_transform(self.response)
        return json.dumps(self.response), self.response_code
class BResponse(Builder):
    """
    Zurck'z Common Builder Response.
    """

    @staticmethod
    def err(
        payload=None,
        errors=None,
        status=Status.INTERNAL_SERVER_ERROR,
        operation_id=None,
        description: str = None,
        fn_transform=None,
    ):
        """
        Build an error response with a None payload.

        FIX: declared ``@staticmethod`` — the original had no decorator and no
        ``self``, so calling it on an instance would have bound the instance
        to ``payload``. The mutable ``errors=[]`` default was replaced by
        ``None`` and normalized locally.
        """
        return Builder.error(
            payload,
            [] if errors is None else errors,
            status,
            operation_id,
            description,
            fn_transform,
        )
from enum import Enum
from typing import Dict
from zpy.api.stages import Unspecified
__author__ = "NoΓ© Cruz | [email protected]"
__copyright__ = "Copyright 2021, Small APi Project"
__credits__ = ["NoΓ© Cruz", "Zurck'z"]
__license__ = "MIT"
__version__ = "0.0.1"
__maintainer__ = "NoΓ© Cruz"
__email__ = "[email protected]"
__status__ = "Dev"
class ErrorDomain(Enum):
    """High-level domains an API error can belong to.

    Values are ``(label, detail)`` tuples; the detail slot is currently unused.
    """
    GLOBAL = ("GLOBAL", "")
    IN_PHASE = ("IN PHASE", "")
    INVALID_REQUEST = ("INVALID REQUEST", "")
    INVALID_PARAMETER = ("INVALID PARAMETER", "")
class ErrorCodes(Enum):
    """Numeric application error codes emitted in error payloads."""
    UNSPECIFIED = 99           # no specific cause identified
    GENERAL = 100              # generic application error
    BAD_REQUEST = 101          # malformed client request
    INVALID_TYPE_REQUEST = 102
    DB_OPERATION = 103         # database-layer failure
    SERVICE_PROCESS = 104      # service/business-logic failure
# Keys of the error payload dict built by ErrorBuilder.
# DOMAIN_ERROR_KEY = 'domain'
REASON_ERROR_KEY = "reason"    # short machine-oriented cause
MESSAGE_ERROR_KEY = "message"  # human-readable explanation
METADATA_KEY = "metadata"      # optional extra context (any shape)
CODE_ERROR_KEY = "code"        # numeric ErrorCodes value
# STAGE_ERROR_KEY = 'stage'
class ErrorBuilder(object):
    """Fluent builder for the standard error payload dict.

    Also exposes :meth:`common` to build the same payload in a single call.
    """

    def __init__(self) -> None:
        # FIX: per-instance dict. It used to be a mutable class attribute, so
        # every ErrorBuilder instance mutated one shared error object.
        self.error = {
            REASON_ERROR_KEY: "",
            MESSAGE_ERROR_KEY: "",
            CODE_ERROR_KEY: "",
            METADATA_KEY: None,
        }

    @staticmethod
    def common(reason: str, message: str, code: ErrorCodes = ErrorCodes.GENERAL, meta=None):
        """Build an error payload dict in one call (stores ``code.value``)."""
        return {
            REASON_ERROR_KEY: reason,
            MESSAGE_ERROR_KEY: message,
            CODE_ERROR_KEY: code.value,
            METADATA_KEY: meta,
        }

    def add_meta(self, meta: Dict = None):
        # FIX: default used to be `Unspecified()` (a Stage instance) — clearly
        # not a Dict; None is the neutral default.
        self.error[METADATA_KEY] = meta
        return self

    def add_reason(self, reason: str = ""):
        self.error[REASON_ERROR_KEY] = reason
        return self

    def add_message(self, message: str = ""):
        self.error[MESSAGE_ERROR_KEY] = message
        return self

    def add_code(self, code: ErrorCodes = ErrorCodes.UNSPECIFIED):
        # FIX: store the numeric value, consistent with `common` (previously
        # the Enum member itself was stored, which is not JSON-serializable).
        self.error[CODE_ERROR_KEY] = code.value
        return self

    def build(self) -> dict:
        """Return the accumulated error payload dict."""
        return self.error
from abc import ABC, abstractmethod
from flask import request
from zpy.logger import g_log
from zpy.api.errors import ErrorBuilder
from flask import Flask
from zpy.utils.Encryptor import AESEncryptor
from zpy.api.stages import Encryption, ResourceGateway
from zpy.api.reponse import Builder, Status
import json
__author__ = "NoΓ© Cruz | [email protected]"
__copyright__ = "Copyright 2021, Small APi Project"
__credits__ = ["NoΓ© Cruz", "Zurck'z"]
__license__ = "upax"
__version__ = "0.0.1"
__maintainer__ = "NoΓ© Cruz"
__email__ = "[email protected]"
__status__ = "Dev"
class ZHook(ABC):
    """Base contract for hooks that attach extra behavior to a Flask app."""
    @abstractmethod
    def execute(self, app: Flask, **kwargs):
        """Register this hook on *app*; ``kwargs`` carry hook-specific config."""
        pass
# Custom hooks
class AESEncryptHook(ZHook):
    """Hook that AES-encrypts every outgoing response body after the request."""

    def execute(self, app, **kwargs):
        @app.after_request
        def encrypt(response: Flask):
            # NOTE(review): the parameter is annotated Flask but Flask passes a
            # Response object to after_request callbacks — confirm annotation.
            try:
                if response.data:
                    parsed = json.loads(response.data)
                    aes: str = None
                    if (
                        kwargs != None and "aes_sk" in kwargs
                    ):  # ! WARNING HARD KEY FOR EXTARCT AES SK
                        aes = kwargs["aes_sk"]
                    encrypted = AESEncryptor.encrypt_ws_request(parsed, secret_key=aes)
                    response.data = json.dumps(encrypted)
                return response
            except Exception as e:
                stage = Encryption()
                g_log(e, stage)
                # NOTE(review): ErrorBuilder.common's third parameter is an
                # ErrorCodes value, but a Stage instance is passed here — verify.
                return Builder().error(
                    {},
                    errors=[
                        ErrorBuilder.common(
                            "Encryption hook throw exception",
                            "The response catched by hook have invalid structure",
                            stage,
                        )
                    ],
                )

        return super().execute(app)
class AESDecryptHook(ZHook):
    """Hook that AES-decrypts every incoming request body before handling."""

    def execute(self, app: Flask, **kwargs):
        @app.before_request
        def decrypt():
            try:
                if request.data:
                    aes: str = None
                    if (
                        kwargs != None and "aes_sk" in kwargs
                    ):  # ! WARNING HARD KEY FOR EXTARCT AES SK
                        aes = kwargs["aes_sk"]
                    decrypt_data = AESEncryptor.decrypt_ws_response(
                        request.get_json(), aes
                    )
                    request.data = json.dumps(decrypt_data)
                    # NOTE(review): a before_request callback that returns a
                    # non-None value short-circuits request handling in Flask;
                    # returning `request` here looks unintended — confirm.
                    return request
            except Exception as e:
                stage = Encryption()
                g_log(e, stage)
                # NOTE(review): same as the encrypt hook — a Stage instance is
                # passed where ErrorBuilder.common expects an ErrorCodes value.
                return Builder().error(
                    {},
                    errors=[
                        ErrorBuilder.common(
                            "Encryption hook throw exception",
                            "The response catched by hook have invalid structure",
                            stage,
                        )
                    ],
                )

        return super().execute(app)
class NotFoundHook(ZHook):
    """Hook that converts Flask 404 errors into the standard response envelope."""

    def execute(self, app: Flask, **kwargs):
        @app.errorhandler(404)
        def not_found(e):
            try:
                g_log(e, ResourceGateway())
                return (
                    Builder()
                    .add_status(Status.NOT_FOUND)
                    .add_error("Resource not exist")
                    .add_error(str(e))
                    .build()
                )
            except Exception:
                # best effort: fall through to Flask's default 404 handling
                pass

        return super().execute(app)
class TearDownHook(ZHook):
    """Hook that logs a message when the Flask app context is torn down."""

    def execute(self, app: Flask, **kwargs):
        @app.teardown_appcontext
        def dispose(exception):
            try:
                print("Teardown Micro Service")
            except Exception:
                pass

        return super().execute(app)
from enum import IntEnum
from zpy.api.hooks import NotFoundHook, ZHook
from zpy.api.middleware import ZMiddleware, ParserMiddleWare
from functools import wraps
from typing import Any, Dict, Generic, List, Optional, TypeVar
from flask.views import MethodView
from flask_restful import Api
from flask import Flask
from flask_cors import CORS
__author__ = "NoΓ© Cruz | [email protected]"
__copyright__ = "Copyright 2021, Small APi Project"
__credits__ = ["NoΓ© Cruz", "Zurck'z"]
__license__ = "MIT"
__version__ = "0.0.1"
__maintainer__ = "NoΓ© Cruz"
__email__ = "[email protected]"
__status__ = "Dev"
# Custom Type for typing generic functions
T = TypeVar("T")  # generic payload type used by ZContract
## Class ApiConfiguration
# Multiple environment variables for microservices
#
class ApiConfiguration:
    """Environment/configuration values for a microservice API.

    Class-level defaults remain so subclasses can override them declaratively;
    ``CUSTOM`` additionally gets a fresh per-instance dict (see ``__init__``).
    """

    DEBUG: bool = False
    TESTING: bool = False
    CSRF_ENABLED: bool = False
    SECRET: str = ""
    DB_URI: str = ""
    DB_SERVICE: str = ""
    DB_PORT: str = ""
    DB_USER: str = ""
    DB_PASSWORD: str = ""
    ENVIRONMENT: str = ""
    CUSTOM: Dict = {}

    def __init__(self) -> None:
        # FIX: give every instance its own CUSTOM dict — the class-level
        # default is a single shared mutable object, so mutating one
        # configuration's CUSTOM used to leak into all others.
        self.CUSTOM = {}

    def to_dict(self) -> dict:
        """Return the configuration as a plain dict."""
        return {
            "DEBUG": self.DEBUG,
            "TESTING": self.TESTING,
            "CSRF_ENABLED": self.CSRF_ENABLED,
            "SECRET": self.SECRET,
            "DB_URI": self.DB_URI,
            "DB_SERVICE": self.DB_SERVICE,
            "DB_PORT": self.DB_PORT,
            "DB_USER": self.DB_USER,
            "DB_PASSWORD": self.DB_PASSWORD,
            "ENVIRONMENT": self.ENVIRONMENT,
            "CUSTOM": self.CUSTOM,
        }
## Class ZResource | Zurck'z Resources
# Pairs a MethodView resource with the path it is mounted on.
#
class ZResource:
    """
    Micro service resource: a MethodView class plus its mount path and the
    keyword arguments forwarded to the resource constructor.
    """

    def __init__(self, path: str, resource: MethodView, **kwargs) -> None:
        self.path = path
        self.resource = resource
        self.kwargs = kwargs
def create_app(
    config: ApiConfiguration,
    resources: List[ZResource],
    main_path: str,
    path_cors_allow=None,
) -> Flask:
    """
    API Builder: create a Flask app, apply *config*, enable CORS and mount
    every :class:`ZResource` under *main_path*.

    Parameters
    ----------
    config : ApiConfiguration
        Configuration object loaded via ``app.config.from_object``.
    resources : List[ZResource]
        Resources to register with flask_restful.
    main_path : str
        Base path prefixed to every resource path.
    path_cors_allow : str, optional
        CORS resource pattern; defaults to ``<main_path>/*``.
    """
    app = Flask(__name__, instance_relative_config=True)
    app.config.from_object(config)
    path_allow = path_cors_allow
    if path_allow is None:
        # default: allow CORS for everything under the API base path
        path_allow = "{}{}".format(main_path, "/*")
    CORS(app, resources={path_allow: {"origins": "*"}})
    api = Api(app)
    # plain loop — the previous list comprehension was used only for its
    # side effects and built a throwaway list
    for r in resources:
        api.add_resource(
            r.resource,
            "{0}{1}".format(main_path, r.path),
            resource_class_kwargs=r.kwargs,
        )
    return app
def api(
    base: str = "",
    config: ApiConfiguration = None,
    middlewares: List[ZMiddleware] = None,
    middlewares_args: List[dict] = None,
    hooks: List[ZHook] = None,
    hooks_args: List[dict] = None,
    path_cors_allow=None,
) -> Flask:
    """
    Micro Service Setup: decorator factory that turns a resource-definition
    function (returning ``List[ZResource]``) into a Flask app builder.

    FIX: the list parameters used to default to shared mutable ``[]`` and were
    mutated with ``.append`` on every wrapped call, so ParserMiddleWare and
    NotFoundHook accumulated across calls. They are now copied once.
    """
    if config is None:
        raise Exception("Api Configurations were not provided")
    # local copies: never mutate the caller's lists (or a shared default)
    middlewares = list(middlewares or []) + [ParserMiddleWare]
    middlewares_args = list(middlewares_args or []) + [{}]
    hooks = list(hooks or []) + [NotFoundHook]
    hooks_args = list(hooks_args or []) + [{}]

    def callable(api_definitor) -> Flask:
        @wraps(api_definitor)
        def wrapper(*args, **kwargs) -> Flask:
            resources: List[ZResource] = api_definitor(*args, **kwargs)
            app: Flask = create_app(config, resources, base, path_cors_allow)
            # wrap the WSGI app with every middleware (args merged with CUSTOM)
            for i, m in enumerate(middlewares):
                m_args = middlewares_args[i] if i < len(middlewares_args) else {}
                m_args.update(config.CUSTOM)
                app.wsgi_app = m(app.wsgi_app, **m_args)
            # register every hook (args merged with CUSTOM)
            for i, h in enumerate(hooks):
                h_args = hooks_args[i] if i < len(hooks_args) else {}
                h_args.update(config.CUSTOM)
                h().execute(app, **h_args)
            return app

        return wrapper

    return callable
class ZContractStatus(IntEnum):
    """Outcome of a ZContract operation (IntEnum: compares equal to its int)."""
    ERROR = -1
    SUCCESS = 1
    PENDING = 0
class ZContract(Generic[T]):
    """Generic operation contract: a payload, its status and collected errors."""

    data: T = None
    status: ZContractStatus = None
    errors: List[Dict] = None

    def __init__(
        self,
        data: Optional[T] = None,
        status: ZContractStatus = ZContractStatus.PENDING,
        errors: Optional[List[Any]] = None,
    ) -> None:
        super().__init__()
        self.errors = errors
        self.status = status
        self.data = data
from marshmallow_objects import models
from datetime import datetime
from typing import Any, Callable, Dict, List, Optional
from copy import copy
import json
__author__ = "NoΓ© Cruz | [email protected]"
__copyright__ = "Copyright 2021, Small APi Project"
__credits__ = ["NoΓ© Cruz", "Zurck'z"]
__license__ = "MIT"
__version__ = "0.0.1"
__maintainer__ = "NoΓ© Cruz"
__email__ = "[email protected]"
__status__ = "Dev"
#
# Zurck'z implementation
class ZModel(models.Model):
    """
    Zurckz Model.

    Base class over ``marshmallow_objects.models.Model`` that adds JSON-safe
    dumping: internal bookkeeping attributes are stripped, nested models and
    lists of models are serialized recursively, and datetimes are stringified.
    """

    # Keys always removed from dumps. The "_ZModel__..." entries are the
    # name-mangled forms of this class's own private attributes, so the
    # bookkeeping itself never leaks into serialized output.
    __remove_keys: List[str] = [
        "__dump_lock__",
        "__schema__",
        "__missing_fields__",
        "__setattr_func__",
        "_ZModel__remove_keys",
        "_ZModel__update_items",
    ]

    # NOTE(review): class-level mutable dict — instances constructed without
    # `include` share this single object; confirm that is intended.
    __update_items = {}

    def __init__(
        self,
        exclude: Optional[List[str]] = None,
        include: Optional[Dict[Any, Any]] = None,
        context=None,
        partial=None,
        **kwargs
    ):
        # exclude: extra keys to strip from every dump of this instance
        # include: extra key/value pairs merged into every dump
        super().__init__(context=context, partial=partial, **kwargs)
        if exclude != None:
            self.__remove_keys = self.__remove_keys + exclude
        if include != None:
            self.__update_items = include

    def __str__(self):
        """
        Dump nested models by own properties

        Returns the instance serialized as a JSON string (see ``zdump``).
        """
        data = copy(self.__dict__)
        if self.__update_items != None:
            data.update(self.__update_items)
        [data.pop(k, None) for k in self.__remove_keys]
        for k in data.keys():
            if isinstance(data[k], models.Model):
                # nested model: serialize via its own __str__
                data[k] = json.loads(str(data[k]))
            elif isinstance(data[k], list):
                data[k] = [json.loads(str(it)) for it in data[k]]
            elif isinstance(data[k], datetime):
                data[k] = str(data[k])
        return json.dumps(data)

    def zdump(self):
        """
        Dump nested models by own properties

        Like ``__str__`` but returns the plain dict instead of a JSON string.
        """
        data = copy(self.__dict__)
        if self.__update_items != None:
            data.update(self.__update_items)
        [data.pop(k, None) for k in self.__remove_keys]
        for k in data.keys():
            if isinstance(data[k], models.Model):
                data[k] = json.loads(str(data[k]))
            elif isinstance(data[k], list):
                data[k] = [json.loads(str(it)) for it in data[k]]
            elif isinstance(data[k], datetime):
                data[k] = str(data[k])
        return data

    def sdump(
        self,
        exclude_keys: Optional[List[str]] = None,
        include: Optional[Dict[Any, Any]] = None,
        map: Optional[Callable[[Dict], Dict]] = None,
        map_args: Optional[List[Any]] = [],
        store_ex: bool = False,
        store_in: bool = False,
    ):
        """
        Model dump to json safely, checking the exclude key list
        Use this function instead of zdump.
        Parameters:
        -----------
        exclude_keys: List[str], Optional,
            List of string keys of exlude in dump process
        include: Dict[Any,Any], Optional,
            Object to include in model object after exclude process before of dump process
        map: Callable, Optional
            Callable function to tranform object after exclude and include process
        map_args: List[Any], Optional
            Argument list to passed to map callable function
        store_ex: bool, optional
            Indicate that the exclude key added to global model exclude key array
        store_in: bool, optional
            Indicate that the include object added to global model object
        """
        data = copy(self.__dict__)
        temp_exclude = copy(self.__remove_keys)
        if exclude_keys != None:
            temp_exclude = self.__remove_keys + exclude_keys
            if store_ex:
                # persist the extra exclusions on this instance
                self.__remove_keys = self.__remove_keys + exclude_keys
        [data.pop(k, None) for k in temp_exclude]
        temp_include = copy(self.__update_items)
        if include != None:
            temp_include.update(include)
            data.update(temp_include)
            if store_in:
                # persist the extra inclusions on this instance
                self.__update_items.update(include)
        else:
            if temp_include != None:
                data.update(temp_include)
        if map != None:
            # user transform runs after exclude/include, before serialization
            data = map(data, *map_args)
        for k in data.keys():
            if isinstance(data[k], models.Model):
                data[k] = json.loads(str(data[k]))
            elif isinstance(data[k], list):
                # data[k] = [json.loads(str(it)) for it in data[k]]
                inner_list = []
                for it in data[k]:
                    if isinstance(it, str):
                        # plain strings are kept as-is (not JSON-decoded)
                        inner_list.append(it)
                    else:
                        inner_list.append(json.loads(str(it)))
                data[k] = inner_list
            elif isinstance(data[k], datetime):
                data[k] = str(data[k])
        return data

    def build(self):
        """Return the instance dict after include/exclude, without serializing."""
        data = copy(self.__dict__)
        if self.__update_items != None:
            data.update(self.__update_items)
        [data.pop(k, None) for k in self.__remove_keys]
        return data
from zpy.api.stages import Stage
from io import StringIO
from enum import Enum
import logging
# Log-line format applied by every TLogger stream handler.
FORMAT = "ZR: %(asctime)s - %(name)s - %(levelname)s - %(message)s"
# logging.basicConfig(format=FORMAT)
class LColors(Enum):
    """ANSI terminal color escape codes used by the c_* print helpers."""
    CEND = "\033[0m"     # reset
    CBLACK = "\33[30m"
    CRED = "\33[31m"
    CGREEN = "\33[32m"
    CYELLOW = "\33[33m"
    CBLUE = "\33[34m"
    CVIOLET = "\33[35m"
    CBEIGE = "\33[36m"
    CWHITE = "\33[37m"
def line(length: int = 100, char: str = "=") -> str:
    """Return a horizontal separator: *char* repeated *length* times."""
    # string multiplication instead of joining a generated list
    return char * length
def g_log(msg, phase: Stage):
    """Print *msg* framed by separator lines, preceded by the stage header."""
    for chunk in (line(), phase.title(), phase.desc(), msg, line()):
        print(chunk)
def c_warn(msg, single=False, prefix: str = ""):
    """Print *msg* in yellow; ``single`` replaces the default tag with *prefix*."""
    tag = prefix if single else " [ZPy WARNING]: "
    print(f"{LColors.CYELLOW.value}{tag}{msg}{LColors.CEND.value}")
def c_err(msg, single=False, prefix: str = ""):
    """Print *msg* in red; ``single`` replaces the default tag with *prefix*."""
    tag = prefix if single else " [ZPy ERROR]: "
    print(f"{LColors.CRED.value}{tag}{msg}{LColors.CEND.value}")
def c_info(msg, single=False, prefix: str = ""):
    """Print *msg* in blue; ``single`` replaces the default tag with *prefix*."""
    tag = prefix if single else " [ZPy INFORMATION]: "
    print(f"{LColors.CBLUE.value}{tag}{msg}{LColors.CEND.value}")
def c_text(msg, single=False, prefix: str = ""):
    """Print *msg* in white; ``single`` replaces the default tag with *prefix*."""
    tag = prefix if single else " [ZPy]: "
    print(f"{LColors.CWHITE.value}{tag}{msg}{LColors.CEND.value}")
class TLogger:
    """Operation logger that accumulates records in an in-memory StringIO
    so the whole trace can be printed at once (``show_stack_trace``).
    """

    logger_name = "TradeLogger"

    def __init__(self, name=None, operation_id=None) -> None:
        self.log = logging.getLogger(self.logger_name if name is None else name)
        # FIX: iterate over a copy — removing handlers while iterating the
        # live list could skip entries.
        for handler in list(self.log.handlers):
            self.log.removeHandler(handler)
        self.stream = StringIO()
        self.handler = logging.StreamHandler(self.stream)
        self.handler.setFormatter(logging.Formatter(FORMAT))
        self.log.addHandler(self.handler)
        self.log.setLevel(logging.DEBUG)
        self.operation_id = operation_id
        self.log.info(line())
        self.log.info("[ START OPERATION ID :: {0}]".format(self.operation_id))
        self.log.info(line())

    def add_error(self, error) -> None:
        """Record *error* at ERROR level."""
        self.log.error(error)

    def add_exception(self, exception) -> None:
        """Record *exception* at ERROR level including its traceback."""
        self.log.error(exception, exc_info=True)

    def add_warning(self, warning) -> None:
        """Record *warning* at WARNING level.

        FIX: previously logged via ``self.log.error`` (wrong level).
        """
        self.log.warning(warning)

    def add_info(self, info) -> None:
        """Record *info* at INFO level."""
        self.log.info(info)

    def add_phase(self, phase: Stage):
        """Record a framed 'starting stage' banner for *phase*."""
        self.log.info(line())
        self.log.info("[ STARTING STAGE]")
        self.log.info(phase.title())
        self.log.info(phase.desc())
        self.log.info(line())

    def show_stack_trace(self):
        """Print everything accumulated so far to stdout."""
        self.log.info(line())
        print(self.stream.getvalue())

    def dispose(self):
        """Close and detach the in-memory handler."""
        self.handler.close()
        self.log.removeHandler(self.handler)
def add_log_to(log: any, logger: TLogger):
    """Route *log* into *logger*: exceptions with traceback, everything else as info.

    Does nothing when *logger* is None.
    """
    if logger is None:  # identity check instead of `!= None`, guard clause
        return
    if isinstance(log, Exception):
        logger.add_exception(log)
    else:
        logger.add_info(log)
from enum import Enum
class ZOperation(Enum):
    """Kinds of DB operations tracked by ZTranstaction.

    NOTE(review): the INT_* members use "IN_*" string values; repository code
    that tests ``"INT" in value`` never matches them — verify which side is
    intended before changing either.
    """
    INSERT = "INSERT"  # Insert that return nre id
    UPDATE = "UPDATE"  # Update that return row before updated
    DELETE = "DELETE"  # Delete return row deleted
    INT_INSERT = "IN_INSERT"  # Insert that return row with data inserted
    INT_UPDATE = "IN_UPDATE"  # Update that return row with data inserted
    INT_DELETE = "IN_DELETE"  # Delete return row with data inserted
class ZTranstaction(object):
    """In-memory store of DB operations grouped by named transaction and table.

    Layout::

        {transaction_name: {table_name: [{"type": <op>, "data": <dict>}, ...]}}
    """

    default_transaction: str = "default"
    store = None

    def __init__(self) -> None:
        super().__init__()
        self.store = {self.default_transaction: {}}

    def begin_txn(self, name: str):
        """Open (or reset) the named transaction *name*."""
        self.store[name] = {}

    def operation(
        self, operation: ZOperation, table: str, data: dict, transaction: str = None
    ):
        """Register one operation against *table* in the given transaction."""
        transact = transaction or self.default_transaction
        # renamed local: previously shadowed the `operation` parameter
        entry = {"type": operation.value, "data": data}
        if table in self.store[transact]:
            self.store[transact][table].append(entry)
        else:
            self.store[transact][table] = [entry]

    def insert(self, table: str, data: dict, transaction: str = None):
        self.operation(ZOperation.INSERT, table, data, transaction)

    def update(self, table: str, data: dict, transaction: str = None):
        self.operation(ZOperation.UPDATE, table, data, transaction)

    def delete(self, table: str, data: dict, transaction: str = None):
        self.operation(ZOperation.DELETE, table, data, transaction)

    def int_insert(self, table: str, data: dict, transaction: str = None):
        self.operation(ZOperation.INT_INSERT, table, data, transaction)

    def int_update(self, table: str, data: dict, transaction: str = None):
        self.operation(ZOperation.INT_UPDATE, table, data, transaction)

    def int_delete(self, table: str, data: dict, transaction: str = None):
        self.operation(ZOperation.INT_DELETE, table, data, transaction)

    def commit(self, name: str = None, pop: bool = False):
        """Return (and optionally remove) a transaction's recorded operations.

        Falls back to the default transaction when *name* is None or unknown.
        """
        # removed stray debug print of the whole store
        transact = name
        if name is None or name not in self.store:
            transact = self.default_transaction
        if pop:
            return self.store.pop(transact, None)
        return self.store[transact]
from abc import ABC, abstractmethod
from zpy.logger import TLogger
from typing import Any, Dict, Optional, TypeVar
from marshmallow_objects import models
T = TypeVar("T")
class DBConnection(ABC):
    """Abstract database-connection contract implemented by concrete drivers
    (e.g. OracleDB)."""

    @abstractmethod
    def init_local_client(self, path: str):
        """
        Initialize local client
        """
        pass

    @abstractmethod
    def connect(self):
        """Open the database connection."""
        pass

    @abstractmethod
    def close(self):
        """Close the database connection."""
        pass

    @abstractmethod
    def is_connected(self):
        """Return whether the connection is currently open."""
        pass

    @abstractmethod
    def get_connection(self):
        """Return the underlying connection object."""
        pass

    @abstractmethod
    def execute(
        self,
        function: str,
        type,
        parameters: dict = None,
        pool_connection=None,
    ):
        """Execute the named DB function with the given parameters."""
        pass

    @abstractmethod
    def get_pool_connection(self, logger: TLogger):
        """Acquire a connection from the pool."""
        pass

    @abstractmethod
    def release_connection(self, connection) -> bool:
        """Return *connection* to the pool; True on success."""
        pass

    @abstractmethod
    def call(self, fn: str, type, params, schmea: T):
        """Call a DB function (legacy API — prefer ``exec``)."""
        pass

    @abstractmethod
    def close_pool(self):
        """Dispose the connection pool."""
        pass

    @abstractmethod
    def initialize_pool(
        self,
        dns=None,
        homogeneous: bool = False,
        max: int = 5,
        user=None,
        pwd=None,
        min: int = 1,
        threaded: bool = False,
    ):
        """Create the connection pool with the given sizing/credentials."""
        pass

    @abstractmethod
    def exec(
        self,
        fn: str,
        ret_type: Any,
        params: Optional[Dict] = None,
        model: Optional[models.Model] = None,
        connection=None,
    ):
        """
        New feature for call oracle db functions
        Use this function instead function 'call'
        Parameters
        ----------
        fn : str | required
            Function name with package name: PO_LINES_PKG.FN_GET_LINE
        ret_type : OracleType | required
            The return type of oracle db function
        params : Dict | Optional
            Set parameter that the oracle funtion expects
        model : marshmallow_objects.models.Model | Optional
            Model specification where the db data will be volcated
        connection : DB Connection | Optional
            The db connection object, if it is not passed by params, it tries to get a global instance
        Raises
        ------
        NotValueProvided
            Connection
        Returns
        -------
        result set : Union[List[Model],int,float,str]
            The result set of oracle db function
        """
        pass
from zpy.logger import TLogger, c_info
from zpy.db.utils import get_current_schema
from marshmallow_objects import models
from typing import Any, Dict, List, Optional, Union
from zpy.db import DBConnection
from zpy.utils.funcs import exec_ifnt_null, safely_exec
from enum import Enum
from marshmallow import Schema
import cx_Oracle
import json
import logging
# from . import T
__author__ = "NoΓ© Cruz | [email protected]"
__copyright__ = "Copyright 2021, Small APi Project"
__credits__ = ["NoΓ© Cruz", "Zurck'z"]
__license__ = "MIT"
__version__ = "0.0.1"
__maintainer__ = "NoΓ© Cruz"
__email__ = "[email protected]"
__status__ = "Dev"
class OracleType(Enum):
cursor = cx_Oracle.CURSOR
number = cx_Oracle.NUMBER
string = cx_Oracle.STRING
integer = cx_Oracle.NUMBER
decimal = cx_Oracle.NUMBER
class OracleParam(Enum):
LIST_INTEGR = "LIST_INTEGER"
LIST_STR = "LIST_VARCHAR"
LIST_CLOB = "LIST_CLOB"
class ZParam:
def __init__(
self,
value: Union[List[int], List[float], List[str], List[Any]],
paramType: OracleParam,
key: str,
origin: str = None,
) -> None:
self.value = value
self.paramType = paramType
self.key = key
self.origin = origin
class IntList(ZParam):
def __init__(self, value: List[int], key: str, origin: str = None) -> None:
super().__init__(value, OracleParam.LIST_INTEGR, key, origin)
class StrList(ZParam):
def __init__(self, value: List[str], key: str, origin: str = None) -> None:
super().__init__(value, OracleParam.LIST_STR, key, origin)
class ClobList(ZParam):
def __init__(self, value: List[Any], key: str, origin: str) -> None:
super().__init__(value, OracleParam.LIST_CLOB, key, origin)
class OracleDB(DBConnection):
__local_client_initialized: bool = False
__local_client_path: str = None
__config_connection: dict = None
__connection = None
__is_connected: bool = False
__pool = None
__pool_created: bool = False
__schemas: List[Dict] = None
__env: str = None
__verbose: bool = False
# * Pool configurations
__max: int = 5
__min: int = 1
__threading: bool = False
__homogeneus: bool = True
def __init__(
self,
config: dict = None,
local_client_path: str = None,
schemas: List[Dict] = None,
env: str = None,
verbose: bool = False,
) -> None:
self.__local_client_path = local_client_path
self.__config_connection = config
self.__schemas = schemas
self.__env = env
self.__verbose = verbose
def init_local_client(self, path: str):
if self.__local_client_initialized:
return
value = path if self.__local_client_path is None else self.__local_client_path
try:
if value is None:
raise Exception("Local client path not provided.")
cx_Oracle.init_oracle_client(lib_dir=value)
self.__local_client_initialized = True
except Exception as e:
self.__local_client_initialized = False
logging.exception(e)
def __data_connection_checker(self, config: dict = None, mode="TSN") -> str:
values = (
config if self.__config_connection is None else self.__config_connection
)
if values is None:
raise Exception("The data for the connection was not provided")
server = values["DB_URI"]
port = values["DB_PORT"]
service = values["DB_SERVICE"]
user = values["DB_USER"]
password = values["DB_PASSWORD"]
if mode == "DSN":
return "{0}:{1}/{2}".format(server, port, service)
return "{0}/{1}@{2}:{3}/{4}".format(user, password, server, port, service)
def get_tsn_dsn_conenction(self, config: dict, mode="TSN") -> str:
return self.__data_connection_checker(config, mode)
def connect(self, config: dict = None):
"""
Start oracle connection
"""
if self.__is_connected:
return True
try:
str_connection = self.__data_connection_checker(config)
self.__connection = cx_Oracle.connect(str_connection)
self.__is_connected = True
return True
except Exception as e:
raise e
def close(self):
if self.__is_connected:
self.__connection.close()
self.__is_connected = False
self.is_connected = False
def is_connected(self) -> bool:
return self.__is_connected
def get_connection(self):
return self.__connection
def initialize_pool(
self,
dns=None,
homogeneous: bool = False,
max: int = 5,
user=None,
pwd=None,
min: int = 1,
threaded: bool = False,
) -> bool:
if self.__pool_created or dns is None:
return False
print("Initializing Pool")
self.__pool = cx_Oracle.SessionPool(
user=user,
password=pwd,
dsn=dns,
homogeneous=homogeneous,
encoding="UTF-8",
max=max,
min=min,
threaded=threaded,
)
self.__pool_created = True
self.__min = min
self.__max = max
self.__threading = threaded
self.__homogeneus = homogeneous
print("Pool Started Successfuly")
return True
def close_pool(self, force: bool = False):
try:
if self.__pool_created:
self.__pool.close(force=force)
self.__pool_created = False
print("Pool Closed Successfuly")
except Exception as e:
logging.exception(e)
def get_pool_connection(self, logger: TLogger = None):
if self.__pool_created:
return self.__pool.acquire()
exec_ifnt_null(
lambda l: l.add_info("DB POOL NOT INITILIZED, TRY INITIALIZE AGAIN"),
args=[logger],
)
try:
self.initialize_pool(
dns=self.get_tsn_dsn_conenction(self.__config_connection, "DSN"),
homogeneous=self.__homogeneus,
user=self.__config_connection["DB_USER"],
pwd=self.__config_connection["DB_PASSWORD"],
max=self.__max,
min=self.__min,
threaded=self.__threading,
)
self.__pool_created = True
return self.__pool.acquire()
except Exception as e:
logger.add_exception(e)
self.__pool_created = False
def release_connection(self, connection) -> bool:
try:
if self.__pool_created:
self.__pool.release(connection)
return True
except Exception as e:
print("CATCH EXCEPTION WHEN TRY RELEASE POOL CONNECTION")
logging.exception(e)
return False
def __proccess_result(self, result_set, type: OracleType, schema: Schema = None):
if type == OracleType.cursor:
columns = [field[0] for field in result_set.description]
if schema is None:
columns = [field[0] for field in result_set.description]
rows = [d for d in result_set]
data = [dict(zip(columns, row)) for row in rows]
for d in data:
for key, value in d.items():
if isinstance(d[key], cx_Oracle.LOB):
d[key] = json.loads(str(value))
return {"hasError": False, "data": json.dumps(data, default=str)}
else:
# [print(dict(zip(columns, r))) for r in result_set]
return [schema.load(dict(zip(columns, r))) for r in result_set]
elif OracleType.number:
try:
return float(result_set)
except:
return result_set
else:
return str(result_set)
def execute(
self,
function: str,
type: OracleType,
parameters: dict = None,
pool_connection=None,
):
"""
Execute or call oracle functions - FN v0.0.1 | Core v0.0.1
"""
if pool_connection is not None:
cursor = pool_connection.cursor()
else:
cursor = self.__connection.cursor()
if self.__verbose:
self.show_info(function, parameters, type, None, None, None)
try:
db_execute = (
cursor.callfunc(function, type.value, keywordParameters=parameters)
if parameters != None
else cursor.callfunc(function, type.value)
)
if type == OracleType.cursor:
columns = [field[0] for field in db_execute.description]
rows = [d for d in db_execute]
data = [dict(zip(columns, row)) for row in rows]
for d in data:
for key, value in d.items():
if isinstance(d[key], cx_Oracle.LOB):
d[key] = json.loads(str(value))
db_dto = {"hasError": False, "data": json.dumps(data, default=str)}
elif OracleType.number:
db_dto = {"hasError": False, "data": str(db_execute)}
else:
db_dto = {"hasError": False, "data": db_execute}
except Exception as e:
db_dto = {"hasError": True, "data": str(e)}
safely_exec(lambda c: c.close(), args=[cursor]) # * Close cursor
return db_dto
def call(
self,
fn: str,
type: OracleType,
params: dict,
schema: Schema = None,
pool_connection=None,
):
"""
Execute or call oracle functions - FN v0.0.1 | Core v0.0.2
"""
if pool_connection is not None:
cursor = pool_connection.cursor()
else:
cursor = self.__connection.cursor()
if self.__verbose:
self.show_info(fn, params, type, schema, None, None)
result_set = (
cursor.callfunc(fn, type.value, keywordParameters=params)
if params != None
else cursor.callfunc(fn, type.value)
)
safely_exec(lambda c: c.close(), args=[cursor]) # * Close cursor
return self.__proccess_result(result_set, type, schema)
def exec(
self,
fn: str,
ret_type: OracleType,
params: Optional[Dict] = None,
custom_params: Optional[List[ZParam]] = None,
model: Optional[models.Model] = None,
connection=None,
db_schema: str = None,
env: str = None,
):
"""
Execute or call oracle functions - FN v0.0.1 | Core v0.0.7
New feature for call oracle db functions
Use this function instead function 'call'
Parameters
----------
fn : str | required
Function name with package name: PO_LINES_PKG.FN_GET_LINE
ret_type : OracleType | required
The return type of oracle db function
params : Dict | Optional
Set parameter that the oracle funtion expects
custom_params : Optional[List[ZParam, IntList, StrList, ClobList]] | Optional
Custom Set parameter that the oracle funtion expects, see avaialble custom types
model : marshmallow_objects.models.Model | Optional
Model specification where the db data will be volcated
connection : DB Connection | Optional
The db connection object, if it is not passed by params, it tries to get a global instance
Raises
------
NotValueProvided
Connection
Returns
-------
result set : Union[List[Model],int,float,str]
The result set of oracle db function
"""
cursor = None
if connection is not None:
cursor = connection.cursor()
else:
cursor = self.__connection.cursor()
if connection is None:
raise Exception("Can't get db connection")
if db_schema is None and self.__schemas is not None:
db_schema = get_current_schema(self.__schemas, env, self.__env)
if custom_params != None and len(custom_params) > 0:
if params == None:
params = {}
# * Find the current env for extract the schema
for custom in custom_params:
params[custom.key] = self.__custom_param(
connection,
paramType=custom.paramType,
value=custom.value,
schema=db_schema,
)
fn = (
fn
if db_schema is None or db_schema.replace(" ", "") == ""
else f"{db_schema}.{fn}"
)
if self.__verbose:
self.show_info(fn, params, ret_type, model, db_schema, env)
result_set = (
cursor.callfunc(fn, ret_type.value, keywordParameters=params)
if params != None
else cursor.callfunc(fn, ret_type.value)
)
safely_exec(lambda c: c.close(), args=[cursor])
return self.__proccess_result_set(result_set, ret_type, model)
def __proccess_result_set(
self, result_set, ret_type: OracleType, model: models.Model = None
):
"""
New version of result set processor
"""
if ret_type == OracleType.cursor:
columns = [field[0] for field in result_set.description]
if model is None:
columns = [field[0] for field in result_set.description]
rows = [d for d in result_set]
data = [dict(zip(columns, row)) for row in rows]
for d in data:
for key, value in d.items():
if isinstance(d[key], cx_Oracle.LOB):
d[key] = json.loads(str(value))
return {"data": json.dumps(data, default=str)}
else:
return [model(**dict(zip(columns, r))) for r in result_set]
elif OracleType.number:
try:
return float(result_set)
except:
return result_set
elif OracleType.integer:
try:
return int(result_set)
except:
return result_set
elif OracleType.decimal:
try:
return float(result_set)
except:
return result_set
else:
return str(result_set)
def __custom_param(
self,
connection: Any,
paramType: OracleParam,
value: Union[List[int], List[float], List[str], List[Any]],
schema: str = None,
):
"""
Make custom param
"""
db_schema = (
"" if (schema is None or schema.replace(" ", "") == "") else f"{schema}."
)
list_type = connection.gettype(f"{db_schema}{paramType.value}")
return list_type.newobject(value)
def show_info(self, fn, params, ret_type, v_model, curr_schema, l_env):
c_info("\n|-------------------------------------------------|\n", True)
c_info(f" Function Called: {fn} ", True)
c_info(" Params: {}".format(params), True)
c_info(" Return Type: {}".format(ret_type.value), True)
c_info(f" Ref Volcated Model: {v_model}", True)
c_info(f" DB Schema: {curr_schema}", True)
c_info(f" Environment: P: {l_env} G: {self.__env}", True)
c_info("\n|-------------------------------------------------|\n", True) | zpy-flask-msc | /zpy_flask_msc-0.0.1-py3-none-any.whl/zpy/db/oracle.py | oracle.py |
from typing import Any, Dict, List
from zpy.cloud.aws import AWSCredentials, AWS_DEFAULT_REGION
import boto3
import json
class Firehose:
fh_client = None
with_profile: bool = False
def __init__(
self,
credentials: AWSCredentials = None,
with_profile: bool = True,
) -> None:
self.with_profile = with_profile
if with_profile or credentials is None:
self.fh_client = boto3.client("firehose", region_name=AWS_DEFAULT_REGION)
else:
self.fh_client = boto3.client(
"firehose",
aws_access_key_id=credentials.access_key,
aws_secret_access_key=credentials.secret_key,
region_name=credentials.region,
)
def send_data_record(self, data: dict, stream_name: str):
"""
Put one record to delivery stream
"""
try:
response = self.fh_client.put_record(
DeliveryStreamName=stream_name, Record=self.__prepare_record(data)
)
return response
except Exception as e:
print(e)
return None
def __prepare_record(self, data: Dict) -> Dict:
dumped = json.dumps(data)
encoded = dumped.encode("utf-8")
return {"Data": encoded}
def send_batch_data(self, data: List[Dict[Any, Any]], stream_name: str):
"""
Put one record to delivery stream
:data List of record to send firehouse
"""
try:
records = list(map(self.__prepare_record, data))
response = self.fh_client.put_record_batch(
DeliveryStreamName=stream_name, Records=records
)
return response
except Exception as e:
print(e)
return None
def describe_stream(
self, stream_name: str, limit: int = 123, startId: str = None
) -> dict:
try:
response = self.fh_client.describe_delivery_stream(
DeliveryStreamName=stream_name,
Limit=limit,
ExclusiveStartDestinationId=startId,
)
return response
except Exception as e:
print(e)
return None | zpy-flask-msc | /zpy_flask_msc-0.0.1-py3-none-any.whl/zpy/cloud/aws/firehose.py | firehose.py |
from typing import Dict, Union
from zpy.api.models import ZModel
from zpy.cloud.aws import AWSCredentials, AWS_DEFAULT_REGION
import boto3
import json
__author__ = "NoΓ© Cruz | [email protected]"
__copyright__ = "Copyright 2021, Small APi Project"
__credits__ = ["NoΓ© Cruz", "Zurck'z"]
__license__ = "MIT"
__version__ = "0.0.1"
__maintainer__ = "NoΓ© Cruz"
__email__ = "[email protected]"
__status__ = "Dev"
class SSMParameter:
ssm_client = None
with_profile: bool = False
store = {}
prefix: str = None
def __init__(
self,
credentials: AWSCredentials = None,
with_profile: bool = True,
prefix: str = None,
) -> None:
self.with_profile = with_profile
self.prefix = prefix
if with_profile or credentials is None:
self.ssm = boto3.client("ssm", region_name=AWS_DEFAULT_REGION)
else:
self.ssm = boto3.client(
"ssm",
aws_access_key_id=credentials.access_key,
aws_secret_access_key=credentials.secret_key,
region_name=credentials.region,
)
def get_from_cache(self, name: str, model: ZModel = None) -> Union[Dict, ZModel]:
"""
Get parameter stored in cache.
"""
if name not in self.store:
return None
data = self.store[name]
if model == None:
return data
return model(**data)
def get(
self,
prefix: str = None,
decryption: bool = True,
store: bool = False,
store_name: str = "",
model: ZModel = None,
refresh: bool = False,
) -> Union[Dict, ZModel]:
"""
Get paramater from AWS SSM
"""
if store_name in self.store and refresh == False:
data = self.store[store_name]
if model != None:
return model(**data)
return data
if prefix == None and self.prefix == None:
raise Exception("Prefix or parameter name didnt provided.")
real_path = prefix or ""
if self.prefix != None:
real_path = f"{self.prefix}{real_path}"
parameter = self.ssm.get_parameter(Name=real_path, WithDecryption=decryption)
if store and store_name:
self.store[store_name] = json.loads(parameter["Parameter"]["Value"])
if model == None:
return json.loads(parameter["Parameter"]["Value"])
return model(**json.loads(parameter["Parameter"]["Value"])) | zpy-flask-msc | /zpy_flask_msc-0.0.1-py3-none-any.whl/zpy/cloud/aws/ssm.py | ssm.py |
import json
from botocore.exceptions import ClientError
import boto3
from typing import Any, Dict
from . import AWSCredentials, CredentialsMode, AWS_DEFAULT_REGION
class S3:
initialized: bool = False
s3_client = None
with_profile: bool = True
def __init__(
self,
credentials: AWSCredentials = None,
bucket: str = None,
initialize: bool = False,
with_profile: bool = True,
) -> None:
self.credentials = credentials
self.bucket = bucket
self.with_profile = with_profile
if initialize:
self.__init_client(credentials, with_profile)
def set_credentials_mode(self, mode: CredentialsMode):
if mode == CredentialsMode.CREDENTIALS:
self.with_profile = False
return True
self.with_profile = True
return True
def __init_client(self, credentials: AWSCredentials, profile: bool = True):
if credentials is None and profile == False:
raise Exception("Credentials didn't provided")
if credentials != None and profile == False:
self.s3_client = boto3.client(
"s3",
aws_access_key_id=credentials.access_key,
aws_secret_access_key=credentials.secret_key,
# aws_session_token=credentials.session_token,
region_name=AWS_DEFAULT_REGION,
)
else:
self.s3_client = boto3.client(
"s3",
region_name=AWS_DEFAULT_REGION,
)
self.initialized = True
def get(
self, full_key: str, bucket: str = None, credentials: AWSCredentials = None
) -> Any:
"""
Get object from s3 bucket
"""
real_bucket = self.__validate(bucket, credentials)
s3_object = self.s3_client.get_object(Bucket=real_bucket, Key=full_key)
content = s3_object["Body"].read()
return content
def download(
self,
full_key: str,
bucket: str,
local_file: str,
credentials: AWSCredentials = None,
) -> Any:
"""
Get object from s3 bucket
"""
real_bucket = self.__validate(bucket, credentials)
self.s3_client.download_file(real_bucket, full_key, local_file)
return True
def get_json(
self, full_key: str, bucket: str = None, credentials: AWSCredentials = None
):
json_obj = self.get(full_key, bucket, credentials)
return json.loads(json_obj.decode("utf-8"))
def put(
self,
object: Any,
full_key: str,
bucket: str = None,
credentials: AWSCredentials = None,
) -> Any:
"""
Put object from s3 bucket
"""
real_bucket = self.__validate(bucket, credentials)
result = self.s3_client.put_object(Body=object, Bucket=bucket, Key=full_key)
return result
def put_json(
self,
json_object: Dict,
full_key: str,
bucket: str = None,
credentials: AWSCredentials = None,
):
"""
Upload JSON|DICT object to S3
Parameters
----------
json_object : dict
The dict json to upload s3
full_key : str
The file name
bucket : str, optional
Bucket name
"""
json_parsed = str(json.dumps(json_object))
return self.put(json_parsed, full_key, bucket, credentials)
def upload(
self,
object_up: Any,
object_name: str,
bucket: str = None,
credentials: AWSCredentials = None,
) -> bool:
try:
real_bucket = self.__validate(bucket, credentials)
response = self.s3_client.upload_fileobj(
object_up, real_bucket, object_name
)
return response
except ClientError as e:
print(e)
return None
def __validate(self, bucket: str = None, credentials: AWSCredentials = None) -> str:
"""
Vrify aws credentials
"""
real_bucket = self.bucket if bucket is None else bucket
if real_bucket is None:
raise Exception("Bucket didn't provided")
if self.initialized == False:
self.__init_client(self.credentials if credentials is None else credentials)
return real_bucket | zpy-flask-msc | /zpy_flask_msc-0.0.1-py3-none-any.whl/zpy/cloud/aws/s3.py | s3.py |
import boto3
from botocore.exceptions import ClientError
import datetime
class SSMParameterStore(object):
"""
Provide a dictionary-like interface to access AWS SSM Parameter Store
"""
def __init__(self, prefix=None, ssm_client=None, ttl=None):
self._prefix = (prefix or "").rstrip("/") + "/"
self._client = ssm_client or boto3.client("ssm")
self._keys = None
self._substores = {}
self._ttl = ttl
def get(self, name, **kwargs):
assert name, "Name can not be empty"
if self._keys is None:
self.refresh()
abs_key = "%s%s" % (self._prefix, name)
if name not in self._keys:
if "default" in kwargs:
return kwargs["default"]
raise KeyError(name)
elif self._keys[name]["type"] == "prefix":
if abs_key not in self._substores:
store = self.__class__(
prefix=abs_key, ssm_client=self._client, ttl=self._ttl
)
store._keys = self._keys[name]["children"]
self._substores[abs_key] = store
return self._substores[abs_key]
else:
return self._get_value(name, abs_key)
def refresh(self):
self._keys = {}
self._substores = {}
paginator = self._client.get_paginator("describe_parameters")
pager = paginator.paginate(
ParameterFilters=[
dict(Key="Path", Option="Recursive", Values=[self._prefix])
]
)
for page in pager:
for p in page["Parameters"]:
paths = p["Name"][len(self._prefix) :].split("/")
self._update_keys(self._keys, paths)
@classmethod
def _update_keys(cls, keys, paths):
name = paths[0]
# this is a prefix
if len(paths) > 1:
if name not in keys:
keys[name] = {"type": "prefix", "children": {}}
cls._update_keys(keys[name]["children"], paths[1:])
else:
keys[name] = {"type": "parameter", "expire": None}
def keys(self):
if self._keys is None:
self.refresh()
return self._keys.keys()
def _get_value(self, name, abs_key):
entry = self._keys[name]
# simple ttl
if self._ttl == False or (
entry["expire"] and entry["expire"] <= datetime.datetime.now()
):
entry.pop("value", None)
if "value" not in entry:
parameter = self._client.get_parameter(Name=abs_key, WithDecryption=True)[
"Parameter"
]
value = parameter["Value"]
if parameter["Type"] == "StringList":
value = value.split(",")
entry["value"] = value
if self._ttl:
entry["expire"] = datetime.datetime.now() + datetime.timedelta(
seconds=self._ttl
)
else:
entry["expire"] = None
return entry["value"]
def __contains__(self, name):
try:
self.get(name)
return True
except:
return False
def __getitem__(self, name):
return self.get(name)
def __setitem__(self, key, value):
raise NotImplementedError()
def __delitem__(self, name):
raise NotImplementedError()
def __repr__(self):
return "ParameterStore[%s]" % self._prefix | zpy-flask-msc | /zpy_flask_msc-0.0.1-py3-none-any.whl/zpy/cloud/aws/ssm_paramater_store.py | ssm_paramater_store.py |
from Cryptodome.Cipher import AES
from Cryptodome import Random
from hashlib import sha256
import base64
import os
import json
__author__ = "NoΓ© Cruz | [email protected]"
__copyright__ = "Copyright 2007, The Cogent Project"
__credits__ = ["NoΓ© Cruz", "Zurck'z", "Jesus Salazar"]
__license__ = "MIT"
__version__ = "0.0.1"
__maintainer__ = "NoΓ© Cruz"
__email__ = "[email protected]"
__status__ = "Dev"
## Class Encryptor
# Encryptor class contains AES encrypt/decrypt functions
#
class AESEncryptor:
"""
Helper class for data security this contains certain methods for it.
AES (Advanced Encryption Standard) is a symmetric block cipher standardized by NIST .
It has a fixed data block size of 16 bytes. Its keys can be 128, 192, or 256 bits long.
Attributes
----------
default_block_size : int
Default block size for aes (default 32)
_sk_env : str
Key for get secret key from environment
Methods
-------
__is_valid(sk=None)
Check if the secret key of argument is null, if that is null try to get secret key from environment.
encrypt
"""
default_block_size: int = 32
_sk_env = "AES_SK"
@staticmethod
def __is_valid(sk: str = None):
if sk is not None:
return sk
sk_env: str = os.getenv(AESEncryptor._sk_env)
if sk_env is not None:
return sk_env
raise Exception("AES Secret key was not provided!")
@staticmethod
def decrypt_ws_response(payload: dict, secret_key=None) -> dict:
json_decrypted = AESEncryptor.decrypt(payload["data"], secret_key)
return json_decrypted
@staticmethod
def encrypt_ws_request(payload: dict, secret_key=None) -> dict:
encrypted_payload = AESEncryptor.encrypt(json.dumps(payload), secret_key)
return {"data": encrypted_payload}
@staticmethod
def json_decrypt(json_encrypted: str, secret_key=None) -> dict:
return json.loads(AESEncryptor.encrypt(json_encrypted, secret_key))
@staticmethod
def json_encrypt(json_to_encrypt: dict, secret_key=None) -> str:
json_str = json.dumps(json_to_encrypt)
return AESEncryptor.encrypt(json_str, secret_key)
@staticmethod
def json_decrypt(json_encrypted: str, secret_key=None) -> dict:
return json.loads(AESEncryptor.encrypt(json_encrypted, secret_key))
@staticmethod
def encrypt(
value: str,
secret_key: str = None,
aes_mode=AES.MODE_CBC,
charset="utf-8",
block_size: int = 16,
) -> str:
secret_key = AESEncryptor.__is_valid(secret_key).encode(charset)
raw_bytes = AESEncryptor.__pad(value)
iv = Random.new().read(block_size)
cipher = AES.new(secret_key, aes_mode, iv)
return base64.b64encode(iv + cipher.encrypt(raw_bytes)).decode(charset)
@staticmethod
def decrypt(
value: str, secret_key=None, aes_mode=AES.MODE_CBC, charset="utf-8"
) -> str:
secret_key = str.encode(AESEncryptor.__is_valid(secret_key))
encrypted = base64.b64decode(value)
iv = encrypted[:16]
cipher = AES.new(secret_key, aes_mode, iv)
return AESEncryptor.__un_pad(cipher.decrypt(encrypted[16:])).decode(charset)
@staticmethod
def genHash(value: str, charset="utf-8") -> str:
return sha256(value.encode(charset)).hexdigest()
@staticmethod
def __pad(s: str, block_size: int = 16, charset: str = "utf-8") -> bytes:
return bytes(
s
+ (block_size - len(s) % block_size)
* chr(block_size - len(s) % block_size),
charset,
)
@staticmethod
def __un_pad(value: str) -> str:
return value[0 : -ord(value[-1:])] | zpy-flask-msc | /zpy_flask_msc-0.0.1-py3-none-any.whl/zpy/utils/Encryptor.py | Encryptor.py |
import gzip
import json
__author__ = "NoΓ© Cruz | [email protected]"
__copyright__ = "Copyright 2007, The Cogent Project"
__credits__ = ["NoΓ© Cruz", "Zurck'z", "Jesus Salazar"]
__license__ = "MIT"
__version__ = "0.0.1"
__maintainer__ = "NoΓ© Cruz"
__email__ = "[email protected]"
__status__ = "Dev"
## Class GZip
# Zipper class contains compress/decompress gzip methods
#
class GZip():
"""
Helper compress/decompress gzip
"""
@staticmethod
def unzip_json( json_data_compressed) -> dict:
"""Decompress json compressed provided
Parameters
----------
data : bytes | str
JSON data to descompress
Returns
-------
dict
JSON descompressed
"""
return json.loads(GZip.decompress(json_data_compressed))
@staticmethod
def gzip_json( json_data: dict) -> bytes:
"""Compress JSON with gzip
Parameters
----------
data : str
JSON data to compress
enc : str, optional
charset encoding (default is
utf-8)
Returns
-------
bytes
Data compressed
"""
return GZip.compress(json.dumps(json_data))
@staticmethod
def compress( data: str, enc = 'utf-8') -> bytes:
"""Decompress data provided
Parameters
----------
data : str
Data to compress
enc : str, optional
charset encoding (default is
utf-8)
Returns
-------
bytes
Data compressed
"""
return gzip.compress(bytes(data,enc))
@staticmethod
def decompress(data: bytes, enc = 'utf-8') -> str:
"""Decompress data provided
Parameters
----------
data : str
Gzip compressed data
enc : str, optional
charset encoding (default is
utf-8)
Returns
-------
str
Data descompressed in string format
"""
return gzip.decompress(data).decode(enc) | zpy-flask-msc | /zpy_flask_msc-0.0.1-py3-none-any.whl/zpy/utils/GZip.py | GZip.py |
<div align="center">
<a href="https://www.zumolabs.ai/?utm_source=github.com&utm_medium=referral&utm_campaign=zpy"><img src="https://github.com/ZumoLabs/zpy/raw/main/docs/assets/zl_tile_logo.png" width="100px"/></a>
**`zpy`: Synthetic data in Blender.**
<p align="center">
<a href="https://discord.gg/nXvXweHtG8"><img alt="Discord" title="Discord" src="https://img.shields.io/badge/-ZPY Devs-grey?style=for-the-badge&logo=discord&logoColor=white"/></a>
<a href="https://twitter.com/ZumoLabs"><img alt="Twitter" title="Twitter" src="https://img.shields.io/badge/-@ZumoLabs-1DA1F2?style=for-the-badge&logo=twitter&logoColor=white"/></a>
<a href="https://www.youtube.com/channel/UCcU2Z8ArljfDzfq7SOz-ytQ"><img alt="Youtube" title="Youtube" src="https://img.shields.io/badge/-ZumoLabs-red?style=for-the-badge&logo=youtube&logoColor=white"/></a>
<a href="https://pypi.org/project/zpy-zumo/"><img alt="PyPI" title="PyPI" src="https://img.shields.io/badge/-PyPI-yellow?style=for-the-badge&logo=PyPI&logoColor=white"/></a>
<a href="https://zumolabs.github.io/zpy/"><img alt="Docs" title="Docs" src="https://img.shields.io/badge/-Docs-black?style=for-the-badge&logo=Read%20the%20docs&logoColor=white"/></a>
</p>
</div>

## Abstract
Collecting, labeling, and cleaning data for computer vision is a pain. Jump into the future and create your own data instead! Synthetic data is faster to develop with, effectively infinite, and gives you full control to prevent bias and privacy issues from creeping in. We created `zpy` to make synthetic data easy, by simplifying the simulation (sim) creation process and providing an easy way to generate synthetic data at scale.
Check out our full [**documentation** :bookmark_tabs:](https://zumolabs.github.io/zpy/)
## What's New? :rocket:
- (06/23/21) Check out our new [script writing guide](https://zumolabs.github.io/zpy/zpy/tutorials/script_writing_guide/)
- (06/17/21) Read our [latest article](https://www.zumolabs.ai/post/synthetic-data-shelf-simulation) on our blog!
- (06/08/21) Blender 2.93 Support is out! Latest features can be found [on blender.org](https://www.blender.org/download/releases/2-93/).
## Install [:thinking:](https://zumolabs.github.io/zpy/zpy/install/pip/)
You can install `zpy` with pip:
```
pip install zpy-zumo
```
More installation instructions can be found in the docs:
- [Install using pip **(Windows/Mac/Linux)**](https://zumolabs.github.io/zpy/zpy/install/pip/)
- [Install Blender Addon from .zip **(Windows/Mac/Linux)**](https://zumolabs.github.io/zpy/addon/install/)
- [Install from script **(Mac/Linux)**](https://zumolabs.github.io/zpy/zpy/install/script/)
- [Developer mode **(Linux)**](https://zumolabs.github.io/zpy/zpy/install/linux/)
- [Developer mode **(Windows)**](https://zumolabs.github.io/zpy/zpy/install/windows/)
| OS | Status |
|:-----------|:-----------|
| Linux | :heavy_check_mark: |
| MacOS | :heavy_check_mark: |
| Windows | [zpy#126](https://github.com/ZumoLabs/zpy/issues/126) |
## Contribute [:busts_in_silhouette:](https://zumolabs.github.io/zpy/overview/contribute/)
We welcome community contributions! Search through the [current issues](https://github.com/ZumoLabs/zpy/issues) or open your own.
## License [:page_facing_up:](https://zumolabs.github.io/zpy/overview/license/)
This release of zpy is under the GPLv3 license, a free copyleft license used by Blender. TLDR: Its free, use it!
## Citation [:writing_hand:](https://zumolabs.github.io/zpy/overview/citation/)
If you use `zpy` in your research, we would appreciate the citation!
```bibtex
@misc{zpy,
title={zpy: Synthetic data for Blender.},
author={Ponte, H. and Ponte, N. and Crowder, S.},
journal={GitHub. Note: https://github.com/ZumoLabs/zpy},
volume={1},
year={2021}
}
```
| zpy-zumo | /zpy-zumo-1.4.1rc7.tar.gz/zpy-zumo-1.4.1rc7/README.md | README.md |
from __future__ import print_function
try:
import configparser
except ImportError:
import ConfigParser as configparser
import errno
import json
import os
import re
import subprocess
import sys
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_root():
"""Get the project root directory.
We require that all commands are run from the project root, i.e. the
directory that contains setup.py, setup.cfg, and versioneer.py .
"""
root = os.path.realpath(os.path.abspath(os.getcwd()))
setup_py = os.path.join(root, "setup.py")
versioneer_py = os.path.join(root, "versioneer.py")
if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
# allow 'python path/to/setup.py COMMAND'
root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0])))
setup_py = os.path.join(root, "setup.py")
versioneer_py = os.path.join(root, "versioneer.py")
if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
err = (
"Versioneer was unable to run the project root directory. "
"Versioneer requires setup.py to be executed from "
"its immediate directory (like 'python setup.py COMMAND'), "
"or in a way that lets it use sys.argv[0] to find the root "
"(like 'python path/to/setup.py COMMAND')."
)
raise VersioneerBadRootError(err)
try:
# Certain runtime workflows (setup.py install/develop in a setuptools
# tree) execute all dependencies in a single python process, so
# "versioneer" may be imported multiple times, and python's shared
# module-import table will cache the first one. So we can't use
# os.path.dirname(__file__), as that will find whichever
# versioneer.py was first imported, even in later projects.
me = os.path.realpath(os.path.abspath(__file__))
me_dir = os.path.normcase(os.path.splitext(me)[0])
vsr_dir = os.path.normcase(os.path.splitext(versioneer_py)[0])
if me_dir != vsr_dir:
print(
"Warning: build in %s is using versioneer.py from %s"
% (os.path.dirname(me), versioneer_py)
)
except NameError:
pass
return root
def get_config_from_root(root):
    """Read the project setup.cfg file to determine Versioneer config.

    @param root: directory expected to contain setup.cfg.
    @return: a VersioneerConfig populated from the [versioneer] section.
    """
    # This might raise EnvironmentError (if setup.cfg is missing), or
    # configparser.NoSectionError (if it lacks a [versioneer] section), or
    # configparser.NoOptionError (if it lacks "VCS="). See the docstring at
    # the top of versioneer.py for instructions on writing your setup.cfg .
    setup_cfg = os.path.join(root, "setup.cfg")
    # BUGFIX: SafeConfigParser and readfp() were deprecated for years and
    # removed in Python 3.12; ConfigParser/read_file() behave the same on
    # Python 3.  The explicit open() is kept (instead of parser.read())
    # so a missing setup.cfg still raises EnvironmentError as documented.
    parser = configparser.ConfigParser()
    with open(setup_cfg, "r") as f:
        if hasattr(parser, "read_file"):
            parser.read_file(f)
        else:  # Python 2's ConfigParser only offers readfp()
            parser.readfp(f)
    VCS = parser.get("versioneer", "VCS")  # mandatory

    def get(parser, name):
        # Optional settings: map a missing option to None.
        if parser.has_option("versioneer", name):
            return parser.get("versioneer", name)
        return None

    cfg = VersioneerConfig()
    cfg.VCS = VCS
    cfg.style = get(parser, "style") or ""
    cfg.versionfile_source = get(parser, "versionfile_source")
    cfg.versionfile_build = get(parser, "versionfile_build")
    cfg.tag_prefix = get(parser, "tag_prefix")
    if cfg.tag_prefix in ("''", '""'):
        # setup.cfg cannot express an empty value directly; quoted-empty
        # spellings mean "no prefix".
        cfg.tag_prefix = ""
    cfg.parentdir_prefix = get(parser, "parentdir_prefix")
    cfg.verbose = get(parser, "verbose")
    return cfg
class NotThisMethod(Exception):
    """Exception raised if a method is not valid for the current scenario.

    Used as internal control flow: each version-extraction strategy raises
    NotThisMethod so the caller can fall through to the next strategy.
    """
# Registries of VCS-specific tools, keyed by VCS name.
LONG_VERSION_PY = {}
HANDLERS = {}


def register_vcs_handler(vcs, method):  # decorator
    """Return a decorator that registers its function in HANDLERS[vcs][method]."""

    def decorate(f):
        """Record f under HANDLERS[vcs][method] and hand it back unchanged."""
        HANDLERS.setdefault(vcs, {})[method] = f
        return f

    return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None):
    """Launch the first runnable executable from *commands* with *args*.

    Returns (stdout, returncode).  (None, None) means no candidate could be
    started; (None, rc) means the command ran but exited non-zero.
    """
    assert isinstance(commands, list)
    process = None
    dispcmd = None
    for candidate in commands:
        dispcmd = str([candidate] + args)
        try:
            # remember shell=False, so use git.cmd on windows, not just git
            process = subprocess.Popen(
                [candidate] + args,
                cwd=cwd,
                env=env,
                stdout=subprocess.PIPE,
                stderr=(subprocess.PIPE if hide_stderr else None),
            )
        except EnvironmentError:
            e = sys.exc_info()[1]
            if e.errno == errno.ENOENT:
                # this candidate doesn't exist; try the next spelling
                continue
            if verbose:
                print("unable to run %s" % dispcmd)
                print(e)
            return None, None
        break
    else:
        if verbose:
            print("unable to find command, tried %s" % (commands,))
        return None, None
    stdout = process.communicate()[0].strip()
    if sys.version_info[0] >= 3:
        stdout = stdout.decode()
    if process.returncode != 0:
        if verbose:
            print("unable to run %s (error)" % dispcmd)
            print("stdout was %s" % stdout)
        return None, process.returncode
    return stdout, process.returncode
LONG_VERSION_PY[
"git"
] = '''
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.18 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s"
git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s"
git_date = "%(DOLLAR)sFormat:%%ci%(DOLLAR)s"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "%(STYLE)s"
cfg.tag_prefix = "%(TAG_PREFIX)s"
cfg.parentdir_prefix = "%(PARENTDIR_PREFIX)s"
cfg.versionfile_source = "%(VERSIONFILE_SOURCE)s"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %%s" %% dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %%s" %% (commands,))
return None, None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %%s (error)" %% dispcmd)
print("stdout was %%s" %% stdout)
return None, p.returncode
return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None, "date": None}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print("Tried directories %%s but none started with prefix %%s" %%
(str(rootdirs), parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# git-2.2.0 added "%%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %%d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%%s', no digits" %% ",".join(refs - tags))
if verbose:
print("likely tags: %%s" %% ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %%s" %% r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": date}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %%s not under git control" %% root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long",
"--match", "%%s*" %% tag_prefix],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%%s'"
%% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%%s' doesn't start with prefix '%%s'"
print(fmt %% (full_tag, tag_prefix))
pieces["error"] = ("tag '%%s' doesn't start with prefix '%%s'"
%% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%%ci", "HEAD"],
cwd=root)[0].strip()
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%%d.g%%s" %% (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%%d.g%%s" %% (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_pre(pieces):
"""TAG[.post.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%%d" %% pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%%d" %% pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%%s" %% pieces["short"]
else:
# exception #1
rendered = "0.post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%%s" %% pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
Eexceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
Like 'git describe --tags --dirty --always -long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%%s'" %% style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None,
"date": pieces.get("date")}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version", "date": None}
'''
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
    """Extract version information from the given file.

    Scans *versionfile_abs* for the git_refnames / git_full / git_date
    assignments and returns whichever were found under the keys
    "refnames", "full" and "date".  Missing/unreadable files yield an
    empty (or partial) dict rather than an error.
    """
    # the code embedded in _version.py can just fetch the value of these
    # keywords. When used from setup.py, we don't want to import _version.py,
    # so we do it with a regexp instead. This function is not used from
    # _version.py.
    keywords = {}
    try:
        # BUGFIX: use a context manager so the handle is closed even when a
        # read raises; the original only closed it on the success path.
        with open(versionfile_abs, "r") as f:
            for line in f.readlines():
                if line.strip().startswith("git_refnames ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["refnames"] = mo.group(1)
                if line.strip().startswith("git_full ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["full"] = mo.group(1)
                if line.strip().startswith("git_date ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["date"] = mo.group(1)
    except EnvironmentError:
        pass
    return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
    """Derive version information from expanded git-archive keywords."""
    if not keywords:
        raise NotThisMethod("no keywords at all, weird")
    date = keywords.get("date")
    if date is not None:
        # git-2.2.0 added "%cI" (strict ISO-8601), but "%ci" has been around
        # since git-1.5.3 and detecting the git version is too awkward, so we
        # take the "ISO-8601-like" %ci output and patch it into compliance.
        date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
    refnames = keywords["refnames"].strip()
    if refnames.startswith("$Format"):
        if verbose:
            print("keywords are unexpanded, not using")
        raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
    refs = {ref.strip() for ref in refnames.strip("()").split(",")}
    # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
    # just "foo-1.0". If we see a "tag: " prefix, prefer those.
    TAG = "tag: "
    tags = {ref[len(TAG):] for ref in refs if ref.startswith(TAG)}
    if not tags:
        # Either git < 1.8.3, or there really are no tags.  Heuristic:
        # version tags contain a digit.  The old git %d expansion strips
        # refs/heads/ and refs/tags/ prefixes, so this filters out common
        # branch names ("release", "stabilization", "HEAD", "master").
        tags = {ref for ref in refs if re.search(r"\d", ref)}
        if verbose:
            print("discarding '%s', no digits" % ",".join(refs - tags))
    if verbose:
        print("likely tags: %s" % ",".join(sorted(tags)))
    for ref in sorted(tags):
        # sorting will prefer e.g. "2.0" over "2.0rc1"
        if not ref.startswith(tag_prefix):
            continue
        version = ref[len(tag_prefix):]
        if verbose:
            print("picking %s" % version)
        return {
            "version": version,
            "full-revisionid": keywords["full"].strip(),
            "dirty": False,
            "error": None,
            "date": date,
        }
    # no suitable tags, so version is "0+unknown", but full hex is still there
    if verbose:
        print("no suitable tags, using unknown + full revision id")
    return {
        "version": "0+unknown",
        "full-revisionid": keywords["full"].strip(),
        "dirty": False,
        "error": "no suitable tags",
        "date": None,
    }
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
    """Get version from 'git describe' in the root of the source tree.
    This only gets called if the git-archive 'subst' keywords were *not*
    expanded, and _version.py hasn't already been rewritten with a short
    version string, meaning we're inside a checked out source tree.

    Returns a "pieces" dict with keys: long, short, error, dirty,
    closest-tag, distance and date (populated below).  Raises
    NotThisMethod when the tree is not under git control or a git
    invocation fails outright.
    """
    GITS = ["git"]
    if sys.platform == "win32":
        # run_command uses shell=False, so the .cmd/.exe shims are needed
        GITS = ["git.cmd", "git.exe"]
    out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True)
    if rc != 0:
        if verbose:
            print("Directory %s not under git control" % root)
        raise NotThisMethod("'git rev-parse --git-dir' returned error")
    # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
    # if there isn't one, this yields HEX[-dirty] (no NUM)
    describe_out, rc = run_command(
        GITS,
        [
            "describe",
            "--tags",
            "--dirty",
            "--always",
            "--long",
            "--match",
            "%s*" % tag_prefix,
        ],
        cwd=root,
    )
    # --long was added in git-1.5.5
    if describe_out is None:
        raise NotThisMethod("'git describe' failed")
    describe_out = describe_out.strip()
    full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
    if full_out is None:
        raise NotThisMethod("'git rev-parse' failed")
    full_out = full_out.strip()
    pieces = {}
    pieces["long"] = full_out
    pieces["short"] = full_out[:7]  # maybe improved later
    pieces["error"] = None
    # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
    # TAG might have hyphens.
    git_describe = describe_out
    # look for -dirty suffix
    dirty = git_describe.endswith("-dirty")
    pieces["dirty"] = dirty
    if dirty:
        # strip the suffix before parsing; "dirty" is already recorded
        git_describe = git_describe[: git_describe.rindex("-dirty")]
    # now we have TAG-NUM-gHEX or HEX
    if "-" in git_describe:
        # TAG-NUM-gHEX
        mo = re.search(r"^(.+)-(\d+)-g([0-9a-f]+)$", git_describe)
        if not mo:
            # unparseable. Maybe git-describe is misbehaving?
            pieces["error"] = "unable to parse git-describe output: '%s'" % describe_out
            return pieces
        # tag
        full_tag = mo.group(1)
        if not full_tag.startswith(tag_prefix):
            if verbose:
                fmt = "tag '%s' doesn't start with prefix '%s'"
                print(fmt % (full_tag, tag_prefix))
            pieces["error"] = "tag '%s' doesn't start with prefix '%s'" % (
                full_tag,
                tag_prefix,
            )
            return pieces
        pieces["closest-tag"] = full_tag[len(tag_prefix) :]
        # distance: number of commits since tag
        pieces["distance"] = int(mo.group(2))
        # commit: short hex revision ID
        pieces["short"] = mo.group(3)
    else:
        # HEX: no tags
        pieces["closest-tag"] = None
        count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], cwd=root)
        pieces["distance"] = int(count_out)  # total number of commits
    # commit date: see ISO-8601 comment in git_versions_from_keywords()
    date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[
        0
    ].strip()
    pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
    return pieces
def do_vcs_install(manifest_in, versionfile_source, ipy):
    """Git-specific installation logic for Versioneer.
    For Git, this means creating/changing .gitattributes to mark _version.py
    for export-subst keyword substitution.

    @param manifest_in: path to MANIFEST.in, staged with `git add`.
    @param versionfile_source: path to the generated _version.py.
    @param ipy: optional path to the package __init__.py (may be falsy).
    """
    GITS = ["git"]
    if sys.platform == "win32":
        GITS = ["git.cmd", "git.exe"]
    files = [manifest_in, versionfile_source]
    if ipy:
        files.append(ipy)
    try:
        me = __file__
        if me.endswith(".pyc") or me.endswith(".pyo"):
            # stage the .py source, not the compiled artifact
            me = os.path.splitext(me)[0] + ".py"
        versioneer_file = os.path.relpath(me)
    except NameError:
        versioneer_file = "versioneer.py"
    files.append(versioneer_file)
    present = False
    try:
        # BUGFIX: context managers close the handles even when a read or
        # write raises; the original leaked the descriptor in that case.
        with open(".gitattributes", "r") as f:
            for line in f.readlines():
                if line.strip().startswith(versionfile_source):
                    if "export-subst" in line.strip().split()[1:]:
                        present = True
    except EnvironmentError:
        pass
    if not present:
        with open(".gitattributes", "a+") as f:
            f.write("%s export-subst\n" % versionfile_source)
        files.append(".gitattributes")
    run_command(GITS, ["add", "--"] + files)
def versions_from_parentdir(parentdir_prefix, root, verbose):
    """Try to determine the version from the parent directory name.

    Source tarballs conventionally unpack into a directory that includes
    both the project name and a version string; up to two parent levels
    above *root* are also examined.
    """
    tried = []
    for _ in range(3):
        leaf = os.path.basename(root)
        if leaf.startswith(parentdir_prefix):
            return {
                "version": leaf[len(parentdir_prefix):],
                "full-revisionid": None,
                "dirty": False,
                "error": None,
                "date": None,
            }
        tried.append(root)
        root = os.path.dirname(root)  # up a level
    if verbose:
        print(
            "Tried directories %s but none started with prefix %s"
            % (str(tried), parentdir_prefix)
        )
    raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
SHORT_VERSION_PY = """
# This file was generated by 'versioneer.py' (0.18) from
# revision-control system data, or from the parent directory name of an
# unpacked source archive. Distribution tarballs contain a pre-generated copy
# of this file.
import json
version_json = '''
%s
''' # END VERSION_JSON
def get_versions():
return json.loads(version_json)
"""
def versions_from_file(filename):
    """Try to determine the version from _version.py if present."""
    try:
        with open(filename) as f:
            contents = f.read()
    except EnvironmentError:
        raise NotThisMethod("unable to read _version.py")
    # Accept both Unix and Windows line endings around the JSON payload.
    patterns = (
        r"version_json = '''\n(.*)''' # END VERSION_JSON",
        r"version_json = '''\r\n(.*)''' # END VERSION_JSON",
    )
    for pattern in patterns:
        mo = re.search(pattern, contents, re.M | re.S)
        if mo:
            return json.loads(mo.group(1))
    raise NotThisMethod("no version_json in _version.py")
def write_to_version_file(filename, versions):
    """Replace *filename* with a short _version.py embedding *versions*.

    The old file is removed first, then rewritten from SHORT_VERSION_PY
    with the JSON-encoded version dict spliced in.
    """
    os.unlink(filename)
    payload = json.dumps(versions, sort_keys=True, indent=1, separators=(",", ": "))
    with open(filename, "w") as f:
        f.write(SHORT_VERSION_PY % payload)
    print("set %s to '%s'" % (filename, versions["version"]))
def plus_or_dot(pieces):
    """Return the separator to use before a new local-version segment.

    PEP 440 allows only one "+" in a version string, so if the closest
    tag already carries one, subsequent segments join with ".".

    @param pieces: dict that may contain "closest-tag" (str or None).
    @return: "." if the tag already contains "+", else "+".
    """
    # BUGFIX: .get() with a default still returns None when the key exists
    # with value None (the untagged case), and `"+" in None` raises
    # TypeError; `or ""` covers both absent and None.
    if "+" in (pieces.get("closest-tag") or ""):
        return "."
    return "+"
def render_pep440(pieces):
    """Build up version string, with post-release "local version identifier".

    Target shape: TAG[+DISTANCE.gHEX[.dirty]].  A tagged build that is then
    dirtied renders as TAG+0.gHEX.dirty.  With no tag at all the result is
    0+untagged.DISTANCE.gHEX[.dirty].
    """
    tag = pieces["closest-tag"]
    if not tag:
        # untagged tree: synthesize a zero version
        version = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
        if pieces["dirty"]:
            version += ".dirty"
        return version
    version = tag
    if pieces["distance"] or pieces["dirty"]:
        version += plus_or_dot(pieces)
        version += "%d.g%s" % (pieces["distance"], pieces["short"])
        if pieces["dirty"]:
            version += ".dirty"
    return version
def render_pep440_pre(pieces):
    """TAG[.post.devDISTANCE] -- never marks dirty.

    With no tag at all the result is 0.post.devDISTANCE.
    """
    tag = pieces["closest-tag"]
    if not tag:
        # untagged tree
        return "0.post.dev%d" % pieces["distance"]
    if not pieces["distance"]:
        return tag
    return tag + ".post.dev%d" % pieces["distance"]
def render_pep440_post(pieces):
    """TAG[.postDISTANCE[.dev0]+gHEX].

    ".dev0" marks a dirty tree; note it sorts backwards (a dirty tree
    appears "older" than the matching clean one), but you shouldn't be
    releasing -dirty builds anyway.  Without a tag the result is
    0.postDISTANCE[.dev0]+gHEX.
    """
    tag = pieces["closest-tag"]
    if tag:
        version = tag
        if pieces["distance"] or pieces["dirty"]:
            version += ".post%d" % pieces["distance"]
            if pieces["dirty"]:
                version += ".dev0"
            version += plus_or_dot(pieces)
            version += "g%s" % pieces["short"]
        return version
    # untagged tree
    version = "0.post%d" % pieces["distance"]
    if pieces["dirty"]:
        version += ".dev0"
    return version + "+g%s" % pieces["short"]
def render_pep440_old(pieces):
    """TAG[.postDISTANCE[.dev0]].

    ".dev0" marks a dirty tree.  Without a tag the result is
    0.postDISTANCE[.dev0].
    """
    tag = pieces["closest-tag"]
    if tag:
        version = tag
        if pieces["distance"] or pieces["dirty"]:
            version += ".post%d" % pieces["distance"]
            if pieces["dirty"]:
                version += ".dev0"
        return version
    # untagged tree
    version = "0.post%d" % pieces["distance"]
    if pieces["dirty"]:
        version += ".dev0"
    return version
def render_git_describe(pieces):
    """TAG[-DISTANCE-gHEX][-dirty].

    Mirrors 'git describe --tags --dirty --always'; with no tag at all the
    result is just HEX[-dirty] (note: no 'g' prefix).
    """
    if pieces["closest-tag"]:
        version = pieces["closest-tag"]
        if pieces["distance"]:
            version += "-%d-g%s" % (pieces["distance"], pieces["short"])
    else:
        # untagged tree: bare hash
        version = pieces["short"]
    return version + ("-dirty" if pieces["dirty"] else "")
def render_git_describe_long(pieces):
    """TAG-DISTANCE-gHEX[-dirty].

    Mirrors 'git describe --tags --dirty --always --long': the
    distance/hash suffix is unconditional.  With no tag at all the result
    is just HEX[-dirty] (note: no 'g' prefix).
    """
    if pieces["closest-tag"]:
        version = "%s-%d-g%s" % (
            pieces["closest-tag"],
            pieces["distance"],
            pieces["short"],
        )
    else:
        # untagged tree: bare hash
        version = pieces["short"]
    return version + ("-dirty" if pieces["dirty"] else "")
def render(pieces, style):
    """Render the given version pieces into the requested style."""
    if pieces["error"]:
        # an upstream extraction step already failed; propagate its message
        return {
            "version": "unknown",
            "full-revisionid": pieces.get("long"),
            "dirty": None,
            "error": pieces["error"],
            "date": None,
        }
    if not style or style == "default":
        style = "pep440"  # the default
    renderers = {
        "pep440": render_pep440,
        "pep440-pre": render_pep440_pre,
        "pep440-post": render_pep440_post,
        "pep440-old": render_pep440_old,
        "git-describe": render_git_describe,
        "git-describe-long": render_git_describe_long,
    }
    if style not in renderers:
        raise ValueError("unknown style '%s'" % style)
    rendered = renderers[style](pieces)
    return {
        "version": rendered,
        "full-revisionid": pieces["long"],
        "dirty": pieces["dirty"],
        "error": None,
        "date": pieces.get("date"),
    }
class VersioneerBadRootError(Exception):
    """The project root directory is unknown or missing key files.

    Raised by get_root() when neither setup.py nor versioneer.py can be
    located via the current directory or sys.argv[0].
    """
def get_versions(verbose=False):
    """Get the project version from whatever source is available.

    Tries several extraction strategies in order (expanded VCS keywords,
    a previously written _version.py, the VCS itself, the parent directory
    name); each strategy raises NotThisMethod when it cannot answer and we
    fall through to the next.

    Returns dict with two keys: 'version' and 'full'.
    """
    if "versioneer" in sys.modules:
        # see the discussion in cmdclass.py:get_cmdclass()
        del sys.modules["versioneer"]

    root = get_root()
    cfg = get_config_from_root(root)

    # Sanity-check the [versioneer] configuration before doing any work.
    assert cfg.VCS is not None, "please set [versioneer]VCS= in setup.cfg"
    handlers = HANDLERS.get(cfg.VCS)
    assert handlers, "unrecognized VCS '%s'" % cfg.VCS
    verbose = verbose or cfg.verbose
    assert (
        cfg.versionfile_source is not None
    ), "please set versioneer.versionfile_source"
    assert cfg.tag_prefix is not None, "please set versioneer.tag_prefix"

    versionfile_abs = os.path.join(root, cfg.versionfile_source)

    # extract version from first of: _version.py, VCS command (e.g. 'git
    # describe'), parentdir. This is meant to work for developers using a
    # source checkout, for users of a tarball created by 'setup.py sdist',
    # and for users of a tarball/zipball created by 'git archive' or github's
    # download-from-tag feature or the equivalent in other VCSes.
    get_keywords_f = handlers.get("get_keywords")
    from_keywords_f = handlers.get("keywords")
    if get_keywords_f and from_keywords_f:
        try:
            # Strategy 1: expanded VCS keywords (e.g. from 'git archive')
            keywords = get_keywords_f(versionfile_abs)
            ver = from_keywords_f(keywords, cfg.tag_prefix, verbose)
            if verbose:
                print("got version from expanded keyword %s" % ver)
            return ver
        except NotThisMethod:
            pass
    try:
        # Strategy 2: a previously generated _version.py file
        ver = versions_from_file(versionfile_abs)
        if verbose:
            print("got version from file %s %s" % (versionfile_abs, ver))
        return ver
    except NotThisMethod:
        pass
    from_vcs_f = handlers.get("pieces_from_vcs")
    if from_vcs_f:
        try:
            # Strategy 3: query the VCS directly (e.g. 'git describe')
            pieces = from_vcs_f(cfg.tag_prefix, root, verbose)
            ver = render(pieces, cfg.style)
            if verbose:
                print("got version from VCS %s" % ver)
            return ver
        except NotThisMethod:
            pass
    try:
        # Strategy 4: parse the version out of the parent directory name
        if cfg.parentdir_prefix:
            ver = versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
            if verbose:
                print("got version from parentdir %s" % ver)
            return ver
    except NotThisMethod:
        pass
    if verbose:
        print("unable to compute version")

    # Every strategy failed: return an explicit "unknown" version dict.
    return {
        "version": "0+unknown",
        "full-revisionid": None,
        "dirty": None,
        "error": "unable to compute version",
        "date": None,
    }
def get_version():
    """Get the short version string for this project."""
    # Delegate to get_versions() and pull out just the human-readable part.
    versions = get_versions()
    return versions["version"]
def get_cmdclass():
    """Get the custom setuptools/distutils subclasses used by Versioneer.

    Returns a dict mapping command names (e.g. "version", "build_py",
    "sdist", and optionally "build_exe"/"py2exe") to command classes,
    suitable for the ``cmdclass=`` argument of ``setup()``.
    """
    if "versioneer" in sys.modules:
        del sys.modules["versioneer"]
        # this fixes the "python setup.py develop" case (also 'install' and
        # 'easy_install .'), in which subdependencies of the main project are
        # built (using setup.py bdist_egg) in the same python process. Assume
        # a main project A and a dependency B, which use different versions
        # of Versioneer. A's setup.py imports A's Versioneer, leaving it in
        # sys.modules by the time B's setup.py is executed, causing B to run
        # with the wrong versioneer. Setuptools wraps the sub-dep builds in a
        # sandbox that restores sys.modules to it's pre-build state, so the
        # parent is protected against the child's "import versioneer". By
        # removing ourselves from sys.modules here, before the child build
        # happens, we protect the child from the parent's versioneer too.
        # Also see https://github.com/warner/python-versioneer/issues/52

    cmds = {}

    # we add "version" to both distutils and setuptools
    from distutils.core import Command

    class cmd_version(Command):
        # "setup.py version": report the computed version without building.
        description = "report generated version string"
        user_options = []
        boolean_options = []

        def initialize_options(self):
            pass

        def finalize_options(self):
            pass

        def run(self):
            vers = get_versions(verbose=True)
            print("Version: %s" % vers["version"])
            print(" full-revisionid: %s" % vers.get("full-revisionid"))
            print(" dirty: %s" % vers.get("dirty"))
            print(" date: %s" % vers.get("date"))
            if vers["error"]:
                print(" error: %s" % vers["error"])

    cmds["version"] = cmd_version

    # we override "build_py" in both distutils and setuptools
    #
    # most invocation pathways end up running build_py:
    # distutils/build -> build_py
    # distutils/install -> distutils/build ->..
    # setuptools/bdist_wheel -> distutils/install ->..
    # setuptools/bdist_egg -> distutils/install_lib -> build_py
    # setuptools/install -> bdist_egg ->..
    # setuptools/develop -> ?
    # pip install:
    # copies source tree to a tempdir before running egg_info/etc
    # if .git isn't copied too, 'git describe' will fail
    # then does setup.py bdist_wheel, or sometimes setup.py install
    # setup.py egg_info -> ?
    # we override different "build_py" commands for both environments
    if "setuptools" in sys.modules:
        from setuptools.command.build_py import build_py as _build_py
    else:
        from distutils.command.build_py import build_py as _build_py

    class cmd_build_py(_build_py):
        def run(self):
            root = get_root()
            cfg = get_config_from_root(root)
            versions = get_versions()
            _build_py.run(self)
            # now locate _version.py in the new build/ directory and replace
            # it with an updated value
            if cfg.versionfile_build:
                target_versionfile = os.path.join(self.build_lib, cfg.versionfile_build)
                print("UPDATING %s" % target_versionfile)
                write_to_version_file(target_versionfile, versions)

    cmds["build_py"] = cmd_build_py

    if "cx_Freeze" in sys.modules:  # cx_freeze enabled?
        from cx_Freeze.dist import build_exe as _build_exe

        # nczeczulin reports that py2exe won't like the pep440-style string
        # as FILEVERSION, but it can be used for PRODUCTVERSION, e.g.
        # setup(console=[{
        # "version": versioneer.get_version().split("+", 1)[0], # FILEVERSION
        # "product_version": versioneer.get_version(),
        # ...
        class cmd_build_exe(_build_exe):
            def run(self):
                root = get_root()
                cfg = get_config_from_root(root)
                versions = get_versions()
                target_versionfile = cfg.versionfile_source
                print("UPDATING %s" % target_versionfile)
                write_to_version_file(target_versionfile, versions)
                _build_exe.run(self)
                os.unlink(target_versionfile)
                # Restore the templated _version.py after the frozen build.
                with open(cfg.versionfile_source, "w") as f:
                    LONG = LONG_VERSION_PY[cfg.VCS]
                    f.write(
                        LONG
                        % {
                            "DOLLAR": "$",
                            "STYLE": cfg.style,
                            "TAG_PREFIX": cfg.tag_prefix,
                            "PARENTDIR_PREFIX": cfg.parentdir_prefix,
                            "VERSIONFILE_SOURCE": cfg.versionfile_source,
                        }
                    )

        cmds["build_exe"] = cmd_build_exe
        del cmds["build_py"]

    if "py2exe" in sys.modules:  # py2exe enabled?
        try:
            from py2exe.distutils_buildexe import py2exe as _py2exe  # py3
        except ImportError:
            from py2exe.build_exe import py2exe as _py2exe  # py2

        class cmd_py2exe(_py2exe):
            def run(self):
                root = get_root()
                cfg = get_config_from_root(root)
                versions = get_versions()
                target_versionfile = cfg.versionfile_source
                print("UPDATING %s" % target_versionfile)
                write_to_version_file(target_versionfile, versions)
                _py2exe.run(self)
                os.unlink(target_versionfile)
                # Restore the templated _version.py after the frozen build.
                with open(cfg.versionfile_source, "w") as f:
                    LONG = LONG_VERSION_PY[cfg.VCS]
                    f.write(
                        LONG
                        % {
                            "DOLLAR": "$",
                            "STYLE": cfg.style,
                            "TAG_PREFIX": cfg.tag_prefix,
                            "PARENTDIR_PREFIX": cfg.parentdir_prefix,
                            "VERSIONFILE_SOURCE": cfg.versionfile_source,
                        }
                    )

        cmds["py2exe"] = cmd_py2exe

    # we override different "sdist" commands for both environments
    if "setuptools" in sys.modules:
        from setuptools.command.sdist import sdist as _sdist
    else:
        from distutils.command.sdist import sdist as _sdist

    class cmd_sdist(_sdist):
        def run(self):
            versions = get_versions()
            self._versioneer_generated_versions = versions
            # unless we update this, the command will keep using the old
            # version
            self.distribution.metadata.version = versions["version"]
            return _sdist.run(self)

        def make_release_tree(self, base_dir, files):
            root = get_root()
            cfg = get_config_from_root(root)
            _sdist.make_release_tree(self, base_dir, files)
            # now locate _version.py in the new base_dir directory
            # (remembering that it may be a hardlink) and replace it with an
            # updated value
            target_versionfile = os.path.join(base_dir, cfg.versionfile_source)
            print("UPDATING %s" % target_versionfile)
            write_to_version_file(
                target_versionfile, self._versioneer_generated_versions
            )

    cmds["sdist"] = cmd_sdist
    return cmds
# Message printed (to stderr) by do_setup() when setup.cfg lacks a usable
# [versioneer] section.
CONFIG_ERROR = """
setup.cfg is missing the necessary Versioneer configuration. You need
a section like:
[versioneer]
VCS = git
style = pep440
versionfile_source = src/myproject/_version.py
versionfile_build = myproject/_version.py
tag_prefix =
parentdir_prefix = myproject-
You will also need to edit your setup.py to use the results:
import versioneer
setup(version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(), ...)
Please read the docstring in ./versioneer.py for configuration instructions,
edit setup.cfg, and re-run the installer or 'python versioneer.py setup'.
"""

# Commented-out sample [versioneer] section that do_setup() appends to
# setup.cfg when no configuration is found.
SAMPLE_CONFIG = """
# See the docstring in versioneer.py for instructions. Note that you must
# re-run 'versioneer.py setup' after changing this section, and commit the
# resulting files.
[versioneer]
#VCS = git
#style = pep440
#versionfile_source =
#versionfile_build =
#tag_prefix =
#parentdir_prefix =
"""

# Snippet appended to the package __init__.py so it exposes __version__.
# NOTE: do_setup() checks membership of this exact text before appending,
# so its content must not be altered.
INIT_PY_SNIPPET = """
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
"""
def do_setup():
    """Main VCS-independent setup function for installing Versioneer.

    Writes the project's _version.py from the VCS template, wires
    __version__ into the package __init__.py, ensures MANIFEST.in includes
    the necessary files, and performs VCS-specific install steps.

    Returns:
        int: 0 on success, 1 when setup.cfg configuration is missing.
    """
    root = get_root()
    try:
        cfg = get_config_from_root(root)
    except (
        EnvironmentError,
        configparser.NoSectionError,
        configparser.NoOptionError,
    ) as e:
        # Missing/incomplete config: append a commented sample section so the
        # user has a template to fill in, then bail out with instructions.
        if isinstance(e, (EnvironmentError, configparser.NoSectionError)):
            print("Adding sample versioneer config to setup.cfg", file=sys.stderr)
            with open(os.path.join(root, "setup.cfg"), "a") as f:
                f.write(SAMPLE_CONFIG)
        print(CONFIG_ERROR, file=sys.stderr)
        return 1

    # Write the project's _version.py from the VCS-specific template.
    print(" creating %s" % cfg.versionfile_source)
    with open(cfg.versionfile_source, "w") as f:
        LONG = LONG_VERSION_PY[cfg.VCS]
        f.write(
            LONG
            % {
                "DOLLAR": "$",
                "STYLE": cfg.style,
                "TAG_PREFIX": cfg.tag_prefix,
                "PARENTDIR_PREFIX": cfg.parentdir_prefix,
                "VERSIONFILE_SOURCE": cfg.versionfile_source,
            }
        )

    # Append the __version__ snippet to the package __init__.py, if one
    # exists and does not already contain it.
    ipy = os.path.join(os.path.dirname(cfg.versionfile_source), "__init__.py")
    if os.path.exists(ipy):
        try:
            with open(ipy, "r") as f:
                old = f.read()
        except EnvironmentError:
            old = ""
        if INIT_PY_SNIPPET not in old:
            print(" appending to %s" % ipy)
            with open(ipy, "a") as f:
                f.write(INIT_PY_SNIPPET)
        else:
            print(" %s unmodified" % ipy)
    else:
        print(" %s doesn't exist, ok" % ipy)
        ipy = None

    # Make sure both the top-level "versioneer.py" and versionfile_source
    # (PKG/_version.py, used by runtime code) are in MANIFEST.in, so
    # they'll be copied into source distributions. Pip won't be able to
    # install the package without this.
    manifest_in = os.path.join(root, "MANIFEST.in")
    simple_includes = set()
    try:
        with open(manifest_in, "r") as f:
            for line in f:
                if line.startswith("include "):
                    for include in line.split()[1:]:
                        simple_includes.add(include)
    except EnvironmentError:
        pass
    # That doesn't cover everything MANIFEST.in can do
    # (http://docs.python.org/2/distutils/sourcedist.html#commands), so
    # it might give some false negatives. Appending redundant 'include'
    # lines is safe, though.
    if "versioneer.py" not in simple_includes:
        print(" appending 'versioneer.py' to MANIFEST.in")
        with open(manifest_in, "a") as f:
            f.write("include versioneer.py\n")
    else:
        print(" 'versioneer.py' already in MANIFEST.in")
    if cfg.versionfile_source not in simple_includes:
        print(
            " appending versionfile_source ('%s') to MANIFEST.in"
            % cfg.versionfile_source
        )
        with open(manifest_in, "a") as f:
            f.write("include %s\n" % cfg.versionfile_source)
    else:
        print(" versionfile_source already in MANIFEST.in")

    # Make VCS-specific changes. For git, this means creating/changing
    # .gitattributes to mark _version.py for export-subst keyword
    # substitution.
    do_vcs_install(manifest_in, cfg.versionfile_source, ipy)
    return 0
def scan_setup_py():
    """Validate the contents of setup.py against Versioneer's expectations.

    Returns the number of problems found (0 when setup.py looks correct).
    """
    found = set()
    setters = False
    errors = 0
    # Substring markers we expect (or reject) in a versioneer-enabled setup.py.
    expected_markers = {
        "import versioneer": "import",
        "versioneer.get_cmdclass()": "cmdclass",
        "versioneer.get_version()": "get_version",
    }
    with open("setup.py", "r") as setup_file:
        for line in setup_file:
            for needle, tag in expected_markers.items():
                if needle in line:
                    found.add(tag)
            # Old-style module-attribute configuration is no longer supported.
            if "versioneer.VCS" in line or "versioneer.versionfile_source" in line:
                setters = True
    if len(found) != 3:
        print("")
        print("Your setup.py appears to be missing some important items")
        print("(but I might be wrong). Please make sure it has something")
        print("roughly like the following:")
        print("")
        print(" import versioneer")
        print(" setup( version=versioneer.get_version(),")
        print(" cmdclass=versioneer.get_cmdclass(), ...)")
        print("")
        errors += 1
    if setters:
        print("You should remove lines like 'versioneer.VCS = ' and")
        print("'versioneer.versionfile_source = ' . This configuration")
        print("now lives in setup.cfg, and should be removed from setup.py")
        print("")
        errors += 1
    return errors
if __name__ == "__main__":
    # Command-line entry point. Only 'setup' is handled here: it installs
    # Versioneer into the project and then sanity-checks setup.py, exiting
    # non-zero if either step reported problems.
    cmd = sys.argv[1]
    if cmd == "setup":
        errors = do_setup()
        errors += scan_setup_py()
        if errors:
            sys.exit(1)
import logging
import copy
import random
from pathlib import Path
from typing import Tuple, Union, List
import bpy
import gin
import zpy
log = logging.getLogger(__name__)
def verify(
    mat: Union[bpy.types.Material, str],
    check_none: bool = True,
) -> bpy.types.Material:
    """Get a material given either its name or the object itself.

    Args:
        mat (Union[bpy.types.Material, str]): Material (or it's name)
        check_none (bool, optional): Check to make sure material exists. Defaults to True.

    Raises:
        ValueError: Material does not exist.

    Returns:
        bpy.types.Material: Material object.
    """
    # BUGFIX: keep the caller's argument so a failed name lookup reports the
    # requested name instead of None (the original interpolated `mat` after
    # overwriting it with the lookup result).
    requested = mat
    if isinstance(mat, str):
        mat = bpy.data.materials.get(mat)
    if check_none and mat is None:
        raise ValueError(f"Could not find material {requested}.")
    return mat
def for_mat_in_obj(
    obj: Union[bpy.types.Object, str],
) -> bpy.types.Material:
    """Yield materials in scene object.

    Args:
        obj (Union[bpy.types.Object, str]): Scene object (or it's name)

    Yields:
        bpy.types.Material: Material object.
    """
    obj = zpy.objects.verify(obj)
    if len(obj.material_slots) > 1:
        for mat in obj.material_slots:
            yield mat.material
    else:
        # BUGFIX: this function is a generator, so the original
        # `return obj.active_material` ended iteration without producing the
        # material — single-material objects yielded nothing. It must be
        # yielded instead.
        if obj.active_material is not None:
            yield obj.active_material
        else:
            log.debug(f"No active material or material slots found for {obj.name}")
# Cache of material properties captured by save_mat_props(), keyed by
# material name; consumed by restore_mat_props()/restore_all_mat_props().
_SAVED_MATERIALS = {}


def save_mat_props(
    mat: Union[bpy.types.Material, str],
) -> None:
    """Save (some of the) properties of a material to a module-level cache.

    Args:
        mat (Union[bpy.types.Material, str]): Material (or it's name)
    """
    # BUGFIX: verify first so callers may pass the material name (a str),
    # consistent with the other functions in this module; previously a str
    # argument would crash on `mat.name`. Also fixes the copy-pasted
    # "Save a pose (rot and pos)" docstring.
    mat = verify(mat)
    log.info(f"Saving material properties for {mat.name}")
    _SAVED_MATERIALS[mat.name] = get_mat_props(mat)
def restore_mat_props(
    mat: Union[bpy.types.Material, str],
) -> None:
    """Restore a material's properties saved by save_mat_props().

    Args:
        mat (Union[bpy.types.Material, str]): Material (or it's name)

    Raises:
        KeyError: If save_mat_props() was never called for this material.
    """
    # BUGFIX: verify first so callers may pass the material name (a str),
    # consistent with the other functions in this module; also fixes the
    # copy-pasted "Restore an object to a position" docstring.
    mat = verify(mat)
    log.info(f"Restoring material properties for {mat.name}")
    set_mat_props(mat, _SAVED_MATERIALS[mat.name])
def restore_all_mat_props() -> None:
    """Restore all jittered materials to original look."""
    # set_mat_props accepts a material name directly, so the saved
    # name -> props mapping can simply be replayed.
    for saved_name, saved_props in _SAVED_MATERIALS.items():
        set_mat_props(saved_name, saved_props)
def get_mat_props(
    mat: Union[bpy.types.Material, str],
) -> Tuple[float]:
    """Get (some of the) material properties.

    Args:
        mat (Union[bpy.types.Material, str]): Material (or it's name)

    Returns:
        Tuple[float]: Material property values (roughness, metallic, specular),
            or zeros when the material has no Principled BSDF node.
    """
    mat = verify(mat)
    bsdf = mat.node_tree.nodes.get("Principled BSDF")
    if bsdf is None:
        log.warning(f"No BSDF node in {mat.name}")
        return (0.0, 0.0, 0.0)
    inputs = bsdf.inputs
    return (
        inputs["Roughness"].default_value,
        inputs["Metallic"].default_value,
        inputs["Specular"].default_value,
    )
def set_mat_props(
    mat: Union[bpy.types.Material, str],
    prop_tuple: Tuple[float],
) -> None:
    """Set (some of the) material properties.

    Args:
        mat (Union[bpy.types.Material, str]): Material (or it's name)
        prop_tuple (Tuple[float]): Material property values (roughness, metallic, specular).
    """
    mat = verify(mat)
    # TODO: Work backwards from Material output node instead of
    # assuming a 'Principled BSDF' node
    bsdf_node = mat.node_tree.nodes.get("Principled BSDF", None)
    if bsdf_node is None:
        log.warning(f"No BSDF node in {mat.name}")
        return
    # Floats are immutable, so the copy.copy() calls in the original were
    # no-ops; plain unpacking is equivalent and clearer.
    roughness, metallic, specular = prop_tuple[0], prop_tuple[1], prop_tuple[2]
    bsdf_node.inputs["Roughness"].default_value = roughness
    bsdf_node.inputs["Metallic"].default_value = metallic
    bsdf_node.inputs["Specular"].default_value = specular
@gin.configurable
def jitter(
    mat: Union[bpy.types.Material, str],
    std: float = 0.2,
    save_first_time: bool = True,
) -> None:
    """Randomize an existing material a little.

    Args:
        mat (Union[bpy.types.Material, str]): Material (or it's name)
        std (float, optional): Standard deviation of gaussian noise over material property. Defaults to 0.2.
        save_first_time (bool, optional): Save the material props first time jitter is called and
            restore before jittering every subsequent time. Defaults to True.
    """
    mat = verify(mat)
    if save_first_time:
        # First call: remember the pristine props. Later calls: reset to the
        # pristine props first, so successive jitters do not accumulate.
        if _SAVED_MATERIALS.get(mat.name, None) is None:
            save_mat_props(mat)
        else:
            restore_mat_props(mat)
    log.info(f"Jittering material {mat.name}")
    base_props = get_mat_props(mat)
    noisy_props = tuple(value + random.gauss(0, std) for value in base_props)
    set_mat_props(mat, noisy_props)
@gin.configurable
def random_mat(
    obj: Union[bpy.types.Object, str],
    list_of_mats: List[bpy.types.Material],
    resegment: bool = True,
):
    """Set a random material, chosen from a list, on a scene object.

    Args:
        obj (Union[bpy.types.Object, str]): Scene object (or it's name)
        list_of_mats (List[bpy.types.Material]): List of possible materials to choose from
        resegment (bool, optional): Re-segment the object after setting material. Defaults to True.
    """
    # BUGFIX: the first parameter is a scene object, so it must be resolved
    # with zpy.objects.verify (the original called this module's material
    # verify(), which would look a name up in bpy.data.materials). The
    # annotation and placeholder "[summary]" docstring are fixed accordingly.
    obj = zpy.objects.verify(obj)
    log.debug(f"Choosing random material for obj: {obj.name}")
    _mat = random.choice(list_of_mats)
    _mat = zpy.material.verify(_mat)
    zpy.material.set_mat(obj, _mat)
    if resegment:
        # Have to re-segment the object to properly
        # set the properties on the new material
        zpy.objects.segment(
            obj, name=obj.seg.instance_name, color=obj.seg.instance_color
        )
        zpy.objects.segment(
            obj,
            as_category=True,
            name=obj.seg.category_name,
            color=obj.seg.category_color,
        )
@gin.configurable
def random_texture_mat() -> bpy.types.Material:
    """Generate a random material from a random texture image.

    Returns:
        bpy.types.Material: The newly created material.
    """
    # Pick any jpg/png from the asset texture directory and build a
    # material named after the file.
    chosen_texture = zpy.files.pick_random_from_dir(
        zpy.assets.texture_dir(), suffixes=[".jpg", ".png"]
    )
    return make_mat_from_texture(chosen_texture, name=chosen_texture.stem)
@gin.configurable
def filtered_dir_texture_mat(dir_name: Union[Path, str] = None) -> bpy.types.Material:
    """Generate a material based on a word, which searches for related texture images.

    TODO: Requires Zumo Labs Asset Library

    Args:
        dir_name (Union[Path, str]): Path of directory in $ASSETS

    Returns:
        bpy.types.Material: The newly created material.
    """
    # Without a filter, fall back to an unfiltered random texture.
    if dir_name is None:
        log.warning("No filter provided, using random texture mat instead.")
        return random_texture_mat()
    chosen_texture = zpy.files.pick_random_from_dir(
        zpy.assets.texture_dir() / dir_name, suffixes=[".jpg", ".png"]
    )
    return make_mat_from_texture(chosen_texture, name=chosen_texture.stem)
@gin.configurable
def make_mat_from_texture(
    texture_path: Union[Path, str],
    name: str = None,
    coordinate: str = "uv",
) -> bpy.types.Material:
    """Makes a material from a texture image.

    Args:
        texture_path (Union[Path, str]): Path to texture image.
        name (str, optional): Name of new material. Defaults to the texture
            file's stem.
        coordinate (str, optional): Type of texture coordinates. Values are
            "generated", "normal", "uv", "object" , defaults to "uv"

    Returns:
        bpy.types.Material: The newly created material.
    """
    texture_path = zpy.files.verify_path(texture_path, make=False)
    if name is None:
        name = texture_path.stem
    # Re-use an existing material with this name when present.
    mat = bpy.data.materials.get(name, None)
    if mat is None:
        log.debug(f"Material {name} does not exist, creating it.")
        mat = bpy.data.materials.new(name=name)
    mat.use_nodes = True
    bsdf_node = mat.node_tree.nodes.get("Principled BSDF")
    out_node = mat.node_tree.nodes.get("Material Output")
    tex_node = mat.node_tree.nodes.new("ShaderNodeTexImage")
    tex_node.name = "ImageTexture"
    coord_node = mat.node_tree.nodes.new("ShaderNodeTexCoord")
    bpy.ops.image.open(filepath=str(texture_path))
    tex_node.image = bpy.data.images[texture_path.name]
    tex_node.image.colorspace_settings.name = "Filmic Log"
    # Wire texture color output into the BSDF base color input.
    mat.node_tree.links.new(tex_node.outputs[0], bsdf_node.inputs[0])
    # TODO: Texture coordinate index is hardcoded
    valid_coordinates = ["generated", "normal", "uv", "object"]
    assert (
        coordinate in valid_coordinates
    ), f"Texture coordinate {coordinate} must be in {valid_coordinates}"
    _coord_idx = valid_coordinates.index(coordinate)
    # Wire the chosen coordinate output into the texture's vector input,
    # and the BSDF into the material output.
    mat.node_tree.links.new(coord_node.outputs[_coord_idx], tex_node.inputs[0])
    mat.node_tree.links.new(out_node.inputs[0], bsdf_node.outputs[0])
    # Reload so the image pixels are available immediately.
    tex_node.image.reload()
    return mat
@gin.configurable
def make_mat_from_color(
    color: Tuple[float],
    name: str = None,
) -> bpy.types.Material:
    """Makes a material given a color.

    Args:
        color (Tuple[float]): Color tuple (RGB).
        name (str, optional): Name of new material. Defaults to str(color).

    Returns:
        bpy.types.Material: The newly created material.
    """
    if name is None:
        name = str(color)
    mat = bpy.data.materials.get(name, None)
    if mat is None:
        log.debug(f"Material {name} does not exist, creating it.")
        mat = bpy.data.materials.new(name=name)
    mat.use_nodes = True
    node_tree = mat.node_tree
    out_node = node_tree.nodes.get("Material Output")
    # Swap the default Principled BSDF for a simple diffuse shader of the
    # requested color (alpha fixed at 1.0).
    node_tree.nodes.remove(node_tree.nodes.get("Principled BSDF"))
    diffuse_node = node_tree.nodes.new("ShaderNodeBsdfDiffuse")
    diffuse_node.inputs["Color"].default_value = color + (1.0,)
    node_tree.links.new(out_node.inputs[0], diffuse_node.outputs[0])
    return mat
def set_mat(
    obj: Union[bpy.types.Object, str],
    mat: Union[bpy.types.Material, str],
    recursive: bool = True,
) -> None:
    """Set the material for an object.

    Args:
        obj (Union[bpy.types.Object, str]): Scene object (or it's name) with an active material.
        mat (Union[bpy.types.Material, str]): Material (or it's name)
        recursive (bool, optional): Recursively set material for child objects. Defaults to True.
    """
    obj = zpy.objects.verify(obj)
    mat = zpy.material.verify(mat)
    # Guard clause: bail out early for objects that cannot hold a material.
    if not hasattr(obj, "active_material"):
        log.warning("Object does not have material property")
        return
    log.debug(f"Setting object {obj.name} material {mat.name}")
    obj.active_material = mat
    if recursive:
        # Apply the same material to the entire subtree of children.
        for child in obj.children:
            set_mat(child, mat)
@gin.configurable
def make_aov_material_output_node(
    mat: bpy.types.Material = None,
    obj: bpy.types.Object = None,
    style: str = "instance",
) -> None:
    """Make AOV Output nodes in Composition Graph.

    Args:
        mat (bpy.types.Material, optional): A blender material (either it's name or the object itself).
        obj (bpy.types.Object, optional): A blender object (either it's name or the object itself).
        style (str, optional): Type of segmentation in [instance, category]. Defaults to 'instance'.

    Raises:
        ValueError: Invalid style, no object or material given.
    """
    # Make sure engine is set to Cycles
    scene = zpy.blender.verify_blender_scene()
    if not (scene.render.engine == "CYCLES"):
        log.warning(" Setting render engine to CYCLES to use AOV")
        # BUGFIX: this line was a comparison (==), which silently did
        # nothing; it must be an assignment to actually switch the engine.
        scene.render.engine = "CYCLES"
    # TODO: Refactor this legacy "styles" code
    # Only certain styles are available
    valid_styles = ["instance", "category"]
    assert (
        style in valid_styles
    ), f"Invalid style {style} for AOV material output node, must be in {valid_styles}."
    # HACK: multiple material slots
    all_mats = []
    # Use material
    if mat is not None:
        all_mats = [mat]
    # Get material from object
    elif obj is not None:
        if obj.active_material is None:
            log.debug(f"No active material found for {obj.name}")
            return
        if len(obj.material_slots) > 1:
            for mat in obj.material_slots:
                all_mats.append(mat.material)
        else:
            all_mats.append(obj.active_material)
    else:
        raise ValueError("Must pass in an Object or Material")
    # HACK: multiple material slots
    for mat in all_mats:
        # Make sure material is using nodes
        if not mat.use_nodes:
            mat.use_nodes = True
        tree = mat.node_tree
        # Vertex Color Node
        vcol_node = zpy.nodes.get_or_make(
            f"{style} Vertex Color", "ShaderNodeVertexColor", tree
        )
        vcol_node.layer_name = style
        # AOV Output Node
        # HACK: This type of node has a "name" property which prevents using the
        # normal zpy.nodes code due to a scope conflict with the bpy.types.Node.name property
        # See: https://docs.blender.org/api/current/bpy.types.ShaderNodeOutputAOV.html
        _name = style
        aovout_node = None
        for _node in tree.nodes:
            if _node.name == _name:
                aovout_node = _node
        if aovout_node is None:
            aovout_node = tree.nodes.new("ShaderNodeOutputAOV")
        aovout_node.name = style
        tree.links.new(vcol_node.outputs["Color"], aovout_node.inputs["Color"])
from pathlib import Path
from typing import List, Union
import numpy as np
import logging
import shutil
log = logging.getLogger(__name__)
def set_log_levels(
    level: str = None,
    modules: List[str] = (
        "zpy",
        "zpy_addon",
        "bpy.zpy_addon",
        "neuralyzer",
        "bender",
    ),
    log_format: str = "%(asctime)s: %(levelname)s %(filename)s] %(message)s",
) -> None:
    """Set logger levels for all zpy modules.

    Args:
        level (str, optional): log level in [info, debug, warning]. Defaults to logging.INFO.
        modules (List[str], optional): Modules to set logging for.
            Defaults to ('zpy', 'zpy_addon', 'bpy.zpy_addon', 'neuralyzer', 'bender').
        log_format (str, optional): Log format string.
            Defaults to '%(asctime)s: %(levelname)s %(filename)s] %(message)s'
    """
    # BUGFIX: the original default list was missing a comma between
    # "bpy.zpy_addon" and "neuralyzer", so implicit string concatenation
    # produced the bogus module name "bpy.zpy_addonneuralyzer" and neither
    # logger was configured. A tuple default also avoids the mutable
    # default argument pitfall.
    _log = logging.getLogger(__name__)
    level_by_name = {
        None: logging.INFO,
        "info": logging.INFO,
        "debug": logging.DEBUG,
        "warning": logging.WARNING,
    }
    if level not in level_by_name:
        _log.warning(f"Invalid log level {level}")
        return
    log_level = level_by_name[level]
    logging.basicConfig(format=log_format)
    for logger_name in modules:
        try:
            _log.warning(f"Setting log level for {logger_name} to {log_level} ({level})")
            logging.getLogger(logger_name).setLevel(log_level)
        except Exception:
            # Logger configuration should never take the process down.
            pass
def linebreaker_log(
    message: str,
    line_length: int = 80,
):
    """Good looking line-breaker log message.

    Args:
        message (str): Message to put in the log.
        line_length (int, optional): Length of line breakers ----. Defaults to 80.
    """
    message = message[:line_length]  # Clip overly long messages
    # Center the upper-cased message between dashed rule lines.
    padding = " " * ((line_length - len(message)) // 2)
    rule = "-" * line_length
    log.info(rule)
    log.info(f"{padding}{message.upper()}{padding}")
    log.info(rule)
def setup_file_handlers(
    log_dir: Union[str, Path] = "/tmp",
    error_log: bool = True,
    debug_log: bool = True,
) -> None:
    """Output log files for requests

    Args:
        error_log: output error.log
        debug_log: output debug.log
        log_dir: directory to output log files
    """
    root_logger = logging.getLogger()

    def _attach(filename: str, level: int) -> None:
        # One fresh (truncated) file per run, filtered at the given level.
        handler = logging.FileHandler(f"{log_dir}/{filename}", mode="w")
        handler.setLevel(level)
        root_logger.addHandler(handler)

    _attach("info.log", logging.INFO)
    if error_log:
        _attach("error.log", logging.ERROR)
    if debug_log:
        _attach("debug.log", logging.DEBUG)
def save_log_files(
    output_dir: Union[str, Path],
    log_dir: Union[str, Path] = "/tmp",
) -> None:
    """Save log files to output directory

    Args:
        output_dir: directory to save log files
        log_dir: directory where logs exist
    """
    # NOTE: the loop variable was previously named `log`, shadowing the
    # module-level logger; renamed for clarity. Files missing from log_dir
    # are silently skipped, as is the src == dst case.
    for log_name in ["info.log", "debug.log", "error.log"]:
        src = Path(log_dir) / log_name
        dst = Path(output_dir) / log_name
        if src.exists() and src != dst:
            shutil.copy(src, dst)
def parse_log_file(log_file: Union[str, Path]) -> dict:
    """Parse step and render timings out of a zpy run log.

    Scans the log for "Rendering took <X>s" and "Simulation step took <X>s"
    lines, grouping render times by the simulation step they belong to.

    Args:
        log_file: Path to the log file to parse.

    Returns:
        dict: avg_step_time (float), avg_render_time (per-render averages),
            step_times (list of floats), render_times (list of lists).
    """
    import re

    # BUGFIX: raw string for the regex — non-raw "\d" is an invalid escape
    # sequence in modern Python. Compiled once and hoisted out of the loop.
    # The original annotation also claimed -> None while returning a dict.
    seconds_pattern = re.compile(r"\d+\.\d+(?=s)")
    step_times, render_times = [], []
    render_in_step = []
    with open(log_file, "r") as f:
        for line in f.readlines():
            seconds = seconds_pattern.search(line)
            if line.startswith("Rendering took"):
                render_in_step.append(float(seconds.group(0)))
            elif line.startswith("Simulation step took"):
                render_times.append(render_in_step)
                render_in_step = []
                step_times.append(float(seconds.group(0)))
    return {
        "avg_step_time": sum(step_times) / len(step_times),
        "avg_render_time": [
            v / len(render_times) for v in np.sum(render_times, axis=0)
        ],
        "step_times": step_times,
        "render_times": render_times,
    }
import logging
import random
from pathlib import Path
from typing import List, Tuple, Union
import bpy
import gin
import mathutils
import zpy
log = logging.getLogger(__name__)
def verify(
    obj: Union[bpy.types.Object, str],
    check_none=True,
) -> bpy.types.Object:
    """Return object given name or Object type object.

    Args:
        obj (Union[bpy.types.Object, str]): Scene object (or it's name)
        check_none (bool, optional): Raise error if object does not exist. Defaults to True.

    Raises:
        ValueError: Object does not exist.

    Returns:
        bpy.types.Object: Scene object.
    """
    # BUGFIX: keep the caller's argument so a failed name lookup reports the
    # requested name instead of None (the original interpolated `obj` after
    # overwriting it with the lookup result).
    requested = obj
    if isinstance(obj, str):
        obj = bpy.data.objects.get(obj)
    if check_none and obj is None:
        raise ValueError(f"Could not find object {requested}.")
    return obj
def load_blend_obj(
    name: str,
    path: Union[Path, str],
    link: bool = False,
) -> bpy.types.Object:
    """Load object from blend file.

    Args:
        name (str): Name of object to be loaded. Objects whose names start
            with this prefix are all pulled in; the exact-name object is
            returned.
        path (Union[Path, str]): Path to the blender file with the object.
        link (bool, optional): Whether to link object to scene. Defaults to False.

    Returns:
        bpy.types.Object: Scene object that was loaded in.
    """
    path = zpy.files.verify_path(path, make=False)
    scene = zpy.blender.verify_blender_scene()
    # Pull every matching object out of the library .blend file.
    with bpy.data.libraries.load(str(path), link=link) as (data_from, data_to):
        for from_obj in data_from.objects:
            if from_obj.startswith(name):
                log.debug(f"Loading obj {from_obj} from {str(path)}.")
                data_to.objects.append(from_obj)
    # Copy objects over to the current scene
    for obj in data_to.objects:
        scene.collection.objects.link(obj)
    # Re-link texture images from a conventional sibling folder, if any.
    for texture_folder_name in ["Textures", "textures", "TEX"]:
        texture_dir = path.parent / texture_folder_name
        if texture_dir.exists():
            bpy.ops.file.find_missing_files(directory=str(texture_dir))
            break
    return bpy.data.objects[name]
def select(
    obj: Union[bpy.types.Object, str],
) -> None:
    """Select an object, making it the only selected (and active) object.

    Args:
        obj (Union[bpy.types.Object, str]): Scene object (or it's name)
    """
    obj = verify(obj)
    log.debug(f"Selecting obj: {obj.name}")
    view_layer = zpy.blender.verify_view_layer()
    log.debug(f"Before select, bpy.context.active_object = {bpy.context.active_object}")
    log.debug(f"Before select, view_layer.objects.active = {view_layer.objects.active}")
    # De-select everything
    bpy.ops.object.select_all(action="DESELECT")
    if bpy.context.active_object is not None:
        bpy.context.active_object.select_set(False)
    view_layer.objects.active = None
    # Select the new object
    # NOTE(review): active object is set on both the view layer and the
    # context — presumably belt-and-braces against stale contexts; confirm
    # whether both assignments are still required.
    view_layer.objects.active = obj
    bpy.context.view_layer.objects.active = obj
    bpy.data.objects[obj.name].select_set(True, view_layer=view_layer)
    log.debug(f"After select, bpy.context.active_object = {bpy.context.active_object}")
    log.debug(f"After select, view_layer.objects.active = {view_layer.objects.active}")
def delete_obj(
    obj: Union[bpy.types.Object, str],
) -> None:
    """Delete an object.

    Args:
        obj (Union[bpy.types.Object, str]): Scene object (or it's name)
    """
    target = verify(obj)
    select(target)
    log.debug(f"Removing obj: {target.name}")
    # Remove the data-block directly, unlinking it from all collections.
    bpy.data.objects.remove(target, do_unlink=True)
def delete_obj_context(
    obj: Union[bpy.types.Object, str],
) -> None:
    """Alternative way to delete an object.

    Args:
        obj (Union[bpy.types.Object, str]): Scene object (or it's name)
    """
    target = verify(obj)
    log.debug(f"Removing obj: {target.name}")
    # Run the delete operator with a context override selecting only this
    # object, instead of touching bpy.data directly.
    override = bpy.context.copy()
    override["selected_objects"] = [target]
    bpy.ops.object.delete(override)
def empty_collection(
    collection: bpy.types.Collection = None,
    method: str = "data",
) -> None:
    """Delete all objects in a collection

    Args:
        collection (bpy.types.Collection, optional): Optional collection to put new object inside of. Defaults to None.
        method (str, optional): Deletetion method, the values are data and context
    """
    # Guard clause: nothing to do for a missing/unknown collection.
    if not collection or collection not in list(bpy.data.collections):
        return
    if method == "data":
        # Remove each data-block directly.
        for member in collection.all_objects:
            bpy.data.objects.remove(member, do_unlink=True)
    elif method == "context":
        # Delete everything in one operator call via a context override.
        override = bpy.context.copy()
        override["selected_objects"] = collection.all_objects
        bpy.ops.object.delete(override)
def is_inside(
    location: Union[Tuple[float], mathutils.Vector],
    obj: Union[bpy.types.Object, str],
) -> bool:
    """Is point inside a mesh.

    https://blender.stackexchange.com/questions/31693/how-to-find-if-a-point-is-inside-a-mesh

    Args:
        location (Union[Tuple[float], mathutils.Vector]): Location (3-tuple or Vector) of point in 3D space.
        obj (Union[bpy.types.Object, str]): Scene object (or it's name)

    Returns:
        bool: Whether the point is inside the mesh.
    """
    # BUG FIX: resolve a possible name to an object, matching the documented
    # signature -- previously a str argument crashed on closest_point_on_mesh.
    obj = verify(obj)
    if not isinstance(location, mathutils.Vector):
        location = mathutils.Vector(location)
    is_found, closest_point, normal, _ = obj.closest_point_on_mesh(location)
    if not is_found:
        return False
    # The point is inside when the vector from the point to the closest
    # surface point is not opposed to the outward surface normal.
    p2 = closest_point - location
    v = p2.dot(normal)
    return not (v < 0.0)
def for_obj_in_selected_objs(context) -> bpy.types.Object:
    """Safe iterable for selected objects.

    Yields:
        bpy.types.Object: Objects in selected objects.
    """
    zpy.blender.verify_view_layer()
    for obj in context.selected_objects:
        # Only meshes or empty objects TODO: Why the empty objects
        if obj.type not in ("MESH", "EMPTY"):
            continue
        # Skip objects that no longer exist in the scene
        if bpy.data.objects.get(obj.name, None) is None:
            continue
        context.view_layer.objects.active = obj
        yield obj
def for_obj_in_collections(
    collections: List[bpy.types.Collection],
    filter_mesh: bool = False,
) -> bpy.types.Object:
    """Yield objects in list of collection.

    Args:
        collections (List[bpy.types.Collection]): Collections to iterate.
        filter_mesh (bool, optional): When True, yield only MESH objects. Defaults to False.

    Yields:
        bpy.types.Object: Object in collection.
    """
    for collection in collections:
        # TODO: Windows does not like this
        if not len(collection.objects) > 0:
            log.debug(f"Collection {collection.name} is empty, skipping...")
            continue
        for obj in collection.all_objects:
            # BUG FIX: previously both branches yielded every object,
            # so filter_mesh had no effect at all.
            if filter_mesh and obj.type != "MESH":
                continue
            # Fetch by name for direct access to the data block
            yield bpy.data.objects[obj.name]
def toggle_hidden(
    obj: Union[bpy.types.Object, str],
    hidden: bool = True,
    filter_string: str = None,
) -> None:
    """Recursive function to make object and children invisible.

    Optionally filter by a string in object name.

    Args:
        obj (Union[bpy.types.Object, str]): Scene object (or it's name)
        hidden (bool, optional): Whether to hide or un-hide object. Defaults to True.
        filter_string (str, optional): Filter objects to hide based on name containing this string. Defaults to None.
    """
    obj = verify(obj)
    # Guard clause: bail out (without recursing) on objects that cannot hide
    if not (hasattr(obj, "hide_render") and hasattr(obj, "hide_viewport")):
        log.warning("Object does not have hide properties")
        return
    if (filter_string is None) or (filter_string in obj.name):
        log.debug(f"Hiding object {obj.name}")
        data_obj = bpy.data.objects[obj.name]
        data_obj.select_set(True)
        data_obj.hide_render = hidden
        data_obj.hide_viewport = hidden
    else:
        log.debug(
            f"Object {obj.name} does not contain filter string {filter_string}"
        )
    # Recurse into children regardless of the name filter
    for child in obj.children:
        toggle_hidden(child, hidden=hidden, filter_string=filter_string)
def randomly_hide_within_collection(
    collections: List[bpy.types.Collection],
    chance_to_hide: float = 0.9,
) -> None:
    """Randomly hide objects in a list of collections.

    Args:
        collections (List[bpy.types.Collection]): A scene collection.
        chance_to_hide (float, optional): Probability of hiding an object in the collection. Defaults to 0.9.
    """
    names_to_hide = [
        obj.name
        for obj in for_obj_in_collections(collections)
        if random.random() < chance_to_hide
    ]
    # HACK: hide objects by name, this causes segfault
    # if done in the for loop above, due to some kind of
    # pass by reference vs by value shenaniganry going on
    # with blender python sitting on top of blender C
    for name in names_to_hide:
        hidden_obj = bpy.data.objects[name]
        hidden_obj.select_set(True)
        hidden_obj.hide_render = True
        hidden_obj.hide_viewport = True
def segment(
    obj: Union[bpy.types.Object, str],
    name: str = "default",
    color: Tuple[float] = None,
    as_category: bool = False,
    as_single: bool = False,
) -> None:
    """Segment an object.

    Args:
        obj (Union[bpy.types.Object, str]): Scene object (or it's name)
        name (str, optional): Name of category or instance. Defaults to 'default'.
        color (Tuple[float], optional): Segmentation color. Defaults to None (random).
        as_category (bool, optional): Segment as a category, if false will segment as instance. Defaults to False.
        as_single (bool, optional): Segment all child objects as well. Defaults to False.
    """
    # Sculpt vertex colors are behind an experimental flag in some builds
    experimental = bpy.context.preferences.experimental
    if "use_sculpt_vertex_colors" in dir(experimental):
        experimental.use_sculpt_vertex_colors = True
    obj = verify(obj)
    if color is None:
        color = zpy.color.random_color(output_style="frgb")
    obj.color = zpy.color.frgb_to_frgba(color)
    if as_category:
        seg_type = "category"
        obj.seg.category_name = name
        obj.seg.category_color = color
    else:
        seg_type = "instance"
        obj.seg.instance_name = name
        obj.seg.instance_color = color
    # Make sure object material is set up correctly with AOV nodes
    populate_vertex_colors(obj, zpy.color.frgb_to_frgba(color), seg_type)
    zpy.material.make_aov_material_output_node(obj=obj, style=seg_type)
    # Recursively segment children with identical settings
    if as_single:
        for child in obj.children:
            segment(
                obj=child,
                name=name,
                color=color,
                as_category=as_category,
                as_single=as_single,
            )
def populate_vertex_colors(
    obj: Union[bpy.types.Object, str],
    color_rgba: Tuple[float],
    seg_type: str = "instance",
) -> None:
    """Fill the given Vertex Color Layer with the color parameter values.

    Args:
        obj (Union[bpy.types.Object, str]): Scene object (or it's name)
        color_rgba (Tuple[float]): Segmentation color.
        seg_type (str, optional): Instance or Category segmentation. Defaults to 'instance'.
    """
    obj = verify(obj)
    if not obj.type == "MESH":
        log.warning(f"Object {obj.name} is not a mesh, has no vertices.")
        return
    # TODO: Is this select needed?
    # select(obj)
    # Remove any existing vertex color layers that match this seg type.
    # BUG FIX: iterate a snapshot of the keys (removal mutates the
    # collection) and remove the *matched* layer by its own name -- the
    # old code removed by seg_type, which raises KeyError when only a
    # partial match (e.g. "instance_old") exists.
    for vcol_name in list(obj.data.sculpt_vertex_colors.keys()):
        if seg_type in vcol_name:
            obj.data.sculpt_vertex_colors.remove(
                obj.data.sculpt_vertex_colors[vcol_name]
            )
    # Add new vertex color data
    obj.data.sculpt_vertex_colors.new(name=seg_type)
    # Iterate through each vertex in the mesh
    for i, _ in enumerate(obj.data.vertices):
        obj.data.sculpt_vertex_colors[seg_type].data[i].color = color_rgba
def random_position_within_constraints(
    obj: Union[bpy.types.Object, str],
) -> None:
    """Randomize position of object within its "Limit Location" constraint.

    Does nothing when the object has no such constraint.

    Args:
        obj (Union[bpy.types.Object, str]): Scene object (or it's name)
    """
    obj = verify(obj)
    limit = obj.constraints.get("Limit Location", None)
    if limit is None:
        return
    obj.location.x = random.uniform(limit.min_x, limit.max_x)
    obj.location.y = random.uniform(limit.min_y, limit.max_y)
    obj.location.z = random.uniform(limit.min_z, limit.max_z)
@gin.configurable
def copy(
    obj: Union[bpy.types.Object, str],
    name: str = None,
    collection: bpy.types.Collection = None,
    is_library_object: bool = False,
    is_copy: bool = True,
) -> bpy.types.Object:
    """Create a copy of the object.

    Args:
        obj (Union[bpy.types.Object, str]): Scene object (or it's name)
        name (str, optional): New name for the copied object. Defaults to None.
        collection (bpy.types.Collection, optional): Optional collection to put new object inside of. Defaults to None.
        is_library_object (bool, optional): Whether object is part of a linked library. Defaults to False.
        is_copy (bool, optional): Make a deep copy of the mesh data block

    Returns:
        bpy.types.Object: The newly created scene object.
    """
    obj = verify(obj)
    obj_data = obj.data
    if is_copy:
        obj_data = obj.data.copy()
    new_obj = bpy.data.objects.new(obj.name, obj_data)
    if name is not None:
        new_obj.name = name
    if collection is not None:
        collection.objects.link(new_obj)
    else:
        # Add to scene collection if no collection given.
        # BUG FIX: previously linked the *source* object here instead of
        # the copy, raising (source already linked) and orphaning new_obj.
        scene = zpy.blender.verify_blender_scene()
        scene.collection.objects.link(new_obj)
    # TODO: Library Overriding functions
    if is_library_object:
        log.warning(f"Making mesh and material data local for obj {new_obj.name}")
        new_obj.data.make_local()
        for i in range(len(new_obj.material_slots)):
            bpy.data.objects[new_obj.name].material_slots[i].material.make_local()
        # Original object reference is lost if local copies are made
        new_obj = bpy.data.objects[new_obj.name]
    return new_obj
def translate(
    obj: Union[bpy.types.Object, str],
    translation: Union[Tuple[float], mathutils.Vector] = (0, 0, 0),
    is_absolute: bool = False,
) -> None:
    """Translate an object (in blender units).

    Args:
        obj (Union[bpy.types.Object, str]): Scene object (or it's name)
        translation (Union[Tuple[float], mathutils.Vector], optional): Translation vector (x, y, z). Default (0, 0, 0).
        is_absolute: (bool, optional): The translation vector becomes the absolute world position
    """
    obj = verify(obj)
    log.debug(f"Translating object {obj.name} by {translation}")
    log.debug(f"Before - obj.matrix_world\n{obj.matrix_world}")
    offset = (
        translation
        if isinstance(translation, mathutils.Vector)
        else mathutils.Vector(translation)
    )
    # Either set the world position outright or shift from the current one
    if is_absolute:
        obj.location = offset
    else:
        obj.location = obj.location + offset
    log.debug(f"After - obj.matrix_world\n{obj.matrix_world}")
def rotate(
    obj: Union[bpy.types.Object, str],
    rotation: Union[Tuple[float], mathutils.Euler] = (0.0, 0.0, 0.0),
    axis_order: str = "XYZ",
) -> None:
    """Rotate the given object with Euler angles.

    Args:
        obj (Union[bpy.types.Object, str]): Scene object (or it's name)
        rotation (Union[Tuple[float], mathutils.Euler]): Rotation values in radians
        axis_order (str, optional): Axis order of rotation
    """
    obj = verify(obj)
    view_layer = zpy.blender.verify_view_layer()
    select(obj)
    log.info(f"Rotating object {obj.name} by {rotation} radians in {axis_order}. ")
    log.debug(f"Before - obj.matrix_world\n{obj.matrix_world}")
    if not isinstance(rotation, mathutils.Euler):
        rotation = mathutils.Euler(rotation)
    # Compose the requested rotation on top of the object's current rotation
    composed = rotation.to_matrix() @ obj.rotation_euler.to_matrix()
    obj.rotation_euler = mathutils.Euler(composed.to_euler(axis_order), axis_order)
    view_layer.update()
    log.debug(f"After - obj.matrix_world\n{obj.matrix_world}")
def scale(
    obj: Union[bpy.types.Object, str], scale: Tuple[float] = (1.0, 1.0, 1.0)
) -> None:
    """Scale an object.

    Args:
        obj (Union[bpy.types.Object, str]): Scene object (or it's name)
        scale (Tuple[float], optional): Scale for each axis (x, y, z). Defaults to (1.0, 1.0, 1.0).
    """
    obj = verify(obj)
    view_layer = zpy.blender.verify_view_layer()
    # The resize operator acts on the current selection
    select(obj)
    log.info(f"Scaling object {obj.name} by {scale}")
    log.debug(f"Before - obj.matrix_world\n{obj.matrix_world}")
    bpy.ops.transform.resize(value=scale)
    view_layer.update()
    log.debug(f"After - obj.matrix_world\n{obj.matrix_world}")
def jitter_mesh(
    obj: Union[bpy.types.Object, str],
    scale: Tuple[float] = (0.01, 0.01, 0.01),
) -> None:
    """Randomize the vertex coordinates of a mesh object.

    Args:
        obj (Union[bpy.types.Object, str]): Scene object (or it's name)
        scale (Tuple[float], optional): Scale for vertex offset in each axis (x, y, z). Defaults to (0.01, 0.01, 0.01).
    """
    obj = verify(obj)
    if obj.type != "MESH":
        log.warning("Jitter mesh requires object to be of type MESH")
        return
    for vertex in obj.data.vertices:
        # Noise amplitude is proportional to the object dimensions
        noise = mathutils.Vector(
            (
                random.uniform(-1.0, 1.0) * obj.dimensions.x * scale[0],
                random.uniform(-1.0, 1.0) * obj.dimensions.y * scale[1],
                random.uniform(-1.0, 1.0) * obj.dimensions.z * scale[2],
            )
        )
        vertex.co = vertex.co + noise
def jitter(
    obj: Union[bpy.types.Object, str],
    translate_range: Tuple[Tuple[float]] = (
        (0, 0),
        (0, 0),
        (0, 0),
    ),
    rotate_range: Tuple[Tuple[float]] = (
        (0, 0),
        (0, 0),
        (0, 0),
    ),
    scale_range: Tuple[Tuple[float]] = (
        (1.0, 1.0),
        (1.0, 1.0),
        (1.0, 1.0),
    ),
) -> None:
    """Apply random translation (blender units), rotation (radians), and scale to object.

    Args:
        obj (Union[bpy.types.Object, str]): Scene object (or it's name)
        translate_range (Tuple[Tuple[float]], optional): (min, max) of uniform noise on translation in (x, y, z) axes.
            Defaults to ( (0, 0), (0, 0), (0, 0), ), i.e. no translation.
        rotate_range (Tuple[Tuple[float]], optional): (min, max) of uniform noise on rotation in (x, y, z) axes.
            Defaults to ( (0, 0), (0, 0), (0, 0), ), i.e. no rotation.
        scale_range (Tuple[Tuple[float]], optional): (min, max) of uniform noise on scale in (x, y, z) axes.
            Defaults to ( (1.0, 1.0), (1.0, 1.0), (1.0, 1.0), ), i.e. no scaling.
    """
    obj = verify(obj)
    translate(
        obj,
        translation=(
            random.uniform(translate_range[0][0], translate_range[0][1]),
            random.uniform(translate_range[1][0], translate_range[1][1]),
            random.uniform(translate_range[2][0], translate_range[2][1]),
        ),
    )
    rotate(
        obj,
        rotation=(
            random.uniform(rotate_range[0][0], rotate_range[0][1]),
            random.uniform(rotate_range[1][0], rotate_range[1][1]),
            random.uniform(rotate_range[2][0], rotate_range[2][1]),
        ),
    )
    scale(
        obj,
        scale=(
            random.uniform(scale_range[0][0], scale_range[0][1]),
            random.uniform(scale_range[1][0], scale_range[1][1]),
            random.uniform(scale_range[2][0], scale_range[2][1]),
        ),
    )
# Module-level registry mapping pose name -> saved world matrix;
# written by save_pose() and read by restore_pose().
_SAVED_POSES = {}
def save_pose(
    obj: Union[bpy.types.Object, str],
    pose_name: str = None,
) -> None:
    """Save a pose (rot and pos) to dict.

    Args:
        obj (Union[bpy.types.Object, str]): Scene object (or it's name)
        pose_name (str): Name of saved pose (stored in internal _SAVED_POSES dict).
            Defaults to the object's name.
    """
    obj = verify(obj)
    # FIX: resolve the default pose name *before* logging so the log line
    # shows the key actually used instead of "None".
    if pose_name is None:
        pose_name = obj.name
    log.info(f"Saving pose {pose_name} based on object {obj.name}")
    _SAVED_POSES[pose_name] = obj.matrix_world.copy()
def restore_pose(
    obj: Union[bpy.types.Object, str],
    pose_name: str = None,
) -> None:
    """Restore an object to a position.

    Args:
        obj (Union[bpy.types.Object, str]): Scene object (or it's name)
        pose_name (str): Name of saved pose (must be in internal _SAVED_POSES dict).
            Defaults to the object's name.

    Raises:
        KeyError: If no pose was saved under pose_name.
    """
    obj = verify(obj)
    # FIX: resolve the default pose name *before* logging so the log line
    # shows the key actually used instead of "None".
    if pose_name is None:
        pose_name = obj.name
    log.info(f"Restoring pose {pose_name} to object {obj.name}")
    obj.matrix_world = _SAVED_POSES[pose_name]
def lighting_randomize(
    energy_jitter: bool = True,
    jitter: bool = True,
    energy_range_sun: Tuple[int] = (1, 20),
    energy_range_point: Tuple[int] = (100, 500),
) -> None:
    """Randomizes Lighting.

    Args:
        energy_jitter: bool: Whether to jitter the lighting intensity for lights in scene. Defaults to True
        jitter: bool: Whether to jitter the position of a light. Defaults to True
        energy_range_sun: Tuple[int]: Range for intensity for SUN lights in scene.
        energy_range_point: Tuple[int]: Range for intensity for POINT, SPOT, and AREA lights in scene.
    """
    # check if lights are in the scene, if not log error
    # NOTE(review): this logs once per *non-light* object rather than once
    # when no lights exist at all -- likely intended to be an any() check.
    for obj in bpy.context.scene.objects:
        if obj.type == "LIGHT":
            continue
        else:
            log.debug("add lights to use this function")
    # Loop through objects in scene and randomly toggle them on and off in the render,
    # these will still be visible in preview scene
    for obj in bpy.data.lights:
        if obj.type == "POINT" or obj.type == "SPOT" or obj.type == "AREA":
            bpy.data.objects[obj.name].hide_render = bool(random.randint(0, 1))
            if energy_jitter:
                bpy.data.objects[obj.name].data.energy = random.randint(
                    *energy_range_point
                )
        if obj.type == "SUN":
            bpy.data.objects[obj.name].hide_render = bool(random.randint(0, 1))
            if energy_jitter:
                bpy.data.objects[obj.name].data.energy = random.randint(
                    *energy_range_sun
                )
    # bpy.data.scenes["Scene"].world.use_nodes = True
    # NOTE(review): this block sits *outside* the loop above, so it jitters
    # only the last light iterated (and raises NameError if there are no
    # lights). Also the y range (-2, -2) is degenerate (always -2) and looks
    # like a typo for (-2, 2) -- confirm intent before changing.
    if jitter:
        zpy.objects.jitter(
            obj.name,
            translate_range=(
                (-2, 2),
                (-2, -2),
                (1, 5),
            ),
        )
import logging
from pathlib import Path
from typing import List, Tuple, Union
import zpy
log = logging.getLogger(__name__)
# Color palette state. COLORS is populated lazily from COLORS_FILE by
# reset(), which is invoked on first use (see default_color / random_color).
# RANDOM_COLOR_IDX points at the next palette entry random_color() will
# hand out; it starts at 1 because entry 0 is the default color.
RANDOM_COLOR_IDX = 1
COLORS_FILE = "colors.json"
COLORS = None
def reset(random_color_idx: int = 1):
    """Load the color palette from file and reset the random color index."""
    global COLORS, RANDOM_COLOR_IDX
    colors_path = Path(__file__).parent / COLORS_FILE
    zpy.files.verify_path(colors_path)
    COLORS = zpy.files.read_json(colors_path)
    RANDOM_COLOR_IDX = random_color_idx
def hex_to_irgb(hex_value: str) -> Tuple[int]:
    """Convert hex value (e.g. "#aabbcc") to integer rgb (0 to 255)."""
    value = int(hex_value[1:], 16)
    # Unpack the 24-bit integer into its three 8-bit channels
    r = (value >> 16) & 0xFF
    g = (value >> 8) & 0xFF
    b = value & 0xFF
    return r, g, b
def frgb_to_frgba(rgb: Tuple[float], a=1.0) -> Tuple[float]:
    """Convert 3-channel RGB to 4-channel RGBA (alpha defaults to opaque)."""
    return tuple(rgb) + (a,)
def hex_to_frgb(hex_value: str) -> Tuple[float]:
    """Convert hex value to float rgb (0 to 1)."""
    irgb = hex_to_irgb(hex_value)
    return irgb_to_frgb(irgb)
def irgb_to_frgb(irgb: Tuple[int]) -> Tuple[float]:
    """Convert integer rgb (0 to 255) to float rgb (0 to 1)."""
    return tuple(channel / 255 for channel in irgb)
def irgb_to_hex(irgb: Tuple[int]) -> str:
    """Convert integer rgb (0 to 255) to a hex string like "#aabbcc"."""
    return "#" + "".join(f"{channel:02x}" for channel in irgb)
def frgb_to_irgb(frgb: Tuple[float]) -> Tuple[int]:
    """Convert float rgb (0 to 1) to integer rgb (0 to 255), truncating."""
    return tuple(int(channel * 255) for channel in frgb)
def frgb_to_hex(frgb: Tuple[float]) -> str:
    """Convert float rgb (0 to 1) to hex."""
    irgb = frgb_to_irgb(frgb)
    return irgb_to_hex(irgb)
def _gamma_correction(channel: float) -> float:
"""Convert float rgb (0 to 1) to the gamma-corrected sRGB float (0 to 1)
for a single color channel."""
exponent_srgb = 1 / 2.2
return channel ** exponent_srgb
def frgb_to_srgb(frgb: Tuple[float]) -> Tuple[float]:
    """Convert float rgb (0 to 1) to the gamma-corrected sRGB float (0 to 1)."""
    return tuple(map(_gamma_correction, frgb))
def frgb_to_srgba(frgb: Tuple[float], a=1.0) -> Tuple[float]:
    """Convert float rgb (0 to 1) to the gamma-corrected sRGBA float (0 to 1)."""
    return frgb_to_frgba(frgb_to_srgb(frgb), a=a)
def _output_style(
    name: str, hex: str, output_style: str
) -> Union[Tuple[float, int, str], str]:
    """Convert a named hex color to the requested output style.

    Args:
        name: Human-readable color name.
        hex: Hex string such as "#aabbcc".
        output_style: One of frgb, frgba, irgb, hex, name_irgb, name_frgb, name_frgba.

    Raises:
        ValueError: If output_style is not a recognized style.
    """
    # Consistent elif chain (the original mixed bare ifs into the chain).
    if output_style == "frgb":
        return hex_to_frgb(hex)
    elif output_style == "frgba":
        return frgb_to_frgba(hex_to_frgb(hex))
    elif output_style == "irgb":
        return hex_to_irgb(hex)
    elif output_style == "hex":
        return hex
    elif output_style == "name_irgb":
        return name, hex_to_irgb(hex)
    elif output_style == "name_frgb":
        return name, hex_to_frgb(hex)
    elif output_style == "name_frgba":
        return name, frgb_to_frgba(hex_to_frgb(hex))
    else:
        # FIX: the old message omitted most of the supported styles.
        raise ValueError(
            "output_style must be one of: "
            "frgb, frgba, irgb, hex, name_irgb, name_frgb, name_frgba."
        )
def default_color(output_style: str = "frgb") -> Union[Tuple[float, int, str], str]:
    """Default color (first entry of the palette)."""
    global COLORS
    # Lazily load the palette on first use
    if COLORS is None:
        reset()
    entry = COLORS[0]
    log.debug(
        f"Default color chosen is {entry['name']} - {entry['hex']} - {hex_to_frgb(entry['hex'])}"
    )
    return _output_style(entry["name"], entry["hex"], output_style=output_style)
def random_color(output_style: str = "frgb") -> Union[Tuple[float, int, str], str]:
    """Random color.

    This will go through a pre-baked list every time,
    to prevent different random seeds from changing the
    color for a category.
    """
    global RANDOM_COLOR_IDX, COLORS
    if COLORS is None:
        reset()
    _name = COLORS[RANDOM_COLOR_IDX]["name"]
    _hex = COLORS[RANDOM_COLOR_IDX]["hex"]
    # Update global color idx
    RANDOM_COLOR_IDX += 1
    # BUG FIX: use >= (was >). Valid indices run 0..len(COLORS)-1, so the
    # palette is exhausted once the idx reaches len(COLORS); the old strict
    # check never fired before the IndexError on the next call.
    if RANDOM_COLOR_IDX >= len(COLORS):
        log.error("Ran out of unique colors!")
    log.debug(f"Random color chosen is {_name} - {_hex} - {hex_to_frgb(_hex)}")
    return _output_style(_name, _hex, output_style=output_style)
def closest_color(
    color: Tuple[float],
    colors: List[Tuple[float]],
    max_dist: float = 0.01,
) -> Union[None, Tuple[float]]:
    """Return the color in `colors` nearest to `color`.

    Distance is squared euclidean over the first three channels. Returns
    None when the nearest candidate is farther than max_dist.
    """
    best_dist = 3.0
    best_idx = 0
    for idx, candidate in enumerate(colors):
        # Squared distance over the (r, g, b) channels
        dist = sum((color[c] - candidate[c]) ** 2 for c in range(3))
        if dist < best_dist:
            best_dist = dist
            best_idx = idx
    if best_dist > max_dist:
        log.debug(f"No color close enough w/ maxmimum distance of {max_dist}")
        return None
    return colors[best_idx]
import logging
from pathlib import Path
from typing import Dict, Tuple, Union
import gin
import numpy as np
import zpy
log = logging.getLogger(__name__)
@gin.configurable
class ImageSaver(zpy.saver.Saver):
    """Saver class for Image based datasets."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # image id -> image annotation dict
        self.images = {}
        # image name -> image id
        self.image_name_to_id = {}
        # seg image name -> {seg color -> annotation id}
        self.seg_annotations_color_to_id = {}

    @gin.configurable
    def add_image(
        self,
        name: str = "default image",
        style: str = "default",
        output_path: Union[Path, str] = "/tmp/test.png",
        frame: int = 0,
        width: int = 640,
        height: int = 480,
        zero_indexed: bool = True,
        **kwargs,
    ) -> Dict:
        """Add a new image annotation to the Saver object.

        Pass any additional keys you want in the image annotation dict as kwargs.

        Args:
            name (str, optional): Unique image name. Defaults to 'default image'.
            style (str, optional): Type of image in [default, segmenation, depth]. Defaults to 'default'.
            output_path (Union[Path, str], optional): Path to image file. Defaults to '/tmp/test.png'.
            frame (int, optional): Frame is used to link images taken at the same moment in time. Defaults to 0.
            width (int, optional): Width of image. Defaults to 640.
            height (int, optional): Height of image. Defaults to 480.
            zero_indexed (bool, optional): Whether image id is zero indexed. Defaults to True.

        Returns:
            Dict: The image annotation dictionary.
        """
        image = {
            "name": name,
            "style": style,
            "output_path": str(output_path),
            "relative_path": str(Path(output_path).relative_to(self.output_dir)),
            "frame": frame,
            "width": width,
            "height": height,
        }
        image.update(**kwargs)
        # Ids are sequential in insertion order
        image["id"] = len(self.images.keys())
        image["id"] += 0 if zero_indexed else 1
        log.debug(f"Adding image: {zpy.files.pretty_print(image)}")
        self.images[image["id"]] = image
        self.image_name_to_id[name] = image["id"]
        return image

    @gin.configurable
    def add_annotation(
        self,
        image: str = "default image",
        category: str = None,
        subcategory: str = None,
        subcategory_zero_indexed: bool = True,
        seg_image: str = None,
        seg_color: Tuple[float] = None,
        parse_on_add: bool = True,
        **kwargs,
    ) -> Dict:
        """Add a new annotation to the Saver object.

        Pass any additional keys you want in the annotation dict as kwargs.

        Args:
            image (str, optional): Unique image name. Defaults to 'default image'.
            category (str, optional): Name of category. Defaults to None.
            subcategory (str, optional): Name of subcategory. Defaults to None.
            subcategory_zero_indexed (bool, optional): Whether subcategories are zero-indexed. Defaults to True.
            seg_image (str, optional): Name of segmentation image that corresponds to this image. Defaults to None.
            seg_color (Tuple[float], optional): Segmentation color of entity in this annotation. Defaults to None.
            parse_on_add (bool, optional): Calculate bounding box and polygons and populate segmenation fields.
                Defaults to True.

        Returns:
            Dict: The annotation dictionary.
        """
        image_id = self.image_name_to_id.get(image, None)
        assert image_id is not None, f"Could not find id for image {image}"
        assert category is not None, "Must provide a category for annotation."
        category_id = self.category_name_to_id.get(category, None)
        assert category_id is not None, f"Could not find id for category {category}"
        self.categories[category_id]["count"] += 1
        annotation = {
            "image_id": image_id,
            "category_id": category_id,
        }
        if subcategory is not None:
            subcategory_id = self.categories[category_id]["subcategories"].index(
                subcategory
            )
            self.categories[category_id]["subcategory_count"][subcategory_id] += 1
            subcategory_id += 0 if subcategory_zero_indexed else 1
            annotation["subcategory_id"] = subcategory_id
        annotation.update(**kwargs)
        annotation["id"] = len(self.annotations)
        log.info(f"Adding annotation: {zpy.files.pretty_print(annotation)}")
        # For segmentation images, add bbox/poly/mask annotation
        if seg_image is not None and seg_color is not None:
            seg_image_id = self.image_name_to_id.get(seg_image, None)
            assert seg_image_id is not None, f"Could not find id for image {seg_image}"
            annotation["seg_color"] = seg_color
            if self.seg_annotations_color_to_id.get(seg_image, None) is None:
                self.seg_annotations_color_to_id[seg_image] = {}
            self.seg_annotations_color_to_id[seg_image][seg_color] = annotation["id"]
        self.annotations.append(annotation)
        # This call creates correspondences between segmentation images
        # and the annotations. It should be used after both the images
        # and annotations have been added to the saver.
        if parse_on_add:
            self.parse_annotations_from_seg_image(image_name=seg_image)
        return annotation

    def parse_annotations_from_seg_image(
        self,
        image_name: str,
    ) -> None:
        """Populate annotation field based on segmentation image.

        Args:
            image_name (str): Name of image in which to put parse out segmentations.

        Raises:
            ValueError: Image is not a segmentation image.
        """
        # Verify that file is segmentation image
        is_iseg = zpy.files.file_is_of_type(image_name, "instance segmentation image")
        is_cseg = zpy.files.file_is_of_type(image_name, "class segmentation image")
        if not (is_iseg or is_cseg):
            raise ValueError("Image is not segmentation image")
        seg_image_id = self.image_name_to_id.get(image_name, None)
        assert seg_image_id is not None, f"Could not find id for image {image_name}"
        image_path = self.images[seg_image_id]["output_path"]
        if self.seg_annotations_color_to_id.get(image_name, None) is None:
            log.warning(f"No annotations found for {image_name}")
            # BUG FIX: bail out here -- indexing
            # self.seg_annotations_color_to_id[image_name] below would
            # raise KeyError when no annotations were ever recorded.
            return
        for annotation in zpy.image.seg_to_annotations(image_path):
            if (
                self.seg_annotations_color_to_id[image_name].get(
                    annotation["color"], None
                )
                is None
            ):
                log.warning(
                    f'No annotations found for color {annotation["color"]} in {image_name}'
                )
                log.warning(
                    f"Available colors are {list(self.seg_annotations_color_to_id[image_name].keys())}"
                )
                closest_color = zpy.color.closest_color(
                    annotation["color"],
                    list(self.seg_annotations_color_to_id[image_name].keys()),
                )
                if closest_color is None:
                    log.warning("Could not find close enough color, skipping ...")
                    continue
                else:
                    log.warning(f"Using closest color {closest_color}")
                    idx = self.seg_annotations_color_to_id[image_name][closest_color]
            else:
                idx = self.seg_annotations_color_to_id[image_name][annotation["color"]]
            self.annotations[idx].update(annotation)

    @gin.configurable
    def output_annotated_images(
        self,
        num_annotated_images: int = 10,
    ) -> None:
        """Dump annotated sampled images to the meta folder.

        Args:
            num_annotated_images (int, optional): Number of annotation images to output. Defaults to 10.
        """
        log.info("Output annotated images...")
        import zpy.viz

        output_path = self.output_dir / self.HIDDEN_METAFOLDER_FILENAME
        output_path = zpy.files.verify_path(output_path, make=True, check_dir=True)
        for i, image in enumerate(self.images.values()):
            # Annotation images take a while
            if i >= num_annotated_images:
                return
            annotations = []
            for annotation in self.annotations:
                if annotation["image_id"] == image["id"]:
                    annotations.append(annotation)
            if len(annotations) > 0:
                zpy.viz.draw_annotations(
                    image_path=Path(image["output_path"]),
                    annotations=annotations,
                    categories=self.categories,
                    output_path=output_path,
                )

    @gin.configurable
    def output_meta_analysis(
        self,
        image_sample_size: int = 50,
    ) -> None:
        """Perform a full meta analysis, outputting some meta files.

        Args:
            image_sample_size (int, optional): How many images to sample for meta analysis. Defaults to 50.
        """
        log.info(f"perform meta analysis image_sample_size:{image_sample_size}...")
        import zpy.files

        image_paths = [
            i["output_path"] for i in self.images.values() if i["style"] == "default"
        ]
        image_paths = zpy.files.sample(image_paths, sample_size=image_sample_size)
        opened_images = [zpy.image.open_image(i) for i in image_paths]
        flat_images = zpy.image.flatten_images(opened_images)
        pixel_mean_std = zpy.image.pixel_mean_std(flat_images)
        meta_dict = {
            "number_images": len(self.images),
            "number_annotations": len(self.annotations),
            "number_categories": len(self.categories),
            "category_names": [c["name"] for c in self.categories.values()],
            "pixel_mean": np.array2string(pixel_mean_std["mean"], precision=2),
            "pixel_std": np.array2string(pixel_mean_std["std"], precision=2),
            "pixel_256_mean": np.array2string(pixel_mean_std["mean_256"], precision=0),
            "pixel_256_std": np.array2string(pixel_mean_std["std_256"], precision=0),
        }
        output_path = self.output_dir / self.HIDDEN_METAFOLDER_FILENAME
        output_path = zpy.files.verify_path(output_path, make=True, check_dir=True)
        self.write_datasheet(output_path / self.HIDDEN_DATASHEET_FILENAME, meta_dict)
        # Visualization is best-effort: failures are logged, not raised
        try:
            import zpy.viz

            zpy.viz.image_grid_plot(images=opened_images, output_path=output_path)
            zpy.viz.image_shape_plot(images=opened_images, output_path=output_path)
            zpy.viz.color_correlations_plot(
                flat_images=flat_images, output_path=output_path
            )
            zpy.viz.pixel_histograms(flat_images=flat_images, output_path=output_path)
            zpy.viz.category_barplot(
                categories=self.categories, output_path=output_path
            )
        except Exception as e:
            log.warning(f"Error when visualizing {e}")
            pass
import logging
import os
from pathlib import Path
import bpy
import zpy
log = logging.getLogger(__name__)
def get_asset_lib_path() -> Path:
    """Returns path to asset library location.

    Defaults to the directory of the blenderfile.

    Returns:
        Path: pathlib.Path object to library location, or None when the
            $ASSETS environment variable is unset.
    """
    raw_path = os.environ.get("ASSETS", None)
    if raw_path is None:
        log.warning("Could not find environment variable $ASSETS")
        return None
    verified = zpy.files.verify_path(raw_path, check_dir=True)
    log.info(f"Found assets path at {verified}")
    return verified
def script_template_dir() -> Path:
    """Path to the script templates for zpy addon.

    Returns:
        pathlib.Path: Path to script templates for zpy addon.
    """
    user_scripts = Path(bpy.utils.script_path_user())
    templates = user_scripts / "addons" / "zpy_addon" / "templates"
    return zpy.files.verify_path(templates, check_dir=True)
def hdri_dir() -> Path:
    """Path to the HDRI directory.

    Returns:
        pathlib.Path: Path to HDRI directory.
    """
    asset_lib = zpy.assets.get_asset_lib_path()
    if asset_lib is None:
        # Fall back to Blender's bundled studio lights (exr)
        local_resources = Path(bpy.utils.resource_path("LOCAL"))
        directory = local_resources / "datafiles" / "studiolights" / "world"
    else:
        directory = asset_lib / "lib" / "hdris" / "1k"
    directory = zpy.files.verify_path(directory, check_dir=True)
    log.debug(f"Using HDRI directory at {directory}")
    return directory
def texture_dir() -> Path:
    """Path to the textures directory.

    Returns:
        pathlib.Path: Path to textures directory.
    """
    asset_lib = zpy.assets.get_asset_lib_path()
    if asset_lib is None:
        # Fall back to the textures bundled with the zpy addon
        user_scripts = Path(bpy.utils.script_path_user())
        directory = user_scripts / "addons" / "zpy_addon" / "assets"
    else:
        directory = asset_lib / "lib" / "textures" / "random_512p"
    directory = zpy.files.verify_path(directory, check_dir=True)
    log.debug(f"Using texture directory at {directory}")
    return directory
import logging
import math
import random
from pathlib import Path
from typing import Tuple, Union
import bpy
import mathutils
import gin
import zpy
log = logging.getLogger(__name__)
@gin.configurable
def load_hdri(
    path: Union[Path, str],
    scale: Tuple[float] = (1.0, 1.0, 1.0),
    random_z_rot: bool = True,
) -> None:
    """Load an HDRI from path.

    Args:
        path (Union[Path, str]): Path to the HDRI.
        scale (Tuple[float], optional): Scale in (x, y, z). Defaults to (1.0, 1.0, 1.0).
        random_z_rot (bool, optional): Randomly rotate HDRI around Z axis. Defaults to True.
    """
    scene = zpy.blender.verify_blender_scene()
    scene.world.use_nodes = True
    node_tree = scene.world.node_tree
    # Build (or fetch) the world shader node chain:
    # TexCoord -> VectorRotate -> EnvironmentTexture -> Background -> Output
    output_node = zpy.nodes.get_or_make(
        "World Output", "ShaderNodeOutputWorld", node_tree, pos=(0, 0)
    )
    background_node = zpy.nodes.get_or_make(
        "Background", "ShaderNodeBackground", node_tree, pos=(-150, 0)
    )
    environment_node = zpy.nodes.get_or_make(
        "Environment Texture", "ShaderNodeTexEnvironment", node_tree, pos=(-400, 0)
    )
    log.info(f"Loading HDRI at {path}")
    path = zpy.files.verify_path(path, make=False)
    environment_node.image = bpy.data.images.load(str(path))
    environment_node.texture_mapping.scale = mathutils.Vector(scale)
    rotation_node = zpy.nodes.get_or_make(
        "World Rotation", "ShaderNodeVectorRotate", node_tree, pos=(-550, 0)
    )
    rotation_node.rotation_type = "Z_AXIS"
    if random_z_rot:
        z_rotation = random.uniform(0, math.pi)
        log.debug(f"Rotating HDRI randomly along Z axis to {z_rotation}")
        rotation_node.inputs["Angle"].default_value = z_rotation
    texcoord_node = zpy.nodes.get_or_make(
        "Texture Coordinate", "ShaderNodeTexCoord", node_tree, pos=(-730, 0)
    )
    # Link all the nodes together
    node_tree.links.new(output_node.inputs[0], background_node.outputs[0])
    node_tree.links.new(background_node.inputs[0], environment_node.outputs[0])
    node_tree.links.new(environment_node.inputs[0], rotation_node.outputs[0])
    node_tree.links.new(rotation_node.inputs[0], texcoord_node.outputs[0])
@gin.configurable
def random_hdri(
    apply_to_scene: bool = True,
    scale: Tuple[float] = (1.0, 1.0, 1.0),
) -> Path:
    """Pick a random HDRI from the asset directory, optionally loading it.

    Args:
        apply_to_scene (bool, optional): Load the HDRI into the active scene. Defaults to True.
        scale (Tuple[float], optional): Scale passed through to load_hdri. Defaults to (1.0, 1.0, 1.0).

    Returns:
        Path: Path to the random HDRI.
    """
    chosen_path = zpy.files.pick_random_from_dir(
        zpy.assets.hdri_dir(), suffixes=[".exr", ".hdri", ".hdr"]
    )
    if apply_to_scene:
        load_hdri(chosen_path, scale=scale)
    return chosen_path
import logging
from pathlib import Path
from typing import Callable, List, Union
import gin
import zpy
log = logging.getLogger(__name__)
class CSVParseError(Exception):
    """Raised when invalid CSV annotation data is found while parsing contents."""

    pass
@gin.configurable
class OutputCSV(zpy.output.Output):
    """Output class for CSV (comma separated value) style annotations."""

    # Default filename used for the annotation file.
    ANNOTATION_FILENAME = Path("annotations.csv")

    def __init__(self, *args, **kwargs) -> None:
        # BUGFIX: __init__ always returns None; the previous `-> Path` annotation was wrong.
        super().__init__(*args, annotation_filename=self.ANNOTATION_FILENAME, **kwargs)

    @gin.configurable
    def output_annotations(
        self,
        annotation_path: Union[Path, str] = None,
        annotation_dict_to_csv_row_func: Callable = None,
        header: List[str] = None,
    ) -> Path:
        """Output CSV annotations to file.

        Args:
            annotation_path (Union[Path, str], optional): Output path for annotation file.
            annotation_dict_to_csv_row_func (Callable, optional): Function that converts an annotation dict
                to a csv row; may return None to skip an annotation. Defaults to None.
            header (List[str], optional): Column headers. Defaults to None.

        Raises:
            CSVParseError: No annotation_dict_to_csv_row_func was provided.

        Returns:
            Path: Path to annotation file.
        """
        annotation_path = super().output_annotations(annotation_path=annotation_path)
        if annotation_dict_to_csv_row_func is None:
            raise CSVParseError(
                "Output CSV annotations requires a annotation_dict_to_csv_row_func"
            )
        csv_data = []
        if header is not None:
            csv_data.append(header)
        for annotation in self.saver.annotations:
            # Row conversion may return None to filter out an annotation
            row = annotation_dict_to_csv_row_func(annotation, saver=self.saver)
            if row is not None:
                csv_data.append(row)
        # Write out annotations to file
        zpy.files.write_csv(annotation_path, csv_data)
        # Verify annotations
        parse_csv_annotations(annotation_path)
        return annotation_path
@gin.configurable
def parse_csv_annotations(
    annotation_file: Union[Path, str],
) -> None:
    """Parse CSV annotations.

    Args:
        annotation_file (Union[Path, str]): Path to annotation file.

    Raises:
        CSVParseError: File is empty, or rows are not all the same length.
    """
    log.info(f"Verifying CSV annotations at {annotation_file}...")
    csv_data = zpy.files.read_csv(annotation_file)
    if not csv_data:
        raise CSVParseError(f"No data found in CSV at {annotation_file}")
    # BUGFIX: previously a single shared iterator was consumed by a debug log line,
    # which exhausted it at DEBUG log level and silently skipped row validation.
    # Iterate the list directly instead, and log the actual row lengths.
    length = len(csv_data[0])
    log.debug(f"Row lengths in CSV: {[len(row) for row in csv_data]}")
    # Make sure all the rows are the same length
    if not all(len(row) == length for row in csv_data):
        raise CSVParseError(f"Not all rows in the CSV have same length {length}")
    # TODO: Return Saver object.
import inspect
import logging
import random
import time
from functools import wraps
from pathlib import Path
from typing import Dict, List, Union
import bpy
import gin
import mathutils
import numpy as np
import zpy
log = logging.getLogger(__name__)
def use_gpu(
    compute_device_type="CUDA",
    use_cpu=True,
) -> None:
    """Choose the rendering devices for rendering.

    The hybrid render device options (GPU+CPU) are possible for CUDA and OPTIX.

    Args:
        compute_device_type (str, optional): One of [NONE, CUDA, OPTIX, OPENCL]. Defaults to 'CUDA'.
        use_cpu (bool, optional): Use CPU with compute device. Defaults to True.

    Raises:
        RuntimeError: Compute device is not a valid choice.
    """
    context = bpy.context
    cycles_preferences = context.preferences.addons["cycles"].preferences
    valid_device_types = [
        entry[0] for entry in cycles_preferences.get_device_types(context)
    ]
    if compute_device_type not in valid_device_types:
        raise RuntimeError("Non-existing device type")
    cycles_preferences.compute_device_type = compute_device_type
    devices = cycles_preferences.get_devices_for_type(compute_device_type)
    if len(devices) > 0:
        for device in devices:
            # Enable every device of the chosen type; CPU only if requested
            device.use = True
            if device.type == "CPU":
                device.use = use_cpu
            log.info(f"Using devices {device} {device.type} {device.use}")
    context.scene.cycles.device = "GPU"
    log.info(f"Using gpu type:{compute_device_type} cpu:{use_cpu}")
@gin.configurable
def set_seed(
    seed: int = 0,
) -> None:
    """Set the random seed (sets the python, numpy, and mathutils seed).

    Args:
        seed (int, optional): Integer seed. Defaults to 0.
    """
    if log.getEffectiveLevel() == logging.DEBUG:
        # When debugging you want to run into errors related
        # to specific permutations of the random variables, so
        # you need to vary the seed to run into them.
        seed = random.randint(1, 100)
    log.info(f"Setting random seed to {seed}")
    for seed_setter in (random.seed, np.random.seed, mathutils.noise.seed_set):
        seed_setter(seed)
@gin.configurable
def step(
    num_steps: int = 3,
    framerate: int = 1,
    start_frame: int = 1,
    refresh_ui: bool = False,
) -> int:
    """Generator that steps the sim forward (Blender frames), yielding the step id.

    Args:
        num_steps (int, optional): Number of steps to take before the yield stops. Defaults to 3.
        framerate (int, optional): How many Blender frames to move forward in each step.
            A value of 0 disables animation frame stepping. Defaults to 1.
        start_frame (int, optional): Blender frame to start on. Defaults to 1.
        refresh_ui (bool, optional): Refresh the Blender UI on every step. Defaults to False.

    Yields:
        Iterator[int]: Step id, from 0 up to num_steps - 1.
    """
    assert num_steps is not None, "Invalid num_steps"
    assert num_steps > 0, "Invalid num_steps"
    scene = zpy.blender.verify_blender_scene()
    step_idx = 0
    if framerate > 0:
        start = scene.frame_start
        stop = scene.frame_end
        log.info(f"Animation enabled. Min frames: {start}. Max frames: {stop}")
    while step_idx < num_steps:
        zpy.logging.linebreaker_log("step")
        log.info(f"Simulation step {step_idx + 1} of {num_steps}.")
        start_time = time.time()
        if framerate > 0:
            # Advance the scene animation to the frame for this step
            current_frame = start_frame + step_idx * framerate
            scene.frame_set(current_frame)
            log.info(f"Animation frame {scene.frame_current}")
        yield step_idx
        step_idx += 1
        duration = time.time() - start_time
        log.info(f"Simulation step took {duration}s to complete.")
        # TODO: This call is not needed in headless instances, makes loop faster
        if refresh_ui:
            refresh_blender_ui()
@gin.configurable
def verify_view_layer(
    view_layer_name: str = "View Layer",
) -> bpy.types.ViewLayer:
    """Get and set the view layer in Blender.

    Args:
        view_layer_name (str, optional): Name for View Layer. Defaults to 'View Layer'.

    Returns:
        bpy.types.ViewLayer: View Layer that will be used at runtime.
    """
    scene = zpy.blender.verify_blender_scene()
    active_view_layer = scene.view_layers.get(view_layer_name, None)
    if active_view_layer is None:
        log.debug(f"Could not find view layer {view_layer_name}")
        # Fall back to the first view layer in the scene
        active_view_layer = scene.view_layers[0]
    log.debug(f"Setting view layer to {active_view_layer.name}")
    bpy.context.window.view_layer = active_view_layer
    return active_view_layer
@gin.configurable
def verify_blender_scene(
    blender_scene_name: str = "Scene",
) -> bpy.types.Scene:
    """Get and set the scene in Blender.

    Args:
        blender_scene_name (str, optional): Name for Scene. Defaults to 'Scene'.

    Returns:
        bpy.types.Scene: Scene that will be used at runtime.
    """
    active_scene = bpy.data.scenes.get(blender_scene_name, None)
    if active_scene is None:
        log.debug(f"Could not find scene {blender_scene_name}")
        # Fall back to the first scene in the blendfile
        active_scene = bpy.data.scenes[0]
    log.debug(f"Setting scene to {active_scene.name}")
    bpy.context.window.scene = active_scene
    return active_scene
def parse_config(
    text_name: str = "config",
) -> None:
    """Parses the gin config text in Blender.

    Args:
        text_name (str, optional): Name of the config text. Defaults to 'config'.
    """
    config_text = bpy.data.texts.get(text_name, None)
    if config_text is None:
        log.warning(f"Could not find {text_name} in texts.")
        return
    log.info(f"Loading gin config {text_name}")
    gin.enter_interactive_mode()
    with gin.unlock_config():
        gin.parse_config(config_text.as_string())
        gin.finalize()
def save_and_revert(_func):
    """Decorator for saving blenderfile before execution, and
    reverting after execution (even when the wrapped function raises).

    Args:
        _func (callable): function to be decorated.

    Returns:
        [callable]: Wrapped function.
    """

    @wraps(_func)
    def wrapped_func(*args, **kwargs) -> None:
        log.info("Saving the sim.")
        bpy.ops.wm.save_mainfile()
        try:
            _func(*args, **kwargs)
        except Exception as e:
            log.error(f"Executing {_func.__name__} failed with exception {e}")
            # BUGFIX: bare raise preserves the original traceback (raise e re-raised
            # from this frame, losing context).
            raise
        finally:
            # Runs on both success and failure, restoring the savepoint
            log.info("Reverting sim to previous savepoint.")
            bpy.ops.wm.revert_mainfile()

    return wrapped_func
def load_text_from_file(
    path: Union[Path, str],
    text_name: str = "",
    open_text: bool = False,
) -> None:
    """Load a file into Blender's internal text UI.

    Args:
        path (Union[Path, str]): Filesystem path.
        text_name (str, optional): Name of Blender text to write to.
        open_text (bool, optional): Focus the text in any open text editor area.
    """
    path = zpy.files.verify_path(path)
    existing_text = bpy.data.texts.get(text_name, None)
    if existing_text is None:
        # No text with this name yet: load the file as a new internal text
        loaded_text = bpy.data.texts.load(str(path), internal=True)
        loaded_text.name = text_name
    else:
        # Overwrite the existing text's contents in place
        existing_text.from_string(path.read_text())
    if open_text:
        for area in bpy.context.screen.areas:
            if area.type == "TEXT_EDITOR":
                area.spaces[0].text = bpy.data.texts[text_name]
@gin.configurable
def connect_addon(
    name: str = "zpy_addon", addon_dir: Union[Path, str] = "$BLENDERADDONS"
) -> None:
    """Connects a Blender Addon.

    Args:
        name (str, optional): Name of Addon. Defaults to 'zpy_addon'.
        addon_dir (Union[Path, str], optional): Directory of addons. Defaults to '$BLENDERADDONS'.
    """
    log.debug(f"Connecting Addon {name}.")
    # BUGFIX: use the addon_dir parameter instead of a hard-coded '$BLENDERADDONS';
    # the default value preserves the previous behavior.
    path = f"{addon_dir}/{name}/__init__.py"
    path = zpy.files.verify_path(path, make=False)
    bpy.ops.preferences.addon_install(filepath=str(path))
    bpy.ops.preferences.addon_enable(module=name)
@gin.configurable
def connect_debugger_vscode(
    timeout: int = 3,
) -> None:
    """Connects to a VSCode debugger.

    https://github.com/AlansCodeLog/blender-debugger-for-vscode

    Args:
        timeout (int, optional): Seconds to connect before timeout. Defaults to 3.
    """
    # Only attach the debugger when running at DEBUG log level
    if log.getEffectiveLevel() != logging.DEBUG:
        return
    log.debug("Starting VSCode debugger in Blender.")
    connect_addon("blender-debugger-for-vscode")
    bpy.ops.debug.connect_debugger_vscode()
    for sec in range(timeout):
        log.debug(f"You have {timeout - sec} seconds to connect!")
        time.sleep(1)
def save_debug_blenderfile(
    path: Union[Path, str] = None,
) -> None:
    """Saves an intermediate blenderfile for debugging purposes.

    Args:
        path (Union[Path, str], optional): Output file path; defaults to a temp path.
    """
    if path is None:
        path = zpy.files.default_temp_path() / "_debug.blend"
    debug_path = zpy.files.verify_path(path, make=False)
    log.debug(f"Saving intermediate blenderfile to {debug_path}")
    # copy=True keeps the current session pointed at the original file
    bpy.ops.wm.save_as_mainfile(filepath=str(debug_path), compress=False, copy=True)
def refresh_blender_ui() -> None:
    """Refresh the Blender UI.

    Does not work on headless instances.
    """
    log.debug("Refreshing Blender UI.")
    bpy.ops.wm.redraw_timer(type="DRAW_WIN_SWAP", iterations=1)
    zpy.blender.verify_view_layer().update()
def load_sim(
    path: Union[Path, str],
    auto_execute_scripts: bool = True,
) -> None:
    """Load a sim from a path to a *.blend file.

    Replaces the contents of the current Blender session with the datablocks
    from the given .blend file.

    Args:
        path (Union[Path, str]): Path to .blend file.
        auto_execute_scripts (bool, optional): Whether to allow auto execution of scripts. Defaults to True.
    """
    # HACK: Clear out scene of cameras and lights
    clear_scene(["CAMERA", "LIGHT"])
    path = zpy.files.verify_path(path, make=False)
    log.debug(f"Loading sim from {str(path)}.")
    # Copy every datablock category (scenes, objects, materials, ...) from the
    # source file into this session
    with bpy.data.libraries.load(str(path)) as (data_from, data_to):
        for attr in dir(data_to):
            setattr(data_to, attr, getattr(data_from, attr))
    # HACK: Delete current empty scene
    bpy.ops.scene.delete()
    # HACK: Delete extra workspaces that are created e.g. 'Animation.001'
    _workspaces = [ws for ws in bpy.data.workspaces if ".0" in ws.name]
    bpy.data.batch_remove(ids=_workspaces)
    # Allow execution of scripts inside loaded sim
    if auto_execute_scripts:
        log.warning("Allowing .blend file to run scripts automatically")
        log.warning(" this is unsafe for untrusted files")
        bpy.context.preferences.filepaths.use_scripts_auto_execute = (
            auto_execute_scripts
        )
def clear_scene(
    to_clear: List = None,
) -> None:
    """Cleans objects in a scene based on the object type.

    Args:
        to_clear (List, optional): List of object types to clean. Defaults to ["MESH"].
    """
    # BUGFIX: avoid a mutable default argument; None stands in for ["MESH"].
    if to_clear is None:
        to_clear = ["MESH"]
    log.debug(f"Deleting all objects of type {to_clear}")
    # Snapshot the collection first: removing objects while iterating the live
    # bpy collection can skip entries.
    for obj in list(bpy.data.objects):
        if obj.type in to_clear:
            bpy.data.objects.remove(obj)
def scene_information() -> Dict:
    """Returns information on the scene, such as the kwargs in the run text.

    Raises:
        ValueError: Lack of run text and issues with the run text.

    Returns:
        Dict: Sim information dictionary.
    """
    log.info("Collecting scene information")
    run_script = bpy.data.texts.get("run", None)
    if run_script is None:
        raise ValueError("No run script found in scene.")
    # HACK: Gin is confused by the as_module() call
    gin.enter_interactive_mode()
    run_script_module = bpy.data.texts["run"].as_module()
    scene_doc = inspect.getdoc(run_script_module)
    run_function = getattr(run_script_module, "run", None)
    if run_function is None:
        raise ValueError("No run() function found in run script.")
    if not inspect.isfunction(run_function):
        raise ValueError("run() is not a function in run script.")
    # Describe every keyword argument of run() for downstream tooling
    run_kwargs = [
        {
            "name": param.name,
            "type": str(param.annotation),
            "default": param.default,
        }
        for param in inspect.signature(run_function).parameters.values()
    ]
    scene = zpy.blender.verify_blender_scene()
    _ = {
        "name": scene.zpy_sim_name,
        "version": scene.zpy_sim_version,
        "description": scene_doc,
        "run_kwargs": run_kwargs,
        "export_date": time.strftime("%m%d%Y_%H%M_%S"),
        "zpy_version": zpy.__version__,
        "zpy_path": zpy.__file__,
        "blender_version": ".".join(str(part) for part in bpy.app.version),
    }
    log.info(f"{_}")
    return _
import functools
import hashlib
import json
import math
import os
import shutil
import sys
import uuid
import zipfile
from collections import defaultdict
from datetime import datetime
from itertools import groupby
from os import listdir
from os.path import join
from pathlib import Path
from typing import Iterable, List, Dict, Tuple, Callable
from typing import Union
import requests
from pydash import values, filter_
from requests import HTTPError
def track_runtime(wrapped_function):
    """Decorator that prints how long the wrapped function took to run."""

    @functools.wraps(wrapped_function)
    def do_track(*args, **kwargs):
        started_at = datetime.now()
        result = wrapped_function(*args, **kwargs)
        run_time = datetime.now() - started_at
        print(
            f"{str(wrapped_function)} took {run_time.seconds}.{run_time.microseconds} seconds."
        )
        return result

    return do_track
def add_newline(func):
    """Decorator to print a new line after the function call.

    Args:
        func: function to wrap

    Returns:
        wrapped function
    """

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        result = func(*args, **kwargs)
        print("\n")
        return result

    return wrapper
def auth_header(auth_token):
    """Build the Authorization header dict for a given API token."""
    return dict(Authorization=f"Token {auth_token}")
def handle_response(response: requests.Response):
    """Shared logic for handling API responses.

    Args:
        response: Request to handle

    Returns:
        requests.Response
    """
    if response.status_code == 200:
        return response
    if response.status_code == 400:
        # Known error from the server will have a nice message.
        raise HTTPError(response.json())
    # Raises for other 4xx/5xx statuses; no-op otherwise
    response.raise_for_status()
    return response
def get(url, **kwargs):
    """GET a url. Forwards kwargs to requests.get
    TODO: Merge with calling code in zpy/cli/

    Args:
        url (str): Ragnarok API url
        kwargs: Forwarded to the requests.get function call

    Returns:
        requests.Response

    Raises:
        HTTPError
    """
    # 'verbose' is consumed here and not forwarded to requests
    verbose = kwargs.pop("verbose", False)
    response = requests.get(url, **kwargs)
    if verbose:
        print(response.url)
    return handle_response(response)
def post(url, **kwargs):
    """POST to a url. Forwards kwargs to requests.post
    TODO: Merge with calling code in zpy/cli/

    Args:
        url (str): Ragnarok API url
        kwargs: Forwarded to the requests.post function call

    Returns:
        requests.Response

    Raises:
        HTTPError
    """
    response = requests.post(url, **kwargs)
    return handle_response(response)
def to_query_param_value(config):
    """Create the special query parameter value string for filtering generated-data-sets via config values.

    Args:
        config (dict): A dict of gin config values pre-flattened by using django field traversal notation. See Dataset._config

    Returns:
        string value for the config url query parameter
    """
    # Fields set to None were not specifically set or asked for, so skip them.
    return ",".join(
        f"{field}:{value}" for field, value in config.items() if value is not None
    )
def remove_none_values(obj: dict):
    """Recreates a dictionary from obj by omitting all key/value pairs where value is None."""
    cleaned = {}
    for key, value in obj.items():
        if value is not None:
            cleaned[key] = value
    return cleaned
def convert_size(size_bytes: int):
    """Converts a number of bytes into a pretty string."""
    if size_bytes == 0:
        return "0B"
    units = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB")
    # Pick the largest power of 1024 that fits, then scale down to it
    exponent = int(math.floor(math.log(size_bytes, 1024)))
    scaled = round(size_bytes / math.pow(1024, exponent), 2)
    return f"{scaled} {units[exponent]}"
# ANSI escape sequence that erases the current terminal line.
ERASE_LINE = "\x1b[2K"


def clear_last_print():
    """Erase the current terminal line using the ANSI escape code."""
    sys.stdout.write(ERASE_LINE)
def is_done(state: str):
    """Returns True if state is a done state, false otherwise."""
    done_states = ("READY", "CANCELLED", "PACKAGING_FAILED", "GENERATING_FAILED")
    return state in done_states
def remove_n_extensions(path: Union[str, Path], n: int) -> str:
    """
    Removes n extensions from the end of a path. Example: "image.rgb.png" becomes "image" for n = 2

    Args:
        path (Path): Path to manipulate.
        n (int): Number of extensions to remove.

    Returns:
        str: Path minus n extensions.
    """
    # Strip one suffix per iteration, n times in total
    stripped = functools.reduce(
        lambda p, _unused: p.with_suffix(""), range(n), Path(path)
    )
    return str(stripped)
def dict_hash(data) -> str:
    """
    Returns a deterministic hash from json serializable data.
    https://www.doc.ic.ac.uk/~nuric/coding/how-to-hash-a-dictionary-in-python.html

    Args:
        data: JSON serializable data.

    Returns:
        str: Deterministic hash of the input data.
    """
    # sort_keys makes the serialization (and therefore the hash) order-independent
    serialized = json.dumps(data, sort_keys=True)
    return hashlib.md5(serialized.encode()).hexdigest()
@track_runtime
def extract_zip(path_to_zip: Path) -> Path:
    """
    Extracts a .zip to a new adjacent folder by the same name.

    Args:
        path_to_zip: Path to .zip

    Returns:
        Path: Extracted folder path
    """
    # Destination folder is the zip path without its .zip extension
    destination = Path(remove_n_extensions(path_to_zip, n=1))
    with zipfile.ZipFile(path_to_zip, "r") as archive:
        archive.extractall(destination)
    return destination
@track_runtime
def write_json(path, json_blob):
    """Write a JSON serializable object to path with indentation.

    Args:
        path (str): Path to output to.
        json_blob (obj): JSON serializable object.
    """
    with open(path, "w") as handle:
        json.dump(json_blob, handle, indent=4)
def group_by(iterable: Iterable, keyfunc: Callable) -> List[List]:
    """
    Groups items in a list by equality using the value returned when passed to the callback
    https://docs.python.org/3/library/itertools.html#itertools.groupby

    Args:
        iterable (Iterable): List of items to group
        keyfunc (Callable): Callback that transforms each item in the list to a value used to test for equality against other items.

    Returns:
        list[list]: List of lists containing items that test equal to eachother when transformed by the keyfunc callback
    """
    grouped = []
    # NOTE: itertools.groupby only groups *consecutive* equal keys
    for _key, members in groupby(iterable, keyfunc):
        grouped.append(list(members))
    return grouped
@track_runtime
def group_metadata_by_datapoint(
    dataset_path: Path,
) -> Tuple[Dict, List[Dict], List[Dict]]:
    """
    Updates metadata with new ids and accurate image paths.
    Returns a list of dicts, each item containing metadata relevant to a single datapoint.

    Images that share a common prefix (name minus the last two extensions) are
    treated as one datapoint and reassigned a shared UUID-based id.

    Args:
        dataset_path (Path): Path to unzipped dataset.

    Returns:
        tuple (metadata: dict, categories: list[dict], datapoints: list[dict]): Returns a tuple of (metadata,
        categories, datapoints), datapoints being a list of dicts, each containing a list of images and a list of
        annotations.
    """
    print("Parsing dataset to group by datapoint...")
    accum_metadata = {}
    accum_categories = {}
    accum_datapoints = []
    # Running total of annotation counts per category across all batches
    category_count_sums = defaultdict(int)
    # batch level - group images by datapoint
    for batch in listdir(dataset_path):
        batch_uri = join(dataset_path, batch)
        annotation_file_uri = join(batch_uri, "_annotations.zumo.json")
        with open(annotation_file_uri) as annotation_file:
            metadata = json.load(annotation_file)
        # NOTE: overwritten each batch; the last batch's metadata wins
        accum_metadata = {**metadata["metadata"], "save_path": batch_uri}
        for category_id, category in metadata["categories"].items():
            category_count_sums[category_id] += category["count"]
        for category_id, category in metadata["categories"].items():
            accum_categories[category_id] = category
        # Images whose names differ only in the last two extensions belong together
        images_grouped_by_datapoint = group_by(
            values(metadata["images"]),
            lambda image: remove_n_extensions(image["relative_path"], n=2),
        )
        # datapoint level
        for images in images_grouped_by_datapoint:
            datapoint_uuid = str(uuid.uuid4())
            # get datapoint specific annotations
            image_ids = [i["id"] for i in images]
            annotations = filter_(
                metadata["annotations"], lambda a: a["image_id"] in image_ids
            )
            # mutate: new id is the datapoint uuid plus the image's last two suffixes
            image_new_id_map = {
                img["id"]: datapoint_uuid + "".join(Path(img["name"]).suffixes[-2:])
                for img in images
            }
            images_mutated = [
                {
                    **i,
                    "output_path": join(batch_uri, Path(i["relative_path"])),
                    "id": image_new_id_map[i["id"]],
                }
                for i in images
            ]
            annotations_mutated = [
                {
                    **a,
                    "image_id": image_new_id_map[a["image_id"]],
                }
                for a in annotations
            ]
            # accumulate
            accum_datapoints.append(
                {
                    "images": images_mutated,
                    "annotations": annotations_mutated,
                }
            )
    # Write the cross-batch count totals back onto the accumulated categories
    for category_id, category_count in category_count_sums.items():
        accum_categories[category_id]['count'] = category_count
    return accum_metadata, values(accum_categories), accum_datapoints
def format_dataset(
    zipped_dataset_path: Union[str, Path], datapoint_callback=None
) -> None:
    """
    Updates metadata with new ids and accurate image paths.
    If a datapoint_callback is provided, it is called once per datapoint with the updated metadata.
    Otherwise the default is to write out an updated _annotations.zumo.json, along with all images, to a new adjacent folder.

    Args:
        zipped_dataset_path (str): Path to the zipped dataset (unzipped lazily if needed).
        datapoint_callback (Callable) -> None: User defined function called as
            datapoint_callback(images, annotations, categories) per datapoint.

    Returns:
        None: No return value.
    """
    # Reuse an existing unzipped folder if present, otherwise extract
    unzipped_dataset_path = Path(remove_n_extensions(zipped_dataset_path, n=1))
    if not unzipped_dataset_path.exists():
        print(f"Unzipping {zipped_dataset_path}...")
        unzipped_dataset_path = extract_zip(zipped_dataset_path)
    metadata, categories, datapoints = group_metadata_by_datapoint(
        unzipped_dataset_path
    )
    if datapoint_callback is not None:
        print("Skipping default formatting, using datapoint_callback instead.")
        for datapoint in datapoints:
            datapoint_callback(
                datapoint["images"], datapoint["annotations"], categories
            )
    else:
        print("Doing default formatting for dataset...")
        output_dir = join(
            unzipped_dataset_path.parent, unzipped_dataset_path.name + "_formatted"
        )
        os.makedirs(output_dir)
        # Rebuild a single flat annotation file spanning all batches
        accum_metadata = {
            "metadata": {
                **metadata,
                "save_path": output_dir,
            },
            "categories": {category["id"]: category for category in categories},
            "images": {},
            "annotations": [],
        }
        for datapoint in datapoints:
            accum_metadata["annotations"].extend(datapoint["annotations"])
            for image in datapoint["images"]:
                # reference original path to save from
                original_image_uri = image["output_path"]
                # build new path
                output_image_uri = join(output_dir, image["id"])
                # add to accumulator
                accum_metadata["images"][image["id"]] = {
                    **image,
                    "name": image["id"],
                    "relative_path": image["id"],
                    "output_path": join(output_dir, image["id"]),
                }
                # copy image to new folder
                shutil.copy(original_image_uri, output_image_uri)
        # write json
        metadata_output_path = join(output_dir, Path("_annotations.zumo.json"))
        os.makedirs(os.path.dirname(metadata_output_path), exist_ok=True)
        write_json(metadata_output_path, accum_metadata)
import logging
import random
import time
from pathlib import Path
from typing import Dict, List, Tuple, Union
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.patches import Arrow, Circle, Polygon, Rectangle
from matplotlib.ticker import MaxNLocator
import zpy
log = logging.getLogger(__name__)
def pretty_axes(ax: matplotlib.axes.Axes) -> matplotlib.axes.Axes:
    """Better looking matplotlib axes object."""
    # Hide all spines except the bottom one
    for spine_name in ("top", "right", "left"):
        ax.spines[spine_name].set_visible(False)
    ax.get_xaxis().tick_bottom()
    ax.get_yaxis().set_visible(False)
    ax.grid(axis="y", alpha=0.75)
    return ax
def plotter(func):
    """Decorator for plotting functions: styles, saves, and closes the figure.

    The wrapped function must return a (plot_name, figure) tuple.
    """

    def wrapped(
        output_path: Union[Path, str] = None,
        show: bool = False,
        **kwargs,
    ) -> matplotlib.figure.Figure:
        output_path = zpy.files.verify_path(output_path, make=True, check_dir=True)
        plt.style.use("fivethirtyeight")
        plot_name, fig = func(**kwargs)
        if show:
            log.info(f"Displaying {plot_name}...")
            plt.show()
            time.sleep(1)
        # Save the plot alongside the dataset, then release all figures
        save_path = output_path / f"_plot.{plot_name}.png"
        plt.savefig(save_path, bbox_inches="tight", pad_inches=0)
        plt.close("all")
        return fig

    return wrapped
@plotter
def image_grid_plot(
    images: List[np.ndarray] = None,
    rows: int = 4,
    cols: int = 4,
) -> Tuple[str, matplotlib.figure.Figure]:
    """Plots a random sample of images in a rows x cols grid."""
    assert images is not None, "Images required."
    sampled = random.sample(images, min(rows * cols, len(images)))
    fig = plt.figure(figsize=(16, 16))
    plt.suptitle("Sample Images", fontsize=18)
    for idx, img in enumerate(sampled):
        plt.subplot(rows, cols, idx + 1)
        plt.imshow(img)
        # Hide grid lines and axis ticks for a cleaner look
        plt.grid(False)
        plt.xticks([])
        plt.yticks([])
    return "image_grid_plot", fig
@plotter
def image_shape_plot(
    images: List[np.ndarray] = None,
) -> Tuple[str, matplotlib.figure.Figure]:
    """Plots 2D histogram of the image shapes."""
    assert images is not None, "Images required."
    # HACK: Filter out 2D images; keep only 3-dimensional (H, W, C) shapes
    shapes = np.asarray([s for s in (np.shape(img) for img in images) if len(s) == 3])
    fig, ax = plt.subplots(figsize=(8, 8))
    ax.hist2d(shapes[:, 0], shapes[:, 1], bins=(50, 50))
    ax.set(title="Histogram of Image Sizes")
    ax.set(xlabel="Width in Pixels")
    ax.set(ylabel="Height in Pixels")
    # Pixel ticks should be integers
    ax.yaxis.set_major_locator(MaxNLocator(nbins=5, integer=True))
    ax.xaxis.set_major_locator(MaxNLocator(nbins=5, integer=True))
    return "image_shape_plot", fig
@plotter
def color_correlations_plot(
    flat_images: List[np.ndarray] = None,
) -> Tuple[str, matplotlib.figure.Figure]:
    """Plots 2D histograms of color correlations: RG, RB, and BG."""
    assert flat_images is not None, "Images required."
    # HACK: Incorrect type assumption
    pixels = flat_images[0]
    fig = plt.figure(figsize=(16, 5))
    plt.rcParams["axes.grid"] = False
    plt.suptitle("Pixel Color Correlations \n\n\n", fontsize=18)
    # (subplot id, channel a, channel b, subplot title)
    channel_pairs = [
        (131, 0, 1, "Red - Green"),
        (132, 0, 2, "Red - Blue"),
        (133, 1, 2, "Blue - Green"),
    ]
    for subplot_id, ch_a, ch_b, title in channel_pairs:
        plt.subplot(subplot_id)
        plt.hist2d(pixels[:, ch_a], pixels[:, ch_b], bins=(50, 50), density=True)
        plt.title(title, fontsize=16)
    return "color_correlations_plot", fig
@plotter
def pixel_histograms(
    flat_images: List[np.ndarray] = None,
) -> Tuple[str, matplotlib.figure.Figure]:
    """Plots histograms of pixel values for each color channel."""
    assert flat_images is not None, "Images required."
    import seaborn as sns

    # HACK: Incorrect type assumption
    pixels = flat_images[0]
    fig = plt.figure(figsize=(16, 8))
    plt.suptitle("Pixel Histograms (Red, Green, Blue)", fontsize=18)
    # One stacked subplot per color channel
    for channel, (subplot_id, channel_color) in enumerate(
        [(311, "r"), (312, "g"), (313, "b")]
    ):
        ax = pretty_axes(plt.subplot(subplot_id))
        sns.histplot(pixels[:, channel], bins=255, color=channel_color, ax=ax)
    plt.tight_layout()
    return "pixel_histograms", fig
@plotter
def category_barplot(
    categories: Dict[str, Dict] = None,
) -> Tuple[str, matplotlib.figure.Figure]:
    """Horizontal bar plots for categories and sub-categories."""
    assert categories is not None, "categories required."
    names = [c["name"] for c in categories.values()]
    counts = [c["count"] for c in categories.values()]
    colors = [c["color"] for c in categories.values()]
    num_categories = len(names)
    fig, ax = plt.subplots(figsize=(16, 6 * (num_categories + 1)))
    plt.rcParams["axes.grid"] = False
    # One row for the category plot, plus one potential row per sub-category plot
    num_rows = num_categories + 1
    num_cols = 1
    first_plot_idx = 1
    if num_categories >= 1:
        ax = plt.subplot(num_rows, num_cols, first_plot_idx)
        ax.barh(names, counts, color=colors)
        ax.set(title="Annotations per Category")
        ax.set(xlabel="Number of Annotations")
        ax.set(ylabel="Category Name")
    # Subcategory histograms
    for i, category in enumerate(categories.values()):
        subcategories = category["subcategories"]
        if len(subcategories) > 1:
            ax = plt.subplot(num_rows, num_cols, first_plot_idx + i + 1)
            ax.barh(subcategories, category["subcategory_count"])
            ax.set(title=f'Annotations per Subcategory of {category["name"]}')
            ax.set(xlabel="Number of Annotations")
            ax.set(ylabel=f'Subcategory of {category["name"]}')
    return "category_histograms", fig
@plotter
def draw_annotations(
    image_path: Union[Path, str] = None,
    annotations: List = None,
    categories: Dict[str, Dict] = None,
) -> None:
    """Given an path to an image draw annotations.

    Args:
        image_path (Union[Path, str], optional): Path to the image to annotate.
        annotations (List, optional): Annotation dicts (bbox, segmentation, keypoints).
        categories (Dict[str, Dict], optional): Category id -> category info (color, skeleton).
    """
    log.info(f"draw annotations on {image_path}...")
    # BUGFIX: coerce to Path so that image_path.stem below works when a str is
    # passed, as the Union[Path, str] type hint allows.
    image_path = Path(image_path)
    image = zpy.image.open_image(image_path)
    _, ax = plt.subplots()
    ax.imshow(image)
    for i, annotation in enumerate(annotations):
        log.debug(f"{i}: {annotation}")
        category_id = annotation["category_id"]
        category_color = categories[category_id].get("color", None)
        if category_color is None:
            log.debug("Could not find category color, using random color instead.")
            category_color = zpy.color.random_color()
        if "num_keypoints" in annotation:
            skeleton = categories[category_id]["skeleton"]
            # Annotations may use either keypoint field name
            try:
                keypoints = annotation["keypoints_xyv"]
            except KeyError:
                keypoints = annotation["keypoints"]
            draw_keypoints(ax, keypoints, skeleton, "r")
        # Only draw bounding box OR segmentation
        if "segmentation" in annotation:
            draw_segmentation(ax, annotation["segmentation"], category_color)
        if "bbox" in annotation:
            draw_bbox(
                ax,
                annotation["bbox"],
                category_color,
                text=annotation.get("bbox_text", None),
            )
    plt.axis("off")
    fig = plt.gcf()
    DPI = fig.get_dpi()
    # Size the figure so the saved plot matches the image resolution
    fig.set_size_inches(
        image.shape[1] / float(DPI),  # width
        image.shape[0] / float(DPI),  # height
    )
    full_name = f"{image_path.stem}.annotated"
    return full_name, fig
def draw_bbox(
    ax: matplotlib.axes.Axes,
    bbox: List,
    color: Tuple[int],
    text: str = None,
    alpha: float = 0.2,
) -> None:
    """Draw a bounding box on the matplotlib axes object."""
    # TODO: fix the bordering in matplotlib so that the pixels
    # line up appropriately bounding boxes are [x, y, w, h]
    log.debug(f"Drawing bbox {bbox} {color}")
    x, y, width, height = bbox[0], bbox[1], bbox[2], bbox[3]
    box_patch = Rectangle(
        (x, y),
        width,
        height,
        linewidth=3,
        facecolor=color,
        edgecolor=color,
        alpha=alpha,
    )
    # Add text above box
    if text is not None:
        ax.text(
            x=x,
            y=y,
            s=text,
            color=color,
            weight="bold",
            fontsize=6,
            ha="left",
            va="bottom",
        )
    ax.add_patch(box_patch)
def draw_segmentation(
    ax: matplotlib.axes.Axes,
    segmentation: List,
    color: Tuple[int],
    alpha: float = 0.6,
) -> None:
    """Draw a segmentation polygon on the matplotlib axes object."""
    log.debug(f"Drawing segmentation {segmentation} {color}")
    for seg in segmentation:
        # Flat [x1, y1, x2, y2, ...] list -> (N, 2) vertex array
        vertices = np.array(seg).reshape((int(len(seg) / 2), 2))
        ax.add_patch(Polygon(vertices, linewidth=3, color=color, alpha=alpha))
def _draw_keypoint_circle(
    ax: matplotlib.axes.Axes,
    x: float,
    y: float,
    visibility: int,
    color: Tuple[int],
    alpha: float,
) -> None:
    """Draw one keypoint circle: white fill when labeled-but-hidden (v=1),
    colored fill when labeled-and-visible (v=2), nothing when unlabeled (v=0)."""
    if visibility == 1:
        ax.add_patch(
            Circle((x, y), radius=5, edgecolor=color, facecolor="w", alpha=alpha)
        )
    elif visibility == 2:
        ax.add_patch(
            Circle((x, y), radius=5, edgecolor=color, facecolor=color, alpha=alpha)
        )


def draw_keypoints(
    ax: matplotlib.axes.Axes,
    keypoints: List,
    skeleton: Dict,
    color: Tuple[int],
    alpha: float = 0.8,
) -> None:
    """
    Draws keypoints of an instance and follows the rules for keypoint connections
    to draw lines between appropriate keypoints.

    "keypoints": [x1,y1,v1,...,xk,yk,vk]

    - Keypoint coordinates are floats measured from the top left image corner (and are 0-indexed).
    - We recommend rounding coordinates to the nearest pixel to reduce file size.
    - v indicates visibility
            v=0: not labeled (in which case x=y=0)
            v=1: labeled but not visible
            v=2: labeled and visible
    """
    log.debug("Drawing keypoints")
    # HACK: 0 indexed versus 1 indexed skeleton. This check is loop-invariant,
    # so compute it once instead of on every bone (the guard keeps the empty
    # skeleton case from calling min() on an empty sequence).
    one_indexed = len(skeleton) > 0 and min(min(skeleton)) == 1
    for k1, k2 in skeleton:
        if one_indexed:
            k1 -= 1
            k2 -= 1
        # Keypoints are packed as consecutive [x, y, v] triplets
        k1_x = keypoints[3 * k1 + 0]
        k1_y = keypoints[3 * k1 + 1]
        k1_v = keypoints[3 * k1 + 2]
        k2_x = keypoints[3 * k2 + 0]
        k2_y = keypoints[3 * k2 + 1]
        k2_v = keypoints[3 * k2 + 2]
        _draw_keypoint_circle(ax, k1_x, k1_y, k1_v, color, alpha)
        _draw_keypoint_circle(ax, k2_x, k2_y, k2_v, color, alpha)
        # Connect the two keypoints only when both are labeled
        if k1_v != 0 and k2_v != 0:
            line = Arrow(k1_x, k1_y, k2_x - k1_x, k2_y - k1_y, color=color, alpha=alpha)
            ax.add_patch(line)
import logging
from pathlib import Path
from pprint import pformat
from typing import Any, Dict, Tuple, Union
import gin
import zpy
log = logging.getLogger(__name__)
def replace_human_redable_kwargs(
    gin_bindings: Dict,
    human_conversion: Dict = {
        "seed": "zpy.blender.set_seed.seed",
        "output_dir": "zpy.saver.Saver.output_dir",
        "output_path": "zpy.saver.Saver.output_dir",
        "num_images": "zpy.blender.step.num_steps",
        "num_frames": "zpy.blender.step.num_steps",
    },
) -> Tuple[str, Any]:
    """Translate human-readable binding names into full gin binding paths.

    Args:
        gin_bindings (Dict): Gin bindings dictionary {gin binding : value}.
        human_conversion (Dict, optional): Map from human-readable keys to
            fully-qualified gin keys.

    Yields:
        Iterator[Tuple[str, Any]]: (gin binding path, value) pairs.
    """
    log.info("Converting human readable bindings to gin...")
    for binding_key, binding_value in gin_bindings.items():
        converted_key = human_conversion.get(binding_key, None)
        if converted_key is not None:
            log.info(f"Converted {binding_key} to {converted_key}")
            yield converted_key, binding_value
        else:
            yield binding_key, binding_value
def parse_gin_bindings(
    gin_bindings: Dict = None,
) -> None:
    """Apply any extra gin bindings on top of the current config.

    Args:
        gin_bindings (Dict, optional): Gin bindings dictionary {gin binding : value}.
    """
    if gin_bindings is None:
        log.info("No additional gin bindings to parse")
        return
    log.info(f"Parsing additional bindings: {pformat(gin_bindings)}")
    with gin.unlock_config():
        for binding_key, binding_value in replace_human_redable_kwargs(gin_bindings):
            # Bindings gin does not recognize are logged and skipped
            try:
                gin.bind_parameter(binding_key, binding_value)
                outcome = "BOUND "
            except Exception:
                outcome = "IGNORED"
            log.info(f"{outcome} - {binding_key} : {binding_value}")
def parse_gin_config(
    gin_config: str = None,
    gin_config_dir: Union[Path, str] = "$CONFIG",
) -> None:
    """Load and parse a named gin config file from a config directory.

    Args:
        gin_config (str, optional): Name of gin config (".gin" suffix optional).
        gin_config_dir (Union[Path, str], optional): Directory with gin configs.

    Raises:
        zpy.requests.InvalidRequest: Cannot find gin config at path.
    """
    if gin_config is None:
        log.info("No gin file to parse.")
        return
    # Allow callers to omit the ".gin" suffix
    if not gin_config.endswith(".gin"):
        gin_config = gin_config + ".gin"
    config_dir = zpy.files.verify_path(gin_config_dir, check_dir=True)
    config_path = config_dir / Path(gin_config)
    log.info(f"Parsing gin config at {config_path}")
    if not config_path.exists():
        raise zpy.requests.InvalidRequest(
            f"Could not find gin config at {config_path}"
        )
    # Start from a clean slate before applying the file
    gin.clear_config()
    gin.parse_config_file(str(config_path))
def parse_gin_in_request(
    request: Dict,
) -> None:
    """Apply gin config and bindings found in a request dict, then finalize.

    Args:
        request (Dict): Request dictionary (see zpy.requests).
    """
    gin_config_name = request.get("gin_config", None)
    gin_bindings = request.get("gin_bindings", None)
    zpy.gin.parse_gin_config(gin_config=gin_config_name)
    zpy.gin.parse_gin_bindings(gin_bindings=gin_bindings)
    gin.finalize()
import functools
import json
import sys
import time
from datetime import datetime, timedelta
from os import listdir
from os.path import join
from pathlib import Path
from typing import Dict
from typing import Union
import requests
from pydash import set_, unset, is_empty
from cli.utils import download_url
from zpy.client_util import (
add_newline,
extract_zip,
get,
post,
to_query_param_value,
convert_size,
auth_header,
clear_last_print,
is_done,
format_dataset,
dict_hash,
remove_n_extensions,
)
# Module-level client state: populated by init() and read by the API helper
# functions below (guarded by the require_zpy_init decorator).
_auth_token: str = ""
_base_url: str = ""
_project: Union[Dict, None] = None
def init(
    auth_token: str,
    project_uuid: str,
    base_url: str = "https://ragnarok.zumok8s.org",
    **kwargs,
):
    """Initialize the zpy client against a backend project.

    Stores the auth token, base url, and fetched project json in module
    state so later calls (preview, generate, ...) can use them.
    """
    global _auth_token, _base_url, _project
    _auth_token = auth_token
    _base_url = base_url
    project_url = f"{_base_url}/api/v1/projects/{project_uuid}"
    try:
        response = get(project_url, headers=auth_header(_auth_token))
        _project = response.json()
    except requests.HTTPError:
        # Leave _project unset; subsequent calls will fail the init check.
        print(
            "Failed to find project, please double check the id and try again.",
            file=sys.stderr,
        )
# Number of output images that make up one sample.
IMAGES_PER_SAMPLE = 2  # for the iseg and rbg
# Local directory where downloaded datasets are stored.
DATASET_OUTPUT_PATH = Path("/tmp")  # for generate and default_saver_func
def require_zpy_init(func):
    """Decorator: raise unless zpy.init() has populated the module state."""

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        # All three must be set by init() before any API call is allowed
        if None in [_project, _auth_token, _base_url]:
            raise RuntimeError("Project and auth_token must be set via zpy.init()")
        return func(*args, **kwargs)

    return wrapper
class DatasetConfig:
    """Local handle on a Sim plus a set of gin config overrides.

    Used by zpy.preview and zpy.generate.
    """

    @require_zpy_init
    def __init__(self, sim_name: str, **kwargs):
        """
        Create a DatasetConfig. Used by zpy.preview and zpy.generate.

        Args:
            sim_name: Name of Sim
        """
        self._sim = None
        self._config = {}
        # (project, name) is expected to uniquely identify a Sim
        sim_filters = {
            "project": _project["id"],
            "name": sim_name,
        }
        response = get(
            f"{_base_url}/api/v1/sims/",
            params=sim_filters,
            headers=auth_header(_auth_token),
        )
        matching_sims = response.json()["results"]
        if len(matching_sims) > 1:
            raise RuntimeError(
                "Create DatasetConfig failed: Found more than 1 Sim for unique filters which should not be possible."
            )
        if len(matching_sims) == 1:
            print(f"Found Sim<{sim_name}> in Project<{_project['name']}>")
            self._sim = matching_sims[0]
        else:
            raise RuntimeError(
                f"Create DatasetConfig failed: Could not find Sim<{sim_name}> in Project<{_project['name']}>."
            )

    @property
    def sim(self):
        """The Sim json fetched from the API."""
        return self._sim

    @property
    def available_params(self):
        """Configurable run kwargs exposed by the Sim."""
        return self._sim["run_kwargs"]

    @property
    def config(self):
        """A dict representing a json object of gin config parameters."""
        return self._config

    @property
    def hash(self):
        """Return a hash of the config."""
        return dict_hash(self._config)

    def set(self, path: str, value: any):
        """Set a value for a configurable parameter.

        Args:
            path: The json gin config path. Ex. given object { a: b: [{ c: 1 }]}, the value at path "a.b[0]c" is 1.
            value: The value for the gin config path provided.
        """
        set_(self._config, path, value)

    def unset(self, path):
        """Remove a configurable parameter.

        Args:
            See self.set
        """
        unset(self._config, path)
@add_newline
def preview(dataset_config: DatasetConfig, num_samples=10):
    """
    Generate a preview of output data for a given DatasetConfig.

    Args:
        dataset_config: Describes a Sim and its configuration. See DatasetConfig.
        num_samples (int): number of preview samples to generate

    Returns:
        File[]: Sample images for the given configuration.
    """
    print("Generating preview:")
    # Only filter on config when the caller actually set overrides
    if is_empty(dataset_config.config):
        config_filters = {}
    else:
        config_filters = {"config": to_query_param_value(dataset_config.config)}
    simrun_filters = {
        "project": _project["id"],
        "sim": dataset_config.sim["name"],
        "state": "READY",
        "page-size": num_samples,
        **config_filters,
    }
    simruns = get(
        f"{_base_url}/api/v1/simruns/",
        params=simrun_filters,
        headers=auth_header(_auth_token),
    ).json()["results"]
    if not simruns:
        print("No preview available.")
        print("\t(no premade SimRuns matching filter)")
        return []
    # Only plain RGB images (no annotated overlays) make up the preview
    file_filters = {
        "run__sim": dataset_config.sim["id"],
        "path__icontains": ".rgb",
        "~path__icontains": ".annotated",
    }
    files = get(
        f"{_base_url}/api/v1/files/",
        params=file_filters,
        headers=auth_header(_auth_token),
    ).json()["results"]
    if not files:
        print("No preview available.")
        print("\t(no images found)")
        return []
    return files
@add_newline
def generate(
    dataset_config: DatasetConfig,
    num_datapoints: int = 10,
    materialize: bool = True,
    datapoint_callback=None,
):
    """
    Generate a dataset.

    Args:
        dataset_config (DatasetConfig): Specification for a Sim and its configurable parameters.
        num_datapoints (int): Number of datapoints in the dataset. A datapoint is an instant in time composed of all
                              the output images (rgb, iseg, cseg, etc) along with the annotations.
        datapoint_callback (fn): Callback function to be called with every datapoint in the generated Dataset.
        materialize (bool): Optionally download the dataset. Defaults to True.

    Returns:
        Dataset: Local handle on the (possibly still-running) API dataset.
    """
    dataset_config_hash = dataset_config.hash
    sim_name = dataset_config.sim["name"]
    # Dataset name is deterministic in (sim, config, size) so re-runs reuse it
    internal_dataset_name = f"{sim_name}-{dataset_config_hash}-{num_datapoints}"
    filter_params = {"project": _project["id"], "name": internal_dataset_name}
    datasets_res = get(
        f"{_base_url}/api/v1/datasets",
        params=filter_params,
        headers=auth_header(_auth_token),
    ).json()
    if len(datasets_res["results"]) == 0:
        # No dataset for this (sim, config, size) yet: create one and kick
        # off generation on the backend.
        dataset = post(
            f"{_base_url}/api/v1/datasets/",
            data={
                "project": _project["id"],
                "name": internal_dataset_name,
            },
            headers=auth_header(_auth_token),
        ).json()
        post(
            f"{_base_url}/api/v1/datasets/{dataset['id']}/generate/",
            data={
                "project": _project["id"],
                "sim": dataset_config.sim["name"],
                "config": json.dumps(dataset_config.config),
                "amount": num_datapoints,
            },
            headers=auth_header(_auth_token),
        )
        print("Generating dataset:")
        print(json.dumps(dataset, indent=4, sort_keys=True))
    else:
        dataset = datasets_res["results"][0]
    if materialize:
        print("Materialize requested, waiting until dataset finishes to download it.")
        dataset = get(
            f"{_base_url}/api/v1/datasets/{dataset['id']}/",
            headers=auth_header(_auth_token),
        ).json()
        # Poll until the backend reports a terminal state
        while not is_done(dataset["state"]):
            all_simruns_query_params = {"datasets": dataset["id"]}
            num_simruns = get(
                f"{_base_url}/api/v1/simruns/",
                params=all_simruns_query_params,
                headers=auth_header(_auth_token),
            ).json()["count"]
            num_ready_simruns = get(
                f"{_base_url}/api/v1/simruns/",
                params={**all_simruns_query_params, "state": "READY"},
                headers=auth_header(_auth_token),
            ).json()["count"]
            next_check_datetime = datetime.now() + timedelta(seconds=60)
            while datetime.now() < next_check_datetime:
                print(
                    "\r{}".format(
                        f"Dataset<{dataset['name']}> not ready for download in state {dataset['state']}. "
                        f"SimRuns READY: {num_ready_simruns}/{num_simruns}. "
                        f"Checking again in {(next_check_datetime - datetime.now()).seconds}s."
                    ),
                    end="",
                )
                time.sleep(1)
            clear_last_print()
            # BUGFIX: end="" must be an argument to print(), not str.format()
            # (str.format silently ignores unused kwargs, so the old code
            # printed a trailing newline and broke the \r status line).
            print("\r{}".format("Checking dataset..."), end="")
            dataset = get(
                f"{_base_url}/api/v1/datasets/{dataset['id']}/",
                headers=auth_header(_auth_token),
            ).json()
        if dataset["state"] == "READY":
            print("Dataset is ready for download.")
            dataset_download_res = get(
                f"{_base_url}/api/v1/datasets/{dataset['id']}/download/",
                headers=auth_header(_auth_token),
            ).json()
            name_slug = (
                f"{str(dataset['name']).replace(' ', '_')}-{dataset['id'][:8]}.zip"
            )
            # Throw it in /tmp for now I guess
            output_path = Path(DATASET_OUTPUT_PATH) / name_slug
            existing_files = listdir(DATASET_OUTPUT_PATH)
            if name_slug not in existing_files:
                print(
                    f"Downloading {convert_size(dataset_download_res['size_bytes'])} dataset to {output_path}"
                )
                download_url(dataset_download_res["redirect_link"], output_path)
                format_dataset(output_path, datapoint_callback)
                print("Done.")
            elif datapoint_callback is not None:
                # Zip already downloaded: just re-run the callback over it.
                format_dataset(output_path, datapoint_callback)
            else:
                print(f"Dataset {name_slug} already exists in {output_path}.")
        else:
            print(
                f"Dataset is no longer running but cannot be downloaded with state = {dataset['state']}"
            )
    return Dataset(dataset["name"], dataset)
class Dataset:
    """Local handle on a Dataset that was generated on the API."""

    # Raw dataset json from the API; populated in __init__.
    _dataset = None

    @require_zpy_init
    def __init__(self, name: str = None, dataset: dict = None):
        """
        Construct a Dataset which is a local representation of a Dataset generated on the API.

        Args:
            name: If provided, Dataset will be automatically retrieved from the API.
            dataset: If Dataset has already been retrieved from the API, provide this.

        Returns
            Dataset
        """
        self._name = name
        if dataset is not None:
            self._dataset = dataset
        else:
            # Look the dataset up by (project, name); assumed unique on the API.
            unique_dataset_filters = {
                "project": _project["id"],
                "name": name,
            }
            datasets = get(
                f"{_base_url}/api/v1/datasets/",
                params=unique_dataset_filters,
                headers=auth_header(_auth_token),
            ).json()["results"]
            # NOTE(review): raises IndexError when no dataset matches the
            # filters — confirm whether a friendlier error is wanted here.
            self._dataset = datasets[0]

    @property
    def id(self):
        """The dataset's API id."""
        return self._dataset["id"]

    @property
    def name(self):
        """The name this Dataset was constructed with."""
        return self._name

    @property
    def state(self):
        """The dataset's API state string."""
        # NOTE(review): when _dataset is unset this prints a warning but still
        # subscripts None below (TypeError) — confirm intended behavior.
        if not self._dataset:
            print("Dataset needs to be generated before you can access its state.")
        return self._dataset["state"]

    @property
    def config(self):
        # TODO: not implemented yet; always returns None.
        return

    def view(self):
        # TODO: not implemented yet; always returns None.
        return
import logging
from pathlib import Path
from typing import Union
import gin
import zpy
log = logging.getLogger(__name__)
class ZUMOParseError(Exception):
    """Raised when ZUMO annotation contents fail validation while parsing."""
@gin.configurable
class OutputZUMO(zpy.output.Output):
    """Output class for ZUMO style annotations.

    The ZUMO format is basically a serialized version of zpy.saver.Saver.
    """

    ANNOTATION_FILENAME = Path("_annotations.zumo.json")

    def __init__(self, *args, **kwargs) -> Path:
        # Pin the ZUMO json filename and defer the rest to the base Output.
        super().__init__(*args, annotation_filename=self.ANNOTATION_FILENAME, **kwargs)

    def output_annotations(
        self,
        annotation_path: Union[Path, str] = None,
    ) -> Path:
        """Output annotations to file.

        Args:
            annotation_path (Union[Path, str], optional): Output path for annotation file.

        Returns:
            Path: Path to annotation file.
        """
        annotation_path = super().output_annotations(annotation_path=annotation_path)
        # Serialize the saver state into ZUMO's four top-level sections
        zumo_dict = {
            "metadata": self.saver.metadata,
            "categories": self.saver.categories,
            "images": self.saver.images,
            "annotations": self.saver.annotations,
        }
        zpy.files.write_json(annotation_path, zumo_dict)
        # Round-trip the file through the parser as a sanity check
        parse_zumo_annotations(
            annotation_file=annotation_path, data_dir=self.saver.output_dir
        )
        return annotation_path
@gin.configurable
def parse_zumo_annotations(
    annotation_file: Union[Path, str],
    data_dir: Union[Path, str] = None,
    output_saver: bool = False,
) -> zpy.saver_image.ImageSaver:
    """Parse ZUMO annotations, optionally output a ImageSaver object.

    Args:
        annotation_file (Union[Path, str]): Path to annotation file.
        data_dir (Union[Path, str], optional): Directory containing data (images, video, etc).
        output_saver (bool, optional): Whether to return a Saver object or not. Defaults to False.

    Raises:
        ZUMOParseError: Various checks on annotations, categories, images

    Returns:
        zpy.saver.Saver: Saver object.
    """
    log.info(f"Parsing ZUMO annotations at {annotation_file}...")
    # Check annotation file path
    annotation_file = zpy.files.verify_path(annotation_file)
    if data_dir is not None:
        data_dir = zpy.files.verify_path(data_dir, check_dir=True)
    else:
        # If no data_dir, assume annotation file is in the root folder.
        data_dir = annotation_file.parent
    zumo_metadata = zpy.files.read_json(annotation_file)
    images = zumo_metadata["images"]
    if len(images.keys()) == 0:
        raise ZUMOParseError(f"no images found in {annotation_file}")
    categories = zumo_metadata["categories"]
    if len(categories.keys()) == 0:
        raise ZUMOParseError(f"no categories found in {annotation_file}")
    annotations = zumo_metadata["annotations"]
    if len(annotations) == 0:
        raise ZUMOParseError(f"no annotations found in {annotation_file}")
    log.info(
        f"images:{len(images)} categories:{len(categories)} annotations:{len(annotations)}"
    )
    # Optionally output a saver object.
    if output_saver:
        saver = zpy.saver_image.ImageSaver(
            output_dir=data_dir,
            annotation_path=annotation_file,
            description=zumo_metadata["metadata"]["description"],
            clean_dir=False,
        )
    # Check Images
    log.info("Parsing images...")
    img_ids = []
    for image_id, img in images.items():
        # HACK: JSON will convert int keys to str, so undo that here
        image_id = int(image_id)
        # Image ID
        if not image_id == img["id"]:
            raise ZUMOParseError(
                f"image id {image_id} does not match image dict key {img['id']}"
            )
        if not isinstance(image_id, int):
            raise ZUMOParseError(f"image id {image_id} must be int.")
        if image_id in img_ids:
            raise ZUMOParseError(f"image id {image_id} already used.")
        img_ids.append(image_id)
        if image_id < 0:
            raise ZUMOParseError(f"invalid image id {image_id}")
        # Frame
        frame = img.get("frame", None)
        if frame is not None:
            if not isinstance(frame, int):
                raise ZUMOParseError(f"frame {frame} must be int.")
            # BUGFIX: previously checked image_id here (copy-paste from the
            # image-id block) instead of the frame being validated.
            if frame < 0:
                raise ZUMOParseError(f"invalid image frame {frame}")
        # Height and Width
        height, width = img["height"], img["width"]
        if not isinstance(height, int):
            raise ZUMOParseError(f"height {height} must be int.")
        if not isinstance(width, int):
            raise ZUMOParseError(f"width {width} must be int.")
        if height <= 0 or width <= 0:
            raise ZUMOParseError(f"width and height h:{height} w:{width} must be > 0")
        # Name
        name = img.get("name", None)
        if name is not None:
            if not isinstance(name, str):
                raise ZUMOParseError(f"name {name} must be str.")
            if (
                frame is not None
                and (not zpy.files.id_from_image_name(name) == frame)
                and (not zpy.files.id_from_image_name(name) == image_id)
            ):
                raise ZUMOParseError(
                    f"name {name} does not correspond to"
                    f" frame {frame} or image_id {image_id}."
                )
        # Output path
        output_path = img.get("output_path", None)
        if output_path is not None:
            if not isinstance(output_path, str):
                raise ZUMOParseError(f"output_path {output_path} must be str.")
            if not Path(output_path).exists():
                raise ZUMOParseError(f"output_path {output_path} does not exist")
        # Save each image to ImageSaver object
        if output_saver:
            saver.images[image_id] = img
    # Check Categories
    log.info("Parsing categories...")
    cat_ids = []
    cat_names = []
    for category_id, category in categories.items():
        # Category Name
        category_name = category["name"]
        if not isinstance(category_name, str):
            raise ZUMOParseError(f"category_name {category_name} must be str.")
        if category_name in cat_names:
            raise ZUMOParseError(f"category_name {category_name} already used")
        cat_names.append(category_name)
        # HACK: JSON will convert int keys to str, so undo that here
        category_id = int(category_id)
        # Category ID
        if not category_id == category["id"]:
            raise ZUMOParseError(
                f"category_id {category_id} does not match category dict key {category['id']}"
            )
        # BUGFIX: previously checked isinstance(image_id, int) here (leftover
        # from the image loop) instead of category_id.
        if not isinstance(category_id, int):
            raise ZUMOParseError(f"category_id {category_id} must be int.")
        if category_id in cat_ids:
            raise ZUMOParseError(f"category id {category_id} already used")
        cat_ids.append(category_id)
        # Supercategories
        if category.get("supercategory", None) is not None:
            pass
        if category.get("supercategories", None) is not None:
            pass
        # Subcategories
        if category.get("subcategory", None) is not None:
            pass
        if category.get("subcategories", None) is not None:
            pass
        # Keypoints
        if category.get("keypoints", None) is not None:
            keypoints = category["keypoints"]
            log.info(f"{len(keypoints)} keypoints:{keypoints}")
            if category.get("skeleton", None) is None:
                raise ZUMOParseError(f"skeleton must be present with {keypoints}")
        # Save each category to ImageSaver object
        if output_saver:
            saver.categories[category_id] = category
    # Check Annotations
    log.info("Parsing annotations...")
    ann_ids = []
    for annotation in annotations:
        # IDs
        image_id, category_id, annotation_id = (
            annotation["image_id"],
            annotation["category_id"],
            annotation["id"],
        )
        if image_id not in img_ids:
            raise ZUMOParseError(f"annotation image id {image_id} not in {img_ids}")
        if category_id not in cat_ids:
            raise ZUMOParseError(
                f"annotation category id {category_id} not in {cat_ids}"
            )
        if annotation_id in ann_ids:
            raise ZUMOParseError(f"annotation id {annotation_id} already used")
        ann_ids.append(annotation_id)
        # Bounding Boxes
        bbox = annotation.get("bbox", None)
        if bbox is not None:
            pass
        # Keypoints
        keypoints = annotation.get("num_keypoints", None)
        if keypoints is not None:
            if "keypoints_xyv" in annotation:
                if (
                    len(annotation["keypoints_xyv"])
                    != int(annotation["num_keypoints"]) * 3
                ):
                    # BUGFIX: these messages were missing the f-prefix, and
                    # `keypoints` here is the integer *count*, so the old
                    # len(keypoints) placeholder would have raised TypeError.
                    raise ZUMOParseError(
                        f"keypoints_xyv not correct size {len(annotation['keypoints_xyv'])}"
                    )
            if "keypoints_xyz" in annotation:
                if (
                    len(annotation["keypoints_xyz"])
                    != int(annotation["num_keypoints"]) * 3
                ):
                    raise ZUMOParseError(
                        f"keypoints_xyz not correct size {len(annotation['keypoints_xyz'])}"
                    )
        # Save each annotation to ImageSaver object
        if output_saver:
            saver.annotations.append(annotation)
    if output_saver:
        return saver
    else:
        return None
import logging
from pathlib import Path
from typing import Union
import gin
import zpy
log = logging.getLogger(__name__)
class MOTParseError(Exception):
    """Raised when MOT annotation contents fail validation while parsing."""
@gin.configurable
class OutputMOT(zpy.output.Output):
    """Output class for MOT (Multi-Object Tracking) style annotations.

    https://motchallenge.net/faq/
    """

    ANNOTATION_FILENAME = Path("_annotations.mot.csv")

    def __init__(self, *args, **kwargs) -> Path:
        # Pin the MOT csv filename and defer the rest to the base Output.
        super().__init__(*args, annotation_filename=self.ANNOTATION_FILENAME, **kwargs)

    @gin.configurable
    def output_annotations(
        self,
        annotation_path: Union[Path, str] = None,
    ) -> Path:
        """Output MOT (Multi-Object Tracking) annotations to file.

        Only annotations on RGB ("default" style) images that carry both a
        person id and a bbox produce a csv row.

        Args:
            annotation_path (Union[Path, str], optional): Output path for annotation file.

        Returns:
            Path: Path to annotation file.
        """
        annotation_path = super().output_annotations(annotation_path=annotation_path)
        mot_rows = []
        for annotation in self.saver.annotations:
            if self.saver.images[annotation["image_id"]]["style"] != "default":
                # MOT annotations only have image annotations
                # for RGB images. No segmentation images.
                continue
            person_id = annotation.get("person_id", None)
            bbox = annotation.get("bbox", None)
            if person_id is None or bbox is None:
                continue
            # MOT object types (annotation["mot_type"]):
            #   Pedestrian 1, Person on vehicle 2, Car 3, Bicycle 4,
            #   Motorbike 5, Non motorized vehicle 6, Static person 7,
            #   Distractor 8, Occluder 9, Occluder on the ground 10,
            #   Occluder full 11, Reflection 12, Crowd 13
            mot_rows.append(
                [
                    # Frame at which the object is present
                    annotation["frame_id"],
                    # Pedestrian trajectory is identified by a unique ID
                    person_id,
                    # Top-left corner of the pedestrian bounding box
                    bbox[0],
                    bbox[1],
                    # Width and height in pixels of the bounding box
                    bbox[2],
                    bbox[3],
                    # Flag whether the entry is to be considered (1) or ignored (0)
                    1,
                    annotation["mot_type"],
                    # TODO: Visibility ratio in [0, 1] (occlusion / border crop)
                    1.0,
                ]
            )
        # Write out annotations to file
        zpy.files.write_csv(annotation_path, mot_rows)
        # Verify annotations
        parse_mot_annotations(annotation_path)
        return annotation_path
@gin.configurable
def parse_mot_annotations(
    annotation_file: Union[Path, str],
) -> None:
    """Parse MOT annotations.

    Args:
        annotation_file (Union[Path, str]): Path to annotation file.

    Raises:
        MOTParseError: Rows are not length 9.
    """
    log.info(f"Verifying MOT annotations at {annotation_file}...")
    rows = zpy.files.read_csv(annotation_file)
    # Every MOT row must have exactly 9 fields
    for row in rows:
        if len(row) != 9:
            raise MOTParseError(
                f"Each row in MOT csv must have len 9, found len {len(row)}"
            )
    # TODO: Return Saver object.
import copy
import logging
from pathlib import Path
from typing import List, Union
import gin
import zpy
log = logging.getLogger(__name__)
class COCOParseError(Exception):
    """Raised when COCO annotation contents fail validation while parsing."""
@gin.configurable
class OutputCOCO(zpy.output.Output):
    """Output class for COCO style annotations.

    https://cocodataset.org/#home
    """

    # Default filename for the COCO json annotation file.
    ANNOTATION_FILENAME = Path("_annotations.coco.json")

    def __init__(self, *args, **kwargs) -> Path:
        # Pin the COCO json filename and defer the rest to the base Output.
        super().__init__(*args, annotation_filename=self.ANNOTATION_FILENAME, **kwargs)

    @gin.configurable
    def output_annotations(
        self,
        annotation_path: Union[Path, str] = None,
        splitseg: bool = False,
    ) -> Path:
        """Output COCO annotations to file.

        Args:
            annotation_path (Union[Path, str], optional): Output path for annotation file.
            splitseg (bool, optional): Optionally output split-segmentation annotations. Defaults to False.

        Returns:
            Path: Path to annotation file.
        """
        annotation_path = super().output_annotations(annotation_path=annotation_path)
        # Assemble the five standard COCO top-level sections
        coco_dict = {
            "info": self.coco_info(),
            "licenses": self.coco_license(),
            "categories": self.coco_categories(),
            "images": self.coco_images(),
            "annotations": self.coco_annotations(),
        }
        # Write out annotations to file
        zpy.files.write_json(annotation_path, coco_dict)
        # Round-trip through the parser to validate what was written
        parse_coco_annotations(annotation_path, data_dir=self.saver.output_dir)
        # Output split-segmentation annotations
        if splitseg:
            log.info(
                "Outputting COCO annotations with multi-part"
                + "segmentation split into seperate annotations"
            )
            coco_dict["annotations"] = self.coco_split_segmentation_annotations()
            # The splitseg variant goes to a second file (path gets a suffix)
            annotation_path = zpy.files.add_to_path(annotation_path, "splitseg")
            # Write out annotations to file
            zpy.files.write_json(annotation_path, coco_dict)
            parse_coco_annotations(annotation_path, data_dir=self.saver.output_dir)
        return annotation_path

    @gin.configurable
    def coco_info(self, keys_to_add: List[str] = None):
        """Build the COCO "info" section from saver metadata.

        Args:
            keys_to_add: Optional extra metadata keys to copy through when present.
        """
        coco_info = {
            "description": self.saver.metadata["description"],
            "url": self.saver.metadata["url"],
            "version": self.saver.metadata["date_created"],
            "year": self.saver.metadata["year"],
            "contributor": self.saver.metadata["contributor"],
            "date_created": self.saver.metadata["date_created"],
            "save_path": self.saver.metadata["save_path"],
        }
        # Add any extra keys.
        if keys_to_add is not None:
            for key in keys_to_add:
                value = self.saver.metadata.get(key, None)
                if value is not None:
                    coco_info[key] = value
        return coco_info

    def coco_license(self):
        """Return the static license entry applied to every image."""
        return {
            "url": "http://zumolabs.ai/image_license/",
            "id": 0,
            "name": "Zumo Labs Image License",
        }

    @gin.configurable
    def coco_categories(
        self,
        keys_to_add: List[str] = [
            "keypoints",
            "skeleton",
            "supercategory",
            "subcategories",
        ],
    ):
        """Build the COCO "categories" section from the saver's categories.

        Args:
            keys_to_add: Category keys copied through when present.
        """
        coco_categories = []
        for category in self.saver.categories.values():
            coco_category = {
                "id": category["id"],
                "name": category["name"],
            }
            # Add any extra keys.
            if keys_to_add is not None:
                for key in keys_to_add:
                    value = category.get(key, None)
                    if value is not None:
                        coco_category[key] = value
            coco_categories.append(coco_category)
        return coco_categories

    @gin.configurable
    def coco_images(
        self,
        only_default_images: bool = True,
        keys_to_add: List[str] = None,
    ):
        """Build the COCO "images" section from the saver's images.

        Args:
            only_default_images: When True, only RGB ("default" style) images
                are listed; segmentation images are skipped.
            keys_to_add: Image keys copied through when present.
        """
        coco_images = []
        for image in self.saver.images.values():
            if only_default_images and not image["style"] == "default":
                # COCO annotations only have image annotations
                # for RGB images. No segmentation images.
                continue
            coco_img = {
                "license": 0,
                "id": image["id"],
                "file_name": image["name"],
                "coco_url": image["name"],
                "width": image["width"],
                "height": image["height"],
                "date_captured": self.saver.metadata["date_created"],
                "flickr_url": ".",
            }
            # Add any extra keys.
            if keys_to_add is not None:
                for key in keys_to_add:
                    value = image.get(key, None)
                    if value is not None:
                        coco_img[key] = value
            coco_images.append(coco_img)
        return coco_images

    @gin.configurable
    def coco_annotations(
        self,
        keys_to_add: List[str] = ["bbox", "area", "segmentation"],
        clipped: bool = True,
        only_default_images: bool = True,
    ):
        """Build the COCO "annotations" section from the saver's annotations.

        Args:
            keys_to_add: Annotation keys copied through when present; keys
                holding pixel/normalized coordinates are clipped first when
                ``clipped`` is True.
            clipped: When True, clip coordinates to the image bounds.
            only_default_images: When True, skip annotations whose image is
                not an RGB ("default" style) image.
        """
        coco_annotations = []
        for annotation in self.saver.annotations:
            if (
                only_default_images
                and not self.saver.images[annotation["image_id"]]["style"] == "default"
            ):
                # COCO annotations only have image annotations
                # for RGB images. No segmentation images.
                continue
            coco_ann = {
                "category_id": annotation["category_id"],
                "image_id": annotation["image_id"],
                "id": annotation["id"],
                "iscrowd": False,
            }
            # Image bounds are only needed when clipping pixel coordinates
            if clipped:
                height = self.saver.images[annotation["image_id"]]["height"]
                width = self.saver.images[annotation["image_id"]]["width"]
            # Add any extra keys.
            if keys_to_add is not None:
                for key in keys_to_add:
                    value = annotation.get(key, None)
                    if value is not None:
                        if key == "segmentation":
                            coco_ann["segmentation"] = (
                                self.saver.clip_coordinate_list(
                                    width=width,
                                    height=height,
                                    annotation=annotation["segmentation"],
                                )
                                if clipped
                                else annotation["segmentation"]
                            )
                        elif key == "segmentation_rle":
                            # RLE segmentations are never clipped
                            coco_ann["segmentation_rle"] = annotation[
                                "segmentation_rle"
                            ]
                        elif key == "segmentation_float":
                            coco_ann["segmentation_float"] = (
                                self.saver.clip_coordinate_list(
                                    normalized=True,
                                    annotation=annotation["segmentation_float"],
                                )
                                if clipped
                                else annotation["segmentation_float"]
                            )
                        elif key == "bbox_float":
                            coco_ann["bbox_float"] = (
                                self.saver.clip_bbox(
                                    normalized=True, bbox=annotation["bbox_float"]
                                )
                                if clipped
                                else annotation["bbox_float"]
                            )
                        elif key == "bbox":
                            coco_ann["bbox"] = (
                                self.saver.clip_bbox(
                                    width=width, height=height, bbox=annotation["bbox"]
                                )
                                if clipped
                                else annotation["bbox"]
                            )
                        elif key == "bboxes_float":
                            coco_ann["bboxes_float"] = (
                                [
                                    self.saver.clip_bbox(normalized=True, bbox=bbox)
                                    for bbox in annotation["bboxes_float"]
                                ]
                                if clipped
                                else annotation["bboxes_float"]
                            )
                        elif key == "bboxes":
                            coco_ann["bboxes"] = (
                                [
                                    self.saver.clip_bbox(
                                        height=height, width=width, bbox=bbox
                                    )
                                    for bbox in annotation["bboxes"]
                                ]
                                if clipped
                                else annotation["bboxes"]
                            )
                        else:
                            coco_ann[key] = value
                        # Area fields are derived from the raw (unclipped)
                        # bbox entries; missing bboxes are silently skipped.
                        try:
                            if key == "area":
                                coco_ann["area"] = (
                                    annotation["bbox"][2] * annotation["bbox"][3]
                                )
                            elif key == "areas":
                                coco_ann["areas"] = [
                                    bbox[2] * bbox[3] for bbox in annotation["bboxes"]
                                ]
                        except Exception:
                            pass
            # HACK: Require bbox for an annotation
            if coco_ann.get("bbox", None) is not None:
                coco_annotations.append(coco_ann)
        return coco_annotations

    @gin.configurable
    def coco_split_segmentation_annotations(
        self,
        keys_to_add: List[str] = ["bbox", "area", "segmentation"],
        clipped: bool = True,
        only_default_images: bool = True,
    ):
        """Build COCO annotations with one annotation per segmentation part.

        Multi-part segmentations are split so each polygon component becomes
        its own annotation; annotation ids are re-assigned sequentially.

        Args:
            keys_to_add: Annotation keys copied through when present.
            clipped: When True, clip coordinates to the image bounds.
            only_default_images: When True, skip annotations whose image is
                not an RGB ("default" style) image.
        """
        coco_annotations = []
        # Annotation id will be re-mapped
        annotation_id = 0
        for annotation in self.saver.annotations:
            if (
                only_default_images
                and not self.saver.images[annotation["image_id"]]["style"] == "default"
            ):
                # COCO annotations only have image annotations
                # for RGB images. No segmentation images.
                continue
            coco_ann = {
                "category_id": annotation["category_id"],
                "image_id": annotation["image_id"],
                "iscrowd": False,
            }
            if clipped:
                height = self.saver.images[annotation["image_id"]]["height"]
                width = self.saver.images[annotation["image_id"]]["width"]
            # Add any extra keys.
            if keys_to_add is not None:
                for key in keys_to_add:
                    value = annotation.get(key, None)
                    if value is not None:
                        coco_ann[key] = value
            # Annotations can be composed of multiple annotation components
            if annotation.get("segmentation") is not None:
                num_components = len(annotation["segmentation"])
            else:
                log.warning(
                    "Skipping annotation: split segmentation requires segmentaiton field."
                )
                continue
            # TODO: This can prolly be cleaned up?
            for i in range(num_components):
                # Each component starts from a deep copy of the shared fields,
                # then overwrites the per-component coordinate entries below.
                _coco_ann = copy.deepcopy(coco_ann)
                try:
                    _coco_ann["segmentation"] = (
                        [
                            self.saver.clip_coordinate_list(
                                height=height,
                                width=width,
                                annotation=annotation["segmentation"][i],
                            )
                        ]
                        if clipped
                        else [annotation["segmentation"][i]]
                    )
                except Exception:
                    pass
                try:
                    _coco_ann["segmentation_rle"] = [annotation["segmentation_rle"][i]]
                except Exception:
                    pass
                try:
                    _coco_ann["segmentation_float"] = (
                        [
                            self.saver.clip_coordinate_list(
                                normalized=True,
                                annotation=annotation["segmentation_float"][i],
                            )
                        ]
                        if clipped
                        else [annotation["segmentation_float"][i]]
                    )
                except Exception:
                    pass
                try:
                    _coco_ann["bbox_float"] = (
                        self.saver.clip_bbox(
                            normalized=True, bbox=annotation["bboxes_float"][i]
                        )
                        if clipped
                        else annotation["bboxes_float"][i]
                    )
                except Exception:
                    pass
                try:
                    _coco_ann["bbox"] = (
                        self.saver.clip_bbox(
                            width=width, height=height, bbox=annotation["bboxes"][i]
                        )
                        if clipped
                        else annotation["bboxes"][i]
                    )
                except Exception:
                    pass
                try:
                    _coco_ann["area"] = annotation["areas"][i]
                except Exception:
                    pass
                # HACK: Require bbox for an annotation
                if _coco_ann.get("bbox", None) is not None:
                    _coco_ann["id"] = annotation_id
                    annotation_id += 1
                    coco_annotations.append(_coco_ann)
        return coco_annotations
@gin.configurable
def parse_coco_annotations(
    annotation_file: Union[Path, str],
    data_dir: Union[Path, str] = None,
    output_saver: bool = False,
    # Specify which keys to add to ImageSaver
    image_keys_to_add: List[str] = None,
) -> zpy.saver_image.ImageSaver:
    """Parse COCO annotations, optionally output a ImageSaver object.

    Args:
        annotation_file (Union[Path, str]): Path to annotation file.
        data_dir (Union[Path, str], optional): Directory containing data (images, video, etc).
        output_saver (bool, optional): Whether to return a Saver object or not. Defaults to False.
        image_keys_to_add (List[str], optional): Image dictionary keys to include when parsing COCO dict.

    Raises:
        COCOParseError: Various checks on annotations, categories, images

    Returns:
        zpy.saver_image.ImageSaver: Saver object for Image datasets (None when output_saver is False).
    """
    log.info(f"Parsing COCO annotations at {annotation_file}...")
    # Check annotation file path
    annotation_file = zpy.files.verify_path(annotation_file)
    if data_dir is not None:
        data_dir = zpy.files.verify_path(data_dir, check_dir=True)
    else:
        # If no data_dir, assume annotation file is in the root folder.
        data_dir = annotation_file.parent
    # Check that categories, images, and annotations are not blank
    coco_annotations = zpy.files.read_json(annotation_file)
    images = coco_annotations["images"]
    if len(images) == 0:
        raise COCOParseError(f"no images found in {annotation_file}")
    categories = coco_annotations["categories"]
    if len(categories) == 0:
        raise COCOParseError(f"no categories found in {annotation_file}")
    annotations = coco_annotations["annotations"]
    if len(annotations) == 0:
        raise COCOParseError(f"no annotations found in {annotation_file}")
    log.info(
        f"images:{len(images)} categories:{len(categories)} annotations:{len(annotations)}"
    )
    # Optionally output a saver object.
    if output_saver:
        saver = zpy.saver_image.ImageSaver(
            output_dir=data_dir,
            annotation_path=annotation_file,
            description=coco_annotations["info"]["description"],
            clean_dir=False,
        )
    # Check Images
    log.info("Parsing images...")
    img_ids = []
    for img in images:
        # Image ID
        image_id = img["id"]
        if not isinstance(image_id, int):
            raise COCOParseError(f"image id {image_id} must be int.")
        if image_id in img_ids:
            raise COCOParseError(f"image id {image_id} already used.")
        img_ids.append(image_id)
        if image_id < 0:
            raise COCOParseError(f"invalid image id {image_id}")
        # Height and Width
        height, width = img["height"], img["width"]
        if not isinstance(height, int):
            raise COCOParseError(f"height {height} must be int.")
        if not isinstance(width, int):
            raise COCOParseError(f"width {width} must be int.")
        if height <= 0 or width <= 0:
            raise COCOParseError(f"width and height h:{height} w:{width} must be > 0")
        # Image Name
        filename = img["file_name"]
        if not isinstance(filename, str):
            # BUGFIX: error message now names the offending value.
            raise COCOParseError(f"filename {filename} must be str.")
        image_path = data_dir / filename
        if not image_path.exists():
            raise COCOParseError(f"image path {image_path} does not exist")
        # COCO Path
        coco_url = img.get("coco_url", None)
        if coco_url is None:
            coco_url = filename
        coco_url = Path(coco_url)
        coco_path = data_dir / coco_url
        if not coco_path.exists():
            raise COCOParseError(f"coco url {coco_path} does not exist")
        # Save each image to ImageSaver object
        if output_saver:
            saver.images[image_id] = {
                "id": image_id,
                "name": filename,
                "output_path": str(coco_url),
                "height": height,
                "width": width,
                "style": "default",
            }
            # Add any extra keys.
            if image_keys_to_add is not None:
                for key in image_keys_to_add:
                    value = img.get(key, None)
                    if value is not None:
                        saver.images[image_id][key] = value
    # Check Categories
    log.info("Parsing categories...")
    cat_ids = []
    cat_names = []
    for category in categories:
        name, category_id = category["name"], category["id"]
        log.info(f"name:{name} id:{category_id}")
        # Category Name
        category_name = category["name"]
        if not isinstance(category_name, str):
            raise COCOParseError(f"category_name {category_name} must be str.")
        if category_name in cat_names:
            raise COCOParseError(f"category_name {category_name} already used")
        cat_names.append(category_name)
        # Category ID
        category_id = category["id"]
        # BUGFIX: previously checked image_id (leftover from the image loop).
        if not isinstance(category_id, int):
            raise COCOParseError(f"category_id {category_id} must be int.")
        if category_id in cat_ids:
            raise COCOParseError(f"category id {category_id} already used")
        cat_ids.append(category_id)
        # Supercategories
        if category.get("supercategory", None) is not None:
            pass
        if category.get("supercategories", None) is not None:
            pass
        # Subcategories
        if category.get("subcategory", None) is not None:
            pass
        if category.get("subcategories", None) is not None:
            pass
        # Keypoints
        if category.get("keypoints", None) is not None:
            keypoints = category["keypoints"]
            log.info(f"{len(keypoints)} keypoints:{keypoints}")
            if category.get("skeleton", None) is None:
                raise COCOParseError(f"skeleton must be present with {keypoints}")
        # Save each category to ImageSaver object
        if output_saver:
            _category = saver.categories.get(category_id, None)
            if _category is None:
                saver.categories[category_id] = {}
            for key, value in category.items():
                saver.categories[category_id][key] = value
    # Check Annotations
    log.info("Parsing annotations...")
    ann_ids = []
    for annotation in annotations:
        # IDs
        image_id, category_id, annotation_id = (
            annotation["image_id"],
            annotation["category_id"],
            annotation["id"],
        )
        if image_id not in img_ids:
            raise COCOParseError(f"annotation img:{image_id} not in {img_ids}")
        if category_id not in cat_ids:
            raise COCOParseError(f"annotation cat:{category_id} not in {cat_ids}")
        if annotation_id in ann_ids:
            raise COCOParseError(f"annotation id:{annotation_id} already used")
        ann_ids.append(annotation_id)
        # Bounding Boxes
        bbox = annotation.get("bbox", None)
        if bbox is not None:
            pass
        # Keypoints
        keypoints = annotation.get("num_keypoints", None)
        if keypoints is not None:
            if "keypoints_xyv" in annotation:
                if (
                    len(annotation["keypoints_xyv"])
                    != int(annotation["num_keypoints"]) * 3
                ):
                    # BUGFIX: message lacked the f-prefix and called len()
                    # on num_keypoints (an int).
                    raise COCOParseError(
                        f"keypoints_xyv not correct size {len(annotation['keypoints_xyv'])}"
                    )
            if "keypoints_xyz" in annotation:
                if (
                    len(annotation["keypoints_xyz"])
                    != int(annotation["num_keypoints"]) * 3
                ):
                    raise COCOParseError(
                        f"keypoints_xyz not correct size {len(annotation['keypoints_xyz'])}"
                    )
        # Save each annotation to ImageSaver object
        if output_saver:
            saver.annotations.append(annotation)
    if output_saver:
        return saver
    else:
        return None
import csv
import json
import logging
import os
import random
import re
import shutil
import subprocess
import sys
import tempfile
import zipfile
from pathlib import Path
from pprint import pformat
from typing import Any, Dict, List, Union
log = logging.getLogger(__name__)
"""
Dictionary of filename extensions and prefix/suffixes
These serve as the master search patterns so update and use
these as much as possible to prevent bugs.
Order matters! More specific regexes at the top and
catch-alls at the bottom.
You can test these out at: https://regex101.com/
"""
# Raw strings: "\." in a plain string literal is an invalid escape sequence
# (DeprecationWarning, SyntaxWarning on modern Python).
IMAGE_REGEX = r".*\.(jpeg|jpg|png|bmp)"
FILE_REGEX = {
    # Images
    "instance segmentation image": r".*iseg" + IMAGE_REGEX,
    "class segmentation image": r".*cseg" + IMAGE_REGEX,
    "depth image": r".*depth" + IMAGE_REGEX,
    "normal image": r".*normal" + IMAGE_REGEX,
    "stereo left image": r".*.stereoL" + IMAGE_REGEX,
    "stereo right image": r".*.stereoR" + IMAGE_REGEX,
    "rgb image": r".*rgb" + IMAGE_REGEX,
    "image": IMAGE_REGEX,
    # Annotations
    "zumo annotation": r"_annotations.zumo.json",
    "coco annotation": r".*coco.*\.json",
    "annotation": r".*\.(json|xml|yaml|csv)",
}
def dataset_contents(
    path: Union[Path, str],
    filetype_regex: Dict = FILE_REGEX,
) -> Dict:
    """Use regex to search inside a data directory.

    Args:
        path (Union[Path, str]): Directory filepath.
        filetype_regex (Dict, optional): dictionary of {filetype : regex}

    Returns:
        Dict: Contents of directory, keyed by filetype plus a "dirs" list.
    """
    path = verify_path(path, check_dir=True, make=False)
    contents = {"dirs": []}
    for dirpath, _, filenames in os.walk(path):
        contents["dirs"].append(dirpath)
        for fname in filenames:
            # First matching filetype wins, so regex order matters.
            for filetype, pattern in filetype_regex.items():
                if re.search(pattern, fname):
                    contents.setdefault(filetype, []).append(
                        os.path.join(dirpath, fname)
                    )
                    break
    return contents
def file_is_of_type(
    path: Union[Path, str],
    filetype: str,
) -> bool:
    """Check if a file matches the given filetype.

    Args:
        path (Union[Path, str]): A filesystem path.
        filetype (str): Type of file (see FILE_REGEX dict in zpy.files)

    Returns:
        bool: File is that type.
    """
    assert (
        FILE_REGEX.get(filetype, None) is not None
    ), f"{filetype} must be in {FILE_REGEX.keys()}"
    # str() is a no-op for strings and stringifies pathlib.Path objects.
    return re.search(FILE_REGEX[filetype], str(path)) is not None
def make_rgb_image_name(id: int, extension: str = ".png") -> str:
    """Build an RGB image filename from an integer id.

    Args:
        id (int): Integer id used in name creation.
        extension (str, optional): Extension for image. Defaults to '.png'.

    Returns:
        str: Image name, e.g. "image.000001.rgb.png".
    """
    return f"image.{id:06d}.rgb{extension}"
def make_cseg_image_name(id: int, extension: str = ".png") -> str:
    """Build a category (class) segmentation image filename from an integer id.

    Args:
        id (int): Integer id used in name creation.
        extension (str, optional): Extension for image. Defaults to '.png'.

    Returns:
        str: Image name, e.g. "image.000001.cseg.png".
    """
    return f"image.{id:06d}.cseg{extension}"
def make_iseg_image_name(id: int, extension: str = ".png") -> str:
    """Build an instance segmentation image filename from an integer id.

    Args:
        id (int): Integer id used in name creation.
        extension (str, optional): Extension for image. Defaults to '.png'.

    Returns:
        str: Image name, e.g. "image.000001.iseg.png".
    """
    return f"image.{id:06d}.iseg{extension}"
def make_depth_image_name(id: int, extension: str = ".png") -> str:
    """Build a depth image filename from an integer id.

    Args:
        id (int): Integer id used in name creation.
        extension (str, optional): Extension for image. Defaults to '.png'.

    Returns:
        str: Image name, e.g. "image.000001.depth.png".
    """
    return f"image.{id:06d}.depth{extension}"
def make_custom_image_name(id: int, name: str, extension: str = ".png") -> str:
    """Build a custom image filename from an integer id and a style name.

    Args:
        id (int): Integer id used in name creation.
        name (str): Custom string appended after the numeric index.
        extension (str, optional): Extension for image. Defaults to '.png'.

    Returns:
        str: Image name, e.g. "image.000001.normal.png".
    """
    return f"image.{id:06d}.{name}{extension}"
def id_from_image_name(image_name: str) -> int:
    """Extract the integer id from an image name.

    Concatenates every digit character in the name and parses the result,
    so "image.000123.rgb.png" -> 123.

    Args:
        image_name (str): Name of image to extract integer from.

    Returns:
        int: Integer id.
    """
    return int("".join(filter(str.isdigit, image_name)))
def replace_id_in_image_name(image_name: str, new_id: int) -> str:
    """Replace the integer id in an image name.

    Args:
        image_name (str): Name of the image.
        new_id (int): New id to replace old id with.

    Returns:
        str: New image name.
    """
    # HACK: Assumes the name starts with the 12-char "image.%06d" prefix.
    return f"image.{new_id:06d}{image_name[12:]}"
def add_to_path(path: Union[Path, str], name: str) -> Path:
    """Append a descriptor to a file name: foo.txt -> foo_bar.txt

    Args:
        path (Union[Path, str]): A filesystem path.
        name (str): Name to append (with underscore) before the suffix.

    Returns:
        Path: New path.
    """
    path = to_pathlib_path(path)
    new_name = f"{path.stem}_{name}{path.suffix}"
    return path.parent / new_name
def to_pathlib_path(path: Union[Path, str]) -> Path:
    """Convert a string path into a resolved pathlib.Path if needed.

    Path inputs are returned unchanged; strings have environment variables
    expanded and are resolved to absolute paths.

    Args:
        path (Union[Path, str]): A filesystem path.

    Returns:
        Path: Path in pathlib.Path format.
    """
    if isinstance(path, Path):
        return path
    return Path(os.path.expandvars(path)).resolve()
def default_temp_path() -> Path:
    """Return the OS-agnostic default temporary output folder.

    Returns:
        Path: Path to an "output" folder inside the system temp dir.
    """
    return Path(tempfile.gettempdir(), "output")
def clean_dir(
    path: Union[Path, str],
    keep_dir: bool = True,
) -> None:
    """Delete everything at the provided directory.

    Args:
        path (Union[Path, str]): Path to directory.
        keep_dir (bool, optional): Whether to keep (or delete) the directory itself. Defaults to True.
    """
    path = verify_path(path, make=False, check_dir=True)
    if not keep_dir:
        # Delete everything, including the directory itself
        shutil.rmtree(path)
        return
    # Delete the contents, but keep the directory
    for child in path.iterdir():
        try:
            # Symlinks (even to directories) are unlinked, not recursed into.
            if child.is_file() or child.is_symlink():
                child.unlink()
            elif child.is_dir():
                shutil.rmtree(child)
        except Exception as e:
            log.warning("Failed to delete %s. Reason: %s" % (child, e))
def pretty_print(d: Dict) -> str:
    """Format a dictionary for human-readable display.

    Args:
        d (Dict): Dictionary to be pretty printed

    Returns:
        str: Dictionary in pretty format (2-space indent, 120 columns).
    """
    return pformat(d, indent=2, width=120)
def verify_path(
    path: Union[Path, str],
    make: bool = False,
    check_dir: bool = False,
) -> Path:
    """Check that a path exists, optionally creating it.

    Args:
        path (Union[Path, str]): A filesystem path.
        make (bool, optional): Make the path if it does not exist. Defaults to False.
        check_dir (bool, optional): Throw error if path is not a directory. Defaults to False.

    Raises:
        ValueError: Path is not a directory (only if check_dir is set to True)

    Returns:
        Path: The same path.
    """
    path = to_pathlib_path(path)
    if path.exists():
        log.debug(f"Path found at {path}.")
    else:
        log.warning(f"Could not find path at {path}")
        if make:
            log.info(f"Making {path.name} dir at {path}")
            path.mkdir(exist_ok=True, parents=True)
    if check_dir and not path.is_dir():
        raise ValueError(f"Path at {path} is not a directory.")
    return path
def write_json(
    path: Union[Path, str],
    data: Union[Dict, List],
) -> None:
    """Save data to a json file.

    Args:
        path (Union[Path, str]): Path to output json.
        data (Union[Dict, List]): Data to save.

    Raises:
        ValueError: Path is not a json file.
    """
    path = to_pathlib_path(path)
    if path.suffix != ".json":
        raise ValueError(f"{path} is not a JSON file.")
    log.info(f"Writing JSON to file {path}")
    with path.open("w") as f:
        json.dump(data, f, indent=4)
def read_json(
    path: Union[Path, str],
) -> Union[Dict, List]:
    """Read json data from a path.

    Args:
        path (Union[Path, str]): A filesystem path.

    Raises:
        ValueError: Path is not a json file.

    Returns:
        Union[Dict, List]: Data in the json.
    """
    path = to_pathlib_path(path)
    if path.suffix != ".json":
        raise ValueError(f"{path} is not a JSON file.")
    log.info(f"Reading JSON file at {path}")
    with path.open() as f:
        return json.load(f)
def write_csv(
    path: Union[Path, str], data: List[List[Any]], delimiter: str = ",", **kwargs
) -> None:
    """Write data to csv.

    Pass in additional kwargs to the csv writer.

    Args:
        path (Union[Path, str]): A filesystem path.
        data (List[List[Any]]): Data to save.
        delimiter (str, optional): Delimiter for each row of csv. Defaults to ','.

    Raises:
        ValueError: Path is not a csv or txt file.
    """
    path = to_pathlib_path(path)
    if path.suffix not in [".csv", ".txt"]:
        raise ValueError(f"{path} is not a CSV file.")
    log.info(f"Writing CSV to file {path}")
    # BUGFIX: csv files must be opened with newline="" so the csv module
    # controls line endings (avoids blank rows on Windows).
    with path.open("w", newline="") as f:
        writer = csv.writer(f, delimiter=delimiter, **kwargs)
        writer.writerows(data)
def read_csv(path: Union[Path, str], delimiter: str = ",", **kwargs) -> List[List[Any]]:
    """Read csv data from a path.

    Pass in additional kwargs to the csv reader.

    Args:
        path (Union[Path, str]): A filesystem path.
        delimiter (str, optional): Delimiter for each row of csv. Defaults to ','.

    Raises:
        ValueError: Path is not a csv or txt file.

    Returns:
        List[List[Any]]: Data in the csv.
    """
    path = to_pathlib_path(path)
    if path.suffix not in [".csv", ".txt"]:
        raise ValueError(f"{path} is not a CSV file.")
    log.info(f"Reading CSV file at {path}")
    data = []
    # BUGFIX: csv files must be opened with newline="" per the csv module
    # docs, so embedded newlines inside quoted fields round-trip correctly.
    with path.open(newline="") as f:
        for row in csv.reader(f, delimiter=delimiter, **kwargs):
            data.append(row)
    return data
def pick_random_from_dir(
    dir_path: Union[Path, str],
    suffixes: List[str] = [".txt"],
) -> Path:
    """Pick a random file with a matching suffix from a directory.

    Args:
        dir_path (Union[Path, str]): Path to the directory containing files.
        suffixes (List[str], optional): Filter files by these suffixes. Defaults to [".txt"].

    Returns:
        Path: Path to a randomly chosen file with a matching suffix.
    """
    # BUGFIX: accept str paths as the signature documents (a plain str has
    # no .iterdir()).
    dir_path = to_pathlib_path(dir_path)
    _paths = [
        _path
        for _path in dir_path.iterdir()
        if _path.is_file() and _path.suffix in suffixes
    ]
    _path = random.choice(_paths)
    log.debug(f"Found {len(_paths)} files with suffix {suffixes} at {dir_path}")
    log.info(f"Randomly chose {_path}")
    return _path
def sample(
    things: List,
    sample_size: int = None,
) -> List:
    """Sample N things from a list.

    Args:
        things (List): List of things.
        sample_size (int, optional): Sample size N. Defaults to length of things.

    Returns:
        List: The original list when N is unset or >= its length, otherwise
            a new random sample of N things.
    """
    if sample_size is None or sample_size >= len(things):
        return things
    return random.sample(things, sample_size)
def filecopy(
    src_path: Union[Path, str],
    dst_path: Union[Path, str],
) -> None:
    """Copy a file from a source path to a destination path.

    Args:
        src_path (Union[Path, str]): Source filesystem path.
        dst_path (Union[Path, str]): Destination filesystem path.
    """
    # Both paths are normalized (and their existence logged) before copying.
    src_path = verify_path(src_path)
    dst_path = verify_path(dst_path)
    log.debug(f"Copying over file from {src_path} to {dst_path}")
    shutil.copy(src_path, dst_path)
def open_folder_in_explorer(
    path: Union[Path, str],
    make: bool = False,
) -> None:
    """Opens a directory in the fileexplorer of your OS.

    Args:
        path (Union[Path, str]): Filesystem path.
        make (bool, optional): Make directory if it doesn't exist. Defaults to False.
    """
    path = verify_path(path, check_dir=True, make=make)
    # macOS (checked first, since os.name is also "posix" on macOS)
    if sys.platform.startswith("darwin"):
        subprocess.call(("open", path))
    # Windows
    elif os.name == "nt":
        os.startfile(path)
    # Linux / other POSIX systems
    elif os.name == "posix":
        subprocess.call(("xdg-open", path))
def remove_files_with_suffix(
    path: Union[Path, str],
    exts: List[str],
) -> None:
    """Remove files directly under a directory whose suffix is in exts.

    Args:
        path (Union[Path, str]): Directory path.
        exts (List[str]): List of extensions to remove
    """
    path = verify_path(path, check_dir=True)
    # Non-recursive: only direct children of the directory are considered.
    for child in path.glob("*"):
        if child.suffix in exts:
            log.info(f"Removing file at {child}")
            child.unlink()
def unzip_file(
    zip_path: Union[Path, str],
    out_path: Union[Path, str],
) -> None:
    """Unzip a file to an output path.

    Args:
        zip_path (Union[Path, str]): Path to zip file.
        out_path (Union[Path, str]): Path to output directory.

    Raises:
        ValueError: Path isn't a zip.
    """
    log.info(f"Unzipping {zip_path} to {out_path}...")
    zip_path = verify_path(zip_path)
    out_path = verify_path(out_path, check_dir=True)
    if not zip_path.suffix == ".zip":
        raise ValueError(f"{zip_path} is not a zip file")
    # BUGFIX: use a context manager so the archive handle is always closed
    # (it was previously leaked).
    with zipfile.ZipFile(str(zip_path)) as zf:
        infos = zf.infolist()
        zipped_size_mb = round(sum(i.compress_size for i in infos) / 1024 / 1024)
        unzipped_size_mb = round(sum(i.file_size for i in infos) / 1024 / 1024)
        log.info(f"Compressed: {zipped_size_mb}MB, actual: {unzipped_size_mb}MB.")
        zf.extractall(out_path)
    log.info(f"Done extracting to {out_path}.")
def zip_file(
    in_path: Union[Path, str],
    zip_path: Union[Path, str],
) -> None:
    """Zip a directory to a path.

    Args:
        in_path (Union[Path, str]): Path to input directory.
        zip_path (Union[Path, str]): Path to zip file.

    Raises:
        ValueError: Path isn't a zip.
    """
    log.info(f"Zipping {in_path} to {zip_path}...")
    in_path = verify_path(in_path)
    zip_path = verify_path(zip_path)
    if not zip_path.suffix == ".zip":
        raise ValueError(f"{zip_path} is not a zip file")
    shutil.make_archive(
        base_name=zip_path.parent / zip_path.stem, format="zip", root_dir=in_path
    )
    log.info(f"Done zipping to {zip_path}.")
    # BUGFIX: close the archive handle after reading sizes (it was leaked).
    with zipfile.ZipFile(str(zip_path)) as zf:
        infos = zf.infolist()
        zipped_size_mb = round(sum(i.compress_size for i in infos) / 1024 / 1024)
        unzipped_size_mb = round(sum(i.file_size for i in infos) / 1024 / 1024)
    log.info(f"Compressed: {zipped_size_mb}MB, actual: {unzipped_size_mb}MB.")
import json
import logging
import multiprocessing
import signal
import sys
import time
import traceback
from functools import wraps
from pprint import pformat
from typing import Any, Dict
import zmq
import gin
import zpy
log = logging.getLogger(__name__)
class InvalidRequest(Exception):
    """Network message to launcher is incorrect.

    Raised by verify_key when a required key is missing from a request
    dict or its value has the wrong type.
    """
    pass
def verify_key(request: Dict, key: str, key_type: type = None) -> Any:
    """Check a request dict for a key; raise if absent or of the wrong type.

    Args:
        request (Dict): Request dictionary.
        key (str): Key to look for in dictionary.
        key_type (type, optional): The datatype that the value to the corresponding key should be.

    Raises:
        InvalidRequest: Key is not present, or value is of wrong type.

    Returns:
        Any: Value at the key.
    """
    value = request.get(key, None)
    if value is None:
        raise InvalidRequest(f"Required key {key} not found.")
    if key_type is not None and not isinstance(value, key_type):
        raise InvalidRequest(f"Key {key} must be of type {key_type}.")
    return value
class Process(multiprocessing.Process):
    """Allows bubbling up exceptions from a python process.

    The child sends (message, traceback) through a pipe on failure; the
    parent reads it via the ``exception`` property after ``join()``.
    """
    def __init__(self, *args, **kwargs):
        multiprocessing.Process.__init__(self, *args, **kwargs)
        # Parent/child ends of a pipe used to report the child's outcome.
        self._pconn, self._cconn = multiprocessing.Pipe()
        self._exception = None
    def run(self):
        # Run the target; forward any exception (as strings) to the parent.
        try:
            multiprocessing.Process.run(self)
            self._cconn.send(None)
        except Exception as e:
            tb = traceback.format_exc()
            self._cconn.send((str(e), str(tb)))
            raise e
    @property
    def exception(self):
        # (message, traceback) tuple if the child raised, else None.
        if self._pconn.poll():
            self._exception = self._pconn.recv()
        return self._exception
def request_as_process(request_func):
    """Decorator for running a request as a seperate process.

    Args:
        request_func (callable): function to be decorated; invoked as
            ``request_func(request, reply_dict)`` inside a child process.

    Returns:
        [callable]: Wrapped function.
    """
    @wraps(request_func)
    def wrapped_request_func(request: Dict) -> None:
        # Manager dict lets the child process report results back.
        _reply = multiprocessing.Manager().dict()
        p = Process(target=request_func, args=(request, _reply))
        p.start()
        p.join()
        # NOTE(review): assumes the module-global `reply` was already set to
        # a dict by accept_requests before this runs -- confirm; it is None
        # until the first request.
        global reply
        reply.update(_reply)
        # Surface any exception (message + traceback) raised in the child.
        if p.exception:
            reply["exception"] = p.exception[0]
            reply["trace"] = p.exception[1]
            reply["code"] = 400
    return wrapped_request_func
# Global signal/communication state shared by handle_signal and the request
# loop in accept_requests (see funcs below).
abort = None  # set True to stop the accept_requests loop after this request
waiting = None  # True while blocked waiting on the zmq socket
reply = None  # reply dict for the in-flight request
def handle_signal(signum, frame) -> None:
    """Handle interrupt signal.

    If the loop is idle (blocked on the socket) exit immediately; otherwise
    set the global abort flag so the loop stops after the current request.
    """
    log.info(f"Received interrupt signal {signum}")
    if waiting:
        sys.exit(1)
    global abort
    abort = True
def accept_requests(run_func):
    """Decorator for accepting requests as seperate processes.

    Wraps ``run_func`` in a blocking zmq REP loop: receive a JSON request,
    call ``run_func(request)``, then send back the global ``reply`` dict
    (augmented with an error code and duration).

    Args:
        run_func (callable): function to be decorated.

    Returns:
        [callable]: Wrapped function taking a zmq bind URI.
    """
    @wraps(run_func)
    def wrapped_run_func(bind_uri: str) -> None:
        # This is the main entrypoint for request based communication
        log.info("Configuring zmq socket...")
        context = zmq.Context()
        socket = context.socket(zmq.REP)
        socket.bind(bind_uri)
        # SIGTERM either exits immediately (idle) or aborts after the
        # current request -- see handle_signal.
        signal.signal(signal.SIGTERM, handle_signal)
        global abort, waiting, reply
        abort = False
        while not abort:
            log.info("Waiting for requests...")
            waiting = True
            request = json.loads(socket.recv_json())
            zpy.logging.linebreaker_log("new request")
            log.info(f"New request: {pformat(request)}")
            waiting = False
            # Reply will include duration of request
            start_time = time.time()
            try:
                # Request can set a log level
                log_level = request.get("log_level", None)
                if log_level is not None:
                    zpy.logging.set_log_levels(level=log_level)
                # Default reply will include a message and an error code
                reply = {
                    "request": request,
                    "code": 200,
                }
                # Reset any gin configs
                try:
                    gin.enter_interactive_mode()
                    gin.clear_config()
                except Exception as e:
                    log.warning(f"Could not reset gin configs before request: {e}")
                # Call the function that was given
                run_func(request)
            except Exception as e:
                # NOTE(review): if the exception fires before `reply` is
                # assigned above, this mutates the previous iteration's
                # reply (or None on the first request) -- confirm intended.
                reply["exception"] = str(e)
                reply["code"] = 400
            # Duration of request is logged and sent in reply
            duration = time.time() - start_time
            reply["duration"] = duration
            # Send reply message back through the socket
            zpy.logging.linebreaker_log("reply")
            log.info(f"{pformat(reply)}")
            socket.send_json(json.dumps(reply))
        log.info("Exiting launcher.")
    return wrapped_run_func
def send_request(
    request: Dict,
    ip: str = "127.0.0.1",
    port: str = "5555",
) -> Dict:
    """Send a request dict to a zmq REQ socket and block for the reply.

    Args:
        request (Dict): Request dictionary sent over the socket.
        ip (str, optional): ip address. Defaults to '127.0.0.1'.
        port (str, optional): port on ip address. Defaults to '5555'.

    Returns:
        Dict: Reply dictionary.
    """
    log.info(f"Connecting to {ip}:{port} ...")
    ctx = zmq.Context()
    sock = ctx.socket(zmq.REQ)
    sock.connect(f"tcp://{ip}:{port}")
    log.info("... Done!")
    log.info(f"Sending request: {request}")
    # Requests are double-encoded: a JSON string inside a zmq JSON frame.
    sock.send_json(json.dumps(request))
    log.info("Waiting for response...")
    response = json.loads(sock.recv_json())
    log.info(f"Received response: {pformat(response)}")
    return response
import logging
import bpy
import gin
import zpy
log = logging.getLogger(__name__)
@gin.configurable
class Keypoints:
    """Functionality for Keypoints.

    Holds keypoint naming/connectivity schemes (COCO or Body25B) and a
    lookup from keypoint name to armature bone name (Mixamo or Anima),
    and computes per-frame keypoint positions from an armature's pose.
    """

    # COCO(From pycoco)
    COCO_NAMES = [
        "nose",
        "left_eye",
        "right_eye",
        "left_ear",
        "right_ear",
        "left_shoulder",
        "right_shoulder",
        "left_elbow",
        "right_elbow",
        "left_wrist",
        "right_wrist",
        "left_hip",
        "right_hip",
        "left_knee",
        "right_knee",
        "left_ankle",
        "right_ankle",
    ]
    COCO_CONNECTIVITY = [
        [16, 14],
        [14, 12],
        [17, 15],
        [15, 13],
        [12, 13],
        [6, 12],
        [7, 13],
        [6, 7],
        [6, 8],
        [7, 9],
        [8, 10],
        [9, 11],
        [2, 3],
        [1, 2],
        [1, 3],
        [2, 4],
        [3, 5],
        [4, 6],
        [5, 7],
    ]
    COCO_BONE_LOOKUP_MIXAMO = {
        "nose": "NoseEnd",
        "left_eye": "LeftEyeEnd",
        "right_eye": "RightEyeEnd",
        "left_ear": "LeftEarEnd",
        "right_ear": "RightEarEnd",
        "left_shoulder": "LeftArm",
        "right_shoulder": "RightArm",
        "left_elbow": "LeftForeArm",
        "right_elbow": "RightForeArm",
        "left_wrist": "LeftHand",
        "right_wrist": "RightHand",
        "left_hip": "LeftUpLeg",
        "right_hip": "RightUpLeg",
        "left_knee": "LeftLeg",
        "right_knee": "RightLeg",
        "left_ankle": "LeftFoot",
        "right_ankle": "RightFoot",
    }
    COCO_BONE_LOOKUP_ANIMA = {
        # TODO: Anima armature does not have eyes, ears, nose
        "nose": "Head",
        "left_eye": "Head",
        "right_eye": "Head",
        "left_ear": "Head",
        "right_ear": "Head",
        "left_shoulder": "LeftArm",
        "right_shoulder": "RightArm",
        "left_elbow": "LeftForeArm",
        "right_elbow": "RightForeArm",
        "left_wrist": "LeftHand",
        "right_wrist": "RightHand",
        "left_hip": "LeftUpLeg",
        "right_hip": "RightUpLeg",
        "left_knee": "LeftLeg",
        "right_knee": "RightLeg",
        "left_ankle": "LeftFoot",
        "right_ankle": "RightFoot",
    }
    # Body25B
    BODY25B_NAMES = [
        "Nose",
        "Neck",
        "RShoulder",
        "RElbow",
        "RWrist",
        "LShoulder",
        "LElbow",
        "LWrist",
        "RHip",
        "RKnee",
        "RAnkle",
        "LHip",
        "LKnee",
        "LAnkle",
        "REye",
        "LEye",
        "REar",
        "LEar",
    ]
    BODY25B_CONNECTIVITY = [
        [0, 1],
        [0, 2],
        [1, 3],
        [2, 4],
        [5, 7],
        [6, 8],
        [7, 9],
        [8, 10],
        [5, 11],
        [6, 12],
        [11, 13],
        [12, 14],
        [13, 15],
        [14, 16],
        [15, 19],
        [19, 20],
        [15, 21],
        [16, 22],
        [22, 23],
        [16, 24],
        [5, 17],
        [6, 17],
        [17, 18],
        [11, 12],
    ]
    BODY25B_BONE_LOOKUP_MIXAMO = {
        "Nose": "NoseEnd",
        "Neck": "Head",
        "RShoulder": "RightShoulder",
        "RElbow": "RightForeArm",
        "RWrist": "RightHand",
        "LShoulder": "LeftShoulder",
        "LElbow": "LeftForeArm",
        "LWrist": "LeftHand",
        "RHip": "RightUpLeg",
        "RKnee": "RightLeg",
        "RAnkle": "RightFoot",
        "LHip": "LeftUpLeg",
        "LKnee": "LeftLeg",
        "LAnkle": "LeftFoot",
        "REye": "RightEyeEnd",
        "LEye": "LeftEyeEnd",
        "REar": "RightEarEnd",
        "LEar": "LeftEarEnd",
    }
    # TODO: Anima armature
    BODY25B_BONE_LOOKUP_ANIMA = {
        "Nose": "Head",
        "Neck": "Head",
        "RShoulder": "RightShoulder",
        "RElbow": "RightForeArm",
        "RWrist": "RightHand",
        "LShoulder": "LeftShoulder",
        "LElbow": "LeftForeArm",
        "LWrist": "LeftHand",
        "RHip": "RightUpLeg",
        "RKnee": "RightLeg",
        "RAnkle": "RightFoot",
        "LHip": "LeftUpLeg",
        "LKnee": "LeftLeg",
        "LAnkle": "LeftFoot",
        "REye": "RightEyeEnd",
        "LEye": "LeftEyeEnd",
        "REar": "RightEarEnd",
        "LEar": "LeftEarEnd",
    }

    def __init__(
        self,
        root: bpy.types.Object,
        style: str = "coco",
        armature: str = "anima",
    ):
        """Initialize keypoint object.

        Args:
            root (bpy.types.Object): Armature object whose pose bones are read.
            style (str, optional): Keypoint scheme, "coco" or "body25b".
            armature (str, optional): Bone naming scheme, "mixamo" or "anima".

        Raises:
            ValueError: Unknown style or armature.
        """
        if style == "coco":
            self.names = self.COCO_NAMES
            self.connectivity = self.COCO_CONNECTIVITY
            if armature == "mixamo":
                self.bone_lookup = self.COCO_BONE_LOOKUP_MIXAMO
            elif armature == "anima":
                self.bone_lookup = self.COCO_BONE_LOOKUP_ANIMA
            else:
                raise ValueError(f"Unknown keypoint armature: {armature}")
        elif style == "body25b":
            self.names = self.BODY25B_NAMES
            self.connectivity = self.BODY25B_CONNECTIVITY
            if armature == "mixamo":
                self.bone_lookup = self.BODY25B_BONE_LOOKUP_MIXAMO
            elif armature == "anima":
                self.bone_lookup = self.BODY25B_BONE_LOOKUP_ANIMA
            else:
                raise ValueError(f"Unknown keypoint armature: {armature}")
        else:
            raise ValueError(f"Unknown keypoint style: {style}")
        self.root = root
        self.style = style
        self.armature = armature
        self.bones = {bone.name: bone for bone in self.root.pose.bones}
        self.num_keypoints = None
        self.keypoints_xyv = None
        self.keypoints_xyz = None

    def update(
        self,
        world_transform=None,
    ) -> None:
        """Recompute the keypoint skeleton from the current pose.

        Fills keypoints_xyv (image-space x, y, visibility triplets),
        keypoints_xyz (world-space positions) and num_keypoints.

        Args:
            world_transform (optional): Extra transform applied on top of
                the armature's world matrix.
        """
        self.num_keypoints = 0
        self.keypoints_xyv = []
        self.keypoints_xyz = []
        for name, bone_name in self.bone_lookup.items():
            bone = self.bones.get(bone_name, None)
            if bone is None:
                log.warning(f"Could not find keypoint bone {name} using {bone_name}")
                # BUGFIX: skip missing bones; previously execution fell
                # through and crashed on bone.head (AttributeError).
                continue
            if world_transform is None:
                pos = self.root.matrix_world @ bone.head
            else:
                pos = world_transform @ self.root.matrix_world @ bone.head
            x, y, v = zpy.camera.camera_xyv(pos, obj=self.root)
            self.keypoints_xyv += [x, y, v]
            self.keypoints_xyz += tuple(pos)
            self.num_keypoints += 1
from typing import Dict, Union
from pathlib import Path
import json
import requests
import logging
# Backend REST endpoint for experiment records.
ENDPOINT = "https://ragnarok.zumok8s.org/api/v1/experiment/"
# Module-global Experiment set by init() and used by log().
experiment = None
# Module-global logger, configured in init().
logger = None
def init(
    name: str,
    sim: str = None,
    dataset: str = None,
    config: Dict = None,
    api_key: str = None,
) -> None:
    """Initialize a experiment run.

    Creates an Experiment, stores it in the module-global ``experiment``
    (read later by ``log``), and registers it with the backend.

    Args:
        name (str): identifier for experiment
        sim (str, optional): identifier for simulation associated with experiment
        dataset (str, optional): identifier for dataset associated with experiment
        config (dict, optional): configuration details about experiment
        api_key (str, required): api_key to auth with backend

    Raises:
        PermissionError: if no api_key is given.
    """
    if api_key is None:
        raise PermissionError("please input zpy api_key")
    global logger
    logger = logging.getLogger(__name__)
    exp = Experiment(
        name=name, sim=sim, dataset=dataset, config=config, api_key=api_key
    )
    global experiment
    experiment = exp
    exp._create()
def log(
    metrics: str = None,
    file_path: str = None,
) -> None:
    """Log an update to experiment.

    Requires ``init`` to have been called first (reads the module-global
    ``experiment``).

    Args:
        metrics (str, optional): free form dictionary of data to log
        file_path (str, optional): file path to upload
    Raises:
        FileNotFoundError: if file_path doesnt exist (raised when the
            upload opens the file)
    """
    global experiment
    exp = experiment
    if file_path:
        # Normalize to an absolute path before upload.
        file_path = Path(file_path).resolve()
    exp._update(file_path=file_path, metrics=metrics)
class Experiment:
    """Experiment record that is created/updated against the zpy backend."""

    def __init__(
        self,
        name: str = None,
        sim: str = None,
        dataset: str = None,
        config: Dict = None,
        api_key: str = None,
    ) -> None:
        """Experiment Class for ragnarok upload.

        Args:
            name (str): identifier for experiment
            sim (str, optional): identifier for simulation associated with experiment
            dataset (str, optional): identifier for dataset associated with experiment
            config (dict, optional): configuration details about experiment
            api_key (str, required): api_key to auth with backend
        """
        self.name = name
        self.sim = sim
        self.dataset = dataset
        self.config = config
        self.auth_headers = {"Authorization": "token {}".format(api_key)}
        # Backend id, set after the first successful _create() call.
        self.id = None

    def _post(self, data=None):
        """POST to the endpoint to create the record; stores its backend id."""
        r = requests.post(ENDPOINT, data=data, headers=self.auth_headers)
        if r.status_code != 201:
            logger.debug(f"{r.text}")
        r.raise_for_status()
        self.id = json.loads(r.text)["id"]
        logger.debug(f"{r.status_code}: {r.text}")

    def _put(self, data=None, files=None):
        """PUT to the endpoint to update the existing record."""
        r = requests.put(
            f"{ENDPOINT}{self.id}/", data=data, files=files, headers=self.auth_headers
        )
        if r.status_code != 200:
            logger.debug(f"{r.text}")
        r.raise_for_status()
        logger.debug(f"{r.status_code}: {r.text}")

    def _create(self):
        """Request to create the experiment on the backend."""
        data = {"name": self.name}
        if self.sim:
            data["sim_name"] = self.sim
        if self.dataset:
            data["data_set_name"] = self.dataset
        if self.config:
            data["config"] = json.dumps(self.config)
        self._post(data=data)

    def _update(self, file_path: Union[Path, str] = None, metrics: Dict = None) -> None:
        """Request to update the experiment with metrics and/or a file upload."""
        data = {"name": self.name}
        if metrics:
            data["metrics"] = json.dumps(metrics)
        if file_path:
            data["file_name"] = Path(file_path).name
            # BUGFIX: close the uploaded file handle (it was leaked before).
            with open(file_path, "rb") as f:
                self._put(data=data, files={"file": f})
        else:
            self._put(data=data)
import logging
import os
import time
from pathlib import Path
from typing import Union, Tuple
import bpy
import gin
import zpy
log = logging.getLogger(__name__)
@gin.configurable
def make_aov_pass(
    style: str = "instance",
) -> None:
    """Create an AOV render pass for the given style (Cycles only).

    Args:
        style (str, optional): Pass style, one of [instance, category].
    """
    scene = zpy.blender.verify_blender_scene()
    # AOV passes are only supported by the Cycles render engine.
    if scene.render.engine != "CYCLES":
        log.warning(" Setting render engine to CYCLES to use AOV")
        scene.render.engine = "CYCLES"
    scene.render.use_compositing = True
    # Restrict to the supported pass styles.
    valid_styles = ["instance", "category"]
    assert (
        style in valid_styles
    ), f"Invalid style {style} for AOV Output Node, must be in {valid_styles}."
    view_layer = zpy.blender.verify_view_layer()
    # Bail out early if a pass with this name already exists.
    for existing_aov in view_layer.aovs.values():
        if existing_aov.name == style:
            log.info(f"AOV pass for {style} already exists.")
            return
    bpy.ops.scene.view_layer_add_aov()
    view_layer.aovs[-1].name = style
    view_layer.update()
    log.info(f"Created AOV pass for {style}.")
@gin.configurable
def make_aov_file_output_node(
    style: str = "rgb",
    add_hsv: bool = True,
    add_lens_dirt: bool = False,
) -> bpy.types.CompositorNodeOutputFile:
    """Make AOV Output nodes in Composition Graph.

    Args:
        style (str, optional): Render pass style, one of
            [rgb, depth, instance, category]. Defaults to "rgb".
        add_hsv (bool, optional): Insert a Hue-Saturation-Value node into
            the rgb branch. Defaults to True.
        add_lens_dirt (bool, optional): Insert a lens dirt node (currently a
            no-op stub) into the rgb branch. Defaults to False.

    Returns:
        bpy.types.CompositorNodeOutputFile: File output node for this style.
    """
    log.info(f"Making AOV output node for {style}")
    # Only certain styles are available
    valid_styles = ["rgb", "depth", "instance", "category"]
    assert (
        style in valid_styles
    ), f"Invalid style {style} for AOV Output Node, must be in {valid_styles}."
    # Make sure scene composition is using nodes
    scene = zpy.blender.verify_blender_scene()
    scene.use_nodes = True
    tree = scene.node_tree
    # Remove Composite Node if it exists
    composite_node = tree.nodes.get("Composite")
    if composite_node is not None:
        tree.nodes.remove(composite_node)
    rl_node = zpy.nodes.get_or_make("Render Layers", "CompositorNodeRLayers", tree)
    # Instance and category require an AOV pass
    if style in ["instance", "category"]:
        zpy.render.make_aov_pass(style)
    # Visualize node shows image in workspace
    view_node = zpy.nodes.get_or_make(f"{style} Viewer", "CompositorNodeViewer", tree)
    # File output node renders out image
    fileout_node = zpy.nodes.get_or_make(
        f"{style} Output", "CompositorNodeOutputFile", tree
    )
    fileout_node.mute = False
    # HACK: Depth requires normalization node between layer and output
    if style == "depth":
        # Normalization node
        norm_node = zpy.nodes.get_or_make(
            f"{style} Normalize", "CompositorNodeNormalize", tree
        )
        # Negative inversion
        invert_node = zpy.nodes.get_or_make(
            f"{style} Negate", "CompositorNodeInvert", tree
        )
        # Link up the nodes: RL depth -> normalize -> invert -> viewer/file out
        tree.links.new(rl_node.outputs["Depth"], norm_node.inputs[0])
        tree.links.new(norm_node.outputs[0], invert_node.inputs["Color"])
        tree.links.new(invert_node.outputs[0], view_node.inputs["Image"])
        tree.links.new(invert_node.outputs[0], fileout_node.inputs["Image"])
    elif style == "rgb":
        _node = rl_node
        if add_lens_dirt:
            _node = lens_dirt_node(node_tree=tree, input_node=rl_node)
            tree.links.new(rl_node.outputs["Image"], _node.inputs["Image"])
        if add_hsv:
            # NOTE(review): the HSV node is always fed from rl_node, so when
            # add_lens_dirt is also True the lens dirt node is bypassed —
            # confirm whether HSV should instead take _node as its input.
            _node = hsv_node(node_tree=tree, input_node=rl_node)
            tree.links.new(rl_node.outputs["Image"], _node.inputs["Image"])
        tree.links.new(_node.outputs["Image"], view_node.inputs["Image"])
        tree.links.new(_node.outputs["Image"], fileout_node.inputs["Image"])
    else:  # category and instance segmentation
        tree.links.new(rl_node.outputs[style], view_node.inputs["Image"])
        tree.links.new(rl_node.outputs[style], fileout_node.inputs["Image"])
    return fileout_node
def hsv_node(
    node_tree: bpy.types.NodeTree,
    input_node: bpy.types.Node,
) -> bpy.types.Node:
    """Create (or fetch) an HSV compositor node fed by input_node.

    Args:
        node_tree (bpy.types.NodeTree): Compositor node tree to add to.
        input_node (bpy.types.Node): Node whose Image output feeds the HSV node.

    Returns:
        bpy.types.Node: The Hue-Saturation-Value node.
    """
    node = zpy.nodes.get_or_make("HSV", "CompositorNodeHueSat", node_tree)
    node_tree.links.new(input_node.outputs["Image"], node.inputs["Image"])
    return node
def lens_dirt_node(
    node_tree: bpy.types.NodeTree,
    input_node: bpy.types.Node,
) -> bpy.types.Node:
    """TODO: Add lens dirt effect to a compositor node.

    Currently a no-op stub: logs a warning and returns the input node
    unchanged so the compositor graph stays valid.
    """
    # BUGFIX: Logger.warn is a deprecated alias for Logger.warning.
    log.warning("NotImplemented: lens dirt ")
    return input_node
@gin.configurable
def render(
    rgb_path: Union[Path, str] = None,
    depth_path: Union[Path, str] = None,
    iseg_path: Union[Path, str] = None,
    cseg_path: Union[Path, str] = None,
    width: int = 640,
    height: int = 480,
    hsv: Tuple[float] = None,
):
    """Render images using AOV nodes.

    Performs up to two render passes: one rgb pass with default render
    settings, and one segmentation/depth pass with segmentation settings.
    Output nodes for styles whose path is None stay muted.

    Args:
        rgb_path (Union[Path, str], optional): Output path for color image.
        depth_path (Union[Path, str], optional): Output path for depth image.
        iseg_path (Union[Path, str], optional): Output path for instance
            segmentation image.
        cseg_path (Union[Path, str], optional): Output path for category
            segmentation image.
        width (int, optional): Image width in pixels. Defaults to 640.
        height (int, optional): Image height in pixels. Defaults to 480.
        hsv (Tuple[float], optional): (hue, saturation, value) for the HSV
            compositor node; hue clamped to [0, 1], sat/value to [0, 2].
    """
    scene = zpy.blender.verify_blender_scene()
    scene.render.resolution_x = width
    scene.render.resolution_y = height
    scene.cycles.resolution_x = width
    scene.cycles.resolution_y = height
    scene.render.resolution_percentage = 100
    scene.render.image_settings.file_format = "PNG"
    # HACK: Prevents adding frame number to filename
    scene.frame_end = scene.frame_current
    scene.frame_start = scene.frame_current
    scene.render.use_file_extension = False
    scene.render.use_stamp_frame = False
    scene.render.filepath = ""
    # Create AOV output nodes
    # NOTE(review): output paths are used as pathlib.Path below (".parent",
    # ".name", ".stem") — str arguments would fail; confirm callers pass Path.
    render_outputs = {
        "rgb": rgb_path,
        "depth": depth_path,
        "instance": iseg_path,
        "category": cseg_path,
    }
    for style, output_path in render_outputs.items():
        if output_path is not None:
            # Create output node if it is not in scene
            if not scene.use_nodes:
                scene.use_nodes = True
            output_node = scene.node_tree.nodes.get(f"{style} Output", None)
            if output_node is None:
                output_node = make_aov_file_output_node(style=style)
            output_node.base_path = str(output_path.parent)
            output_node.file_slots[0].path = str(output_path.name)
            output_node.format.file_format = "PNG"
            output_node.format.color_mode = "RGB"
            if style in ["rgb"]:
                output_node.format.color_depth = "8"
                output_node.format.view_settings.view_transform = "Filmic"
                if hsv is not None:
                    # NOTE: local name shadows the module-level hsv_node()
                    hsv_node = scene.node_tree.nodes.get("HSV", None)
                    if hsv_node is not None:
                        hsv_node.inputs[1].default_value = max(0, min(hsv[0], 1))
                        hsv_node.inputs[2].default_value = max(0, min(hsv[1], 2))
                        hsv_node.inputs[3].default_value = max(0, min(hsv[2], 2))
                    else:
                        log.warn("Render given HSV but no HSV node found.")
            if style in ["depth"]:
                output_node.format.color_depth = "8"
                output_node.format.use_zbuffer = True
            if style in ["instance", "category"]:
                output_node.format.color_depth = "8"
                output_node.format.view_settings.view_transform = "Raw"
            log.debug(f"Output node for {style} image pointing to {str(output_path)}")
    if render_outputs.get("rgb", None) is not None:
        # Mute segmentation and depth output nodes
        _mute_aov_file_output_node("category", mute=True)
        _mute_aov_file_output_node("instance", mute=True)
        _mute_aov_file_output_node("depth", mute=True)
        _mute_aov_file_output_node("rgb", mute=False)
        default_render_settings()
        _render()
    cseg_is_on = render_outputs.get("category", None) is not None
    iseg_is_on = render_outputs.get("instance", None) is not None
    depth_is_on = render_outputs.get("depth", None) is not None
    if cseg_is_on or iseg_is_on or depth_is_on:
        # Un-mute segmentation and depth output nodes
        _mute_aov_file_output_node("category", mute=(not cseg_is_on))
        _mute_aov_file_output_node("instance", mute=(not iseg_is_on))
        _mute_aov_file_output_node("depth", mute=(not depth_is_on))
        _mute_aov_file_output_node("rgb", mute=True)
        segmentation_render_settings()
        _render()
    # Save intermediate blenderfile
    if log.getEffectiveLevel() == logging.DEBUG:
        # HACK: Use whatever output path is not None
        for style, output_path in render_outputs.items():
            if output_path is not None:
                break
        _filename = f"_debug.post.{output_path.stem}.blend"
        _path = output_path.parent / _filename
        zpy.blender.save_debug_blenderfile(_path)
    # HACK: Rename image outputs due to stupid Blender reasons
    # (Blender appends the 4-digit frame number despite the settings above)
    for style, output_path in render_outputs.items():
        if output_path is not None:
            _bad_name = str(output_path) + "%04d" % scene.frame_current
            os.rename(_bad_name, str(output_path))
            log.info(f"Rendered {style} image saved to {str(output_path)}")
# TODO: Eventually remove this deprecated function name
def render_aov(*args, **kwargs):
    """Deprecated alias for render(), kept for backwards compatibility."""
    import warnings

    # Nudge callers toward the new name before this alias is removed.
    warnings.warn(
        "render_aov is deprecated, use zpy.render.render instead",
        DeprecationWarning,
        stacklevel=2,
    )
    return render(*args, **kwargs)
def _mute_aov_file_output_node(style: str, mute: bool = True):
    """Mute (or un-mute) an AOV output node for a style.

    Args:
        style (str): Pass style whose "{style} Output" node is toggled.
        mute (bool, optional): New mute state. Defaults to True.
    """
    log.debug(f"Muting AOV node for {style}")
    scene = zpy.blender.verify_blender_scene()
    output_node = scene.node_tree.nodes.get(f"{style} Output", None)
    # Silently ignore styles that have no output node in this scene.
    if output_node is None:
        return
    output_node.mute = mute
@gin.configurable
def default_render_settings(
    samples: int = 96,
    tile_size: int = 48,
    spatial_splits: bool = False,
    is_aggressive: bool = False,
) -> None:
    """Render settings for normal color images.

    Args:
        samples (int, optional): Number of Cycles samples per frame
        tile_size (int, optional): Rendering tile size in pixel dimensions
        spatial_splits (bool, optional): Toggle for BVH split acceleration
        is_aggressive (bool, optional): Toggles aggressive render time reduction settings
    """
    scene = zpy.blender.verify_blender_scene()
    # Make sure engine is set to Cycles
    if not (scene.render.engine == "CYCLES"):
        log.warning(" Setting render engine to CYCLES")
        # BUGFIX: this was `scene.render.engine == "CYCLES"` (a no-op
        # comparison), so the engine was never actually switched.
        scene.render.engine = "CYCLES"
    scene.cycles.samples = samples
    scene.cycles.use_adaptive_sampling = True
    scene.cycles.use_denoising = False
    scene.cycles.denoiser = "OPENIMAGEDENOISE"
    scene.render.film_transparent = False
    scene.render.dither_intensity = 1.0
    scene.render.filter_size = 1.5
    view_layer = zpy.blender.verify_view_layer()
    scene.render.use_single_layer = True
    view_layer.pass_alpha_threshold = 0.5
    # Light bounce limits for quality/speed tradeoff
    scene.cycles.max_bounces = 12
    scene.cycles.diffuse_bounces = 4
    scene.cycles.glossy_bounces = 4
    scene.cycles.transparent_max_bounces = 4
    scene.cycles.transmission_bounces = 12
    scene.cycles.sample_clamp_indirect = 2.5
    scene.cycles.sample_clamp_direct = 2.5
    scene.cycles.blur_glossy = 1
    scene.cycles.caustics_reflective = False
    scene.cycles.caustics_refractive = False
    scene.view_settings.view_transform = "Filmic"
    scene.display.render_aa = "8"
    scene.display.viewport_aa = "FXAA"
    scene.display.shading.color_type = "TEXTURE"
    scene.display.shading.light = "STUDIO"
    scene.display.shading.show_specular_highlight = True
    scene.render.tile_x = tile_size
    scene.render.tile_y = tile_size
    scene.cycles.debug_use_spatial_splits = spatial_splits
    scene.render.use_persistent_data = True
    if is_aggressive:
        # Trade render quality for speed across the board.
        scene.cycles.samples = 64
        scene.cycles.max_bounces = 8
        scene.cycles.diffuse_bounces = 2
        scene.cycles.glossy_bounces = 2
        scene.cycles.transparent_max_bounces = 2
        scene.cycles.transmission_bounces = 4
        scene.render.use_simplify = True
        scene.cycles.ao_bounces_render = 1
        scene.world.light_settings.use_ambient_occlusion = True
        scene.world.light_settings.distance = 40
        scene.world.light_settings.ao_factor = 0.5
def segmentation_render_settings():
    """Render settings for segmentation images.

    Flat, single-sample, anti-aliasing-free settings so each object renders
    as an unblended solid color.
    """
    scene = zpy.blender.verify_blender_scene()
    # Make sure engine is set to Cycles
    if not (scene.render.engine == "CYCLES"):
        log.warning(" Setting render engine to CYCLES")
        # BUGFIX: this was `scene.render.engine == "CYCLES"` (a no-op
        # comparison), so the engine was never actually switched.
        scene.render.engine = "CYCLES"
    scene.render.film_transparent = True
    scene.render.dither_intensity = 0.0
    scene.render.filter_size = 0.0
    scene.cycles.samples = 1
    scene.cycles.diffuse_bounces = 0
    scene.cycles.diffuse_samples = 0
    view_layer = zpy.blender.verify_view_layer()
    view_layer.pass_alpha_threshold = 0.0
    scene.cycles.max_bounces = 0
    scene.cycles.bake_type = "EMIT"
    scene.cycles.use_adaptive_sampling = False
    scene.cycles.use_denoising = False
    scene.cycles.denoising_radius = 0
    # Raw view transform prevents color management from shifting seg colors
    scene.view_settings.view_transform = "Raw"
    scene.display.render_aa = "OFF"
    scene.display.viewport_aa = "OFF"
    scene.display.shading.color_type = "MATERIAL"
    scene.display.shading.light = "FLAT"
    scene.display.shading.show_specular_highlight = False
def _render(
    threads: int = 4,
    logfile_path: Union[Path, str] = "blender_render.log",
) -> None:
    """The actual call to render a frame in Blender.

    Args:
        threads (int, optional): Number of threads to render on. Defaults to 4.
        logfile_path (Union[Path, str]): Path to save render logfile.
            NOTE(review): currently unused; Blender's console output is not
            redirected.
    """
    start_time = time.time()
    scene = zpy.blender.verify_blender_scene()
    # TODO: Get a better default number based on number of available cores
    scene.render.threads = threads
    # TODO: Re-add a cross-platform way to silence Blender's render log.
    # An os.dup/os.close fd-redirect hack used to live here but only worked
    # on Linux (failed on Windows); see
    # https://blender.stackexchange.com/questions/44560
    try:
        # This is the actual render call
        bpy.ops.render.render()
    except Exception as e:
        log.warning(f"Render raised exception {e}")
    duration = time.time() - start_time
    log.info(f"Rendering took {duration}s to complete.")
import logging
from datetime import date
from pathlib import Path
from typing import Dict, List, Tuple, Union
import gin
import zpy
log = logging.getLogger(__name__)
@gin.configurable
class Saver:
    """Stores annotations and categories throughout a run script.
    Provides functions for some additional meta files.
    Raises:
        ValueError: Incorrect function arguments.
    Returns:
        Saver: The Saver object.
    """
    # Names for annotation files, folders, configs, datasheets, etc
    HIDDEN_METAFOLDER_FILENAME = Path(".zumometa")
    HIDDEN_DATASHEET_FILENAME = Path("_viz.datasheet.txt")
    GIN_CONFIG_FILENAME = Path("config.gin")
    # strftime formats used for the COCO-style metadata fields below
    DATETIME_FORMAT = "20%y%m%d_%H%M%S"
    DATETIME_YEAR_FORMAT = "20%y"
    def __init__(
        self,
        output_dir: Union[Path, str] = None,
        annotation_path: Union[Path, str] = None,
        description: str = "Description of dataset.",
        clean_dir: bool = True,
    ):
        """Creates a Saver object.
        Args:
            output_dir (Union[Path, str], optional): Directory where files will be dumped.
            annotation_path (Union[Path, str], optional): Path where annotation file will be dumped.
            description (str, optional): A couple sentences describing the dataset. Default 'Description of dataset.'.
            clean_dir (bool, optional): Whether to empty/clean the output directory on object creation. Default True.
        """
        # the output dir (falls back to a temp path when not given)
        if output_dir is None:
            output_dir = zpy.files.default_temp_path()
        self.output_dir = zpy.files.verify_path(output_dir, make=True, check_dir=True)
        log.debug(f"Saver output directory at {output_dir}")
        if clean_dir:
            zpy.files.clean_dir(self.output_dir)
        # Annotation files can be optionally written out to a different dir
        if annotation_path is None:
            self.annotation_path = annotation_path
        else:
            self.annotation_path = zpy.files.verify_path(annotation_path)
        log.debug(f"Saver annotation path at {annotation_path}")
        # Very similar keys to COCO-style
        self.metadata = {
            "description": description,
            "contributor": "Zumo Labs",
            "url": "zumolabs.ai",
            "year": date.today().strftime(self.DATETIME_YEAR_FORMAT),
            "date_created": date.today().strftime(self.DATETIME_FORMAT),
            "save_path": str(self.output_dir),
        }
        # Category dicts, keyed by category id
        self.categories = {}
        # Flat list of annotation dicts
        self.annotations = []
        # Reverse-lookup dictionaries for name/color to id
        self.category_name_to_id = {}
    @gin.configurable
    def add_annotation(
        self,
        category: str = None,
        subcategory: str = None,
        subcategory_zero_indexed: bool = True,
        **kwargs,
    ) -> Dict:
        """Add a new annotation to the Saver object.
        Pass any additional keys you want in the annotation dict as kwargs.
        Args:
            category (str, optional): The category that this annotation belongs to. Defaults to None.
            subcategory (str, optional): The sub-category that this annotation belongs to. Defaults to None.
            subcategory_zero_indexed (bool, optional): Whether sub-categories are zero indexed. Defaults to True.
        Returns:
            Dict: The annotation dictionary.
        """
        # NOTE(review): kwargs are accepted (and documented) but never merged
        # into the annotation dict here — subclasses appear responsible for
        # that; confirm against zpy.saver_image / zpy.saver_video.
        annotation = {"id": len(self.annotations)}
        if category is not None:
            category_id = self.category_name_to_id.get(category, None)
            assert category_id is not None, f"Could not find id for category {category}"
            self.categories[category_id]["count"] += 1
            annotation["category_id"] = category_id
        if subcategory is not None:
            # Subcategory ids are positional within the category's list
            subcategory_id = self.categories[category_id]["subcategories"].index(
                subcategory
            )
            self.categories[category_id]["subcategory_count"][subcategory_id] += 1
            subcategory_id += 0 if subcategory_zero_indexed else 1
            annotation["subcategory_id"] = subcategory_id
        return annotation
    @gin.configurable
    def add_category(
        self,
        name: str = "default",
        supercategories: List[str] = None,
        subcategories: List[str] = None,
        color: Tuple[float] = (0.0, 0.0, 0.0),
        zero_indexed: bool = True,
        **kwargs,
    ) -> Dict:
        """Add a new category (also known as classes) to the Saver object.
        Pass any additional keys you want in the category dict as kwargs.
        Args:
            name (str, optional): Name of the category. Defaults to 'default'.
            supercategories (List[str], optional): Names of any supercategories. Defaults to None.
            subcategories (List[str], optional): Names of any subcategories. Defaults to None.
            color (Tuple[float], optional): Color of the category in segmentation images. Defaults to (0., 0., 0.).
            zero_indexed (bool, optional): Whether categories are zero-indexed. Defaults to True.
        Returns:
            Dict: The category dictionary.
        """
        # Default for super- and sub- categories is empty list
        supercategories = supercategories or []
        subcategories = subcategories or []
        category = {
            "name": name,
            "supercategories": supercategories,
            "subcategories": subcategories,
            "color": color,
            "count": 0,
            "subcategory_count": [0] * len(subcategories),
        }
        category.update(**kwargs)
        # Ids are assigned sequentially in insertion order
        category["id"] = len(self.categories.keys())
        category["id"] += 0 if zero_indexed else 1
        log.debug(f"Adding category: {zpy.files.pretty_print(category)}")
        self.categories[category["id"]] = category
        self.category_name_to_id[name] = category["id"]
        return category
    @gin.configurable
    def remap_filter_categories(
        self,
        category_remap: Dict = None,
    ) -> None:
        """Re-map the categories (name and id correspondence).
        This will also filter out any categories not in the category_remap dict.
        Args:
            category_remap (Dict, optional): Mapping of categorie names to id in {id : name}. Defaults to None.
        Raises:
            ValueError: Incorrect format for category remap dictionary.
        """
        if category_remap is None:
            log.warning("Attempted to remap categories with no category remap.")
            return
        # Intermediate variables for organization
        category_remap_ids = []
        category_remap_name_to_id = {}
        category_remap_old_id_to_new_id = {}
        # Check for duplicates and typing
        for _id, _name in category_remap.items():
            try:
                _id = int(_id)
                _name = str(_name)
            except ValueError:
                raise ValueError("Category remap must be {int : string}")
            if _name in category_remap_name_to_id:
                raise ValueError(f"Duplicate category name in remap: {_name}")
            if _id in category_remap_ids:
                raise ValueError(f"Duplicate category id in remap: {_id}")
            category_remap_ids.append(_id)
            category_remap_name_to_id[_name] = _id
        # Make sure the category names all exist in current categories
        category_names = [c["name"] for c in self.categories.values()]
        for category_name in category_remap_name_to_id:
            assert (
                category_name in category_names
            ), f"Could not find category {category_name} in dataset when remap-ing"
        # Go through all of the current categories
        new_categories = {}
        for old_id, category in self.categories.items():
            # Matching is done using the name
            if category["name"] in category_remap_name_to_id:
                new_id = category_remap_name_to_id[category["name"]]
                category_remap_old_id_to_new_id[old_id] = new_id
                category["id"] = new_id
                new_categories[new_id] = category
        # Overwrite the old categories
        self.categories = new_categories
        # Go through all of the current annotations
        new_annotations = []
        # Replace the category_id in all annotations
        # (annotations whose category was filtered out are dropped)
        for annotation in self.annotations:
            if annotation["category_id"] in category_remap_old_id_to_new_id:
                new_id = category_remap_old_id_to_new_id[annotation["category_id"]]
                annotation["category_id"] = new_id
                new_annotations.append(annotation)
        # Overwrite the old annotations
        self.annotations = new_annotations
    def output_gin_config(self):
        """Output the full gin config."""
        gin_config_filepath = self.output_dir / self.GIN_CONFIG_FILENAME
        log.info(f"Writing out gin config to {gin_config_filepath}")
        with open(gin_config_filepath, "w") as f:
            f.write(gin.operative_config_str())
    @staticmethod
    def write_datasheet(datasheet_path: Path = None, info: Dict = None):
        """Writes datasheet dict to file.
        Args:
            datasheet_path (Path, optional): Path where datasheet will be written.
            info (Dict, optional): Information to include in datasheet.
        """
        # One "key,value" line per entry
        with datasheet_path.open("w") as f:
            for k, v in info.items():
                f.write(f"{k},{v}\n")
    @staticmethod
    def clip_coordinate_list(
        annotation: List[Union[int, float]] = None,
        height: Union[int, float] = None,
        width: Union[int, float] = None,
        normalized: bool = False,
    ) -> List[Union[int, float]]:
        """Clip a list of pixel coordinates (e.g. segmentation polygon).
        Args:
            annotation (List[Union[int, float]], optional): List of pixel coordinates.
            height (Union[int, float], optional): Height used for clipping.
            width (Union[int, float], optional): Width used for clipping.
            normalized (bool, optional): Whether coordinates are normalized (0, 1) or integer pixel values.
                Defaults to False.
        Returns:
            List[Union[int, float]]: Clipped list of pixel coordniates.
        """
        # Recurse into nested lists (multi-polygon annotations)
        if any(isinstance(i, list) for i in annotation):
            return [
                zpy.saver.Saver.clip_coordinate_list(
                    height=height, width=width, normalized=normalized, annotation=ann
                )
                for ann in annotation
            ]
        if normalized:
            # Coordinates are in (0, 1)
            max_x, max_y = 1.0, 1.0
        else:
            # Coordinates are w.r.t image height and width
            max_x, max_y = width, height
        new_annotation = []
        # TODO: This zip unpack here is unreadable
        # (it walks the flat [x0, y0, x1, y1, ...] list two items at a time)
        for x, y in zip(*[iter(annotation)] * 2):
            new_x, new_y = x, y
            if x < 0:
                new_x = 0
            if y < 0:
                new_y = 0
            if x > max_x:
                new_x = max_x
            if y > max_y:
                new_y = max_y
            new_annotation.append(new_x)
            new_annotation.append(new_y)
        return new_annotation
    @staticmethod
    def clip_bbox(
        bbox: List[Union[int, float]] = None,
        height: Union[int, float] = None,
        width: Union[int, float] = None,
        normalized: bool = False,
    ) -> List[Union[int, float]]:
        """Clip a bounding box in [x, y, width, height] format.
        Args:
            bbox (List[Union[int, float]], optional): Bounding box in [x, y, width, height] format.
            height (Union[int, float], optional): Height used for clipping.
            width (Union[int, float], optional): Width used for clipping.
            normalized (bool, optional): Whether bounding box values are normalized (0, 1) or integer pixel values.
                Defaults to False.
        Returns:
            List[Union[int, float]]: Clipped bounding box in [x, y, width, height] format.
        """
        if normalized:
            # Coordinates are in (0, 1)
            max_x, max_y = 1.0, 1.0
        else:
            # Coordinates are w.r.t image height and width
            max_x, max_y = width, height
        new_bbox = [0] * 4
        # Clamp the origin into the image, then clamp width/height to what
        # remains between the clamped origin and the image edge.
        new_bbox[0] = max(0, min(bbox[0], max_x))
        new_bbox[1] = max(0, min(bbox[1], max_y))
        new_bbox[2] = max(0, min(bbox[2], (max_x - new_bbox[0])))
        new_bbox[3] = max(0, min(bbox[3], (max_y - new_bbox[1])))
        return new_bbox
import logging
from itertools import groupby
from pathlib import Path
from typing import Dict, List, Union
import numpy as np
from PIL import Image
from scipy import ndimage as ndi
from shapely.geometry import MultiPolygon, Polygon
from skimage import color, exposure, img_as_uint, io, measure
from skimage.morphology import binary_closing, binary_opening
from skimage.transform import resize
import gin
import zpy
log = logging.getLogger(__name__)
def open_image(
    image_path: Union[Path, str],
) -> np.ndarray:
    """Open image from path to ndarray.

    Args:
        image_path (Union[Path, str]): Path to image.

    Returns:
        np.ndarray: Image as a numpy array (alpha stripped, values scaled to
        [0, 1]), or None if the image could not be opened.
    """
    image_path = zpy.files.verify_path(image_path, make=False)
    img = None
    try:
        img = io.imread(image_path)
        # BUGFIX: guard on ndim so 2-D (grayscale) images no longer raise
        # IndexError inside the try, which skipped normalization below and
        # logged a spurious error.
        if img.ndim == 3 and img.shape[2] > 3:
            log.debug("RGBA image detected!")
            img = img[:, :, :3]
        # Scale integer pixel values down into [0, 1]
        if img.max() > 2.0:
            img = np.divide(img, 255.0)
    except Exception as e:
        log.error(f"Error {e} when opening {image_path}")
    return img
def remove_alpha_channel(image_path: Union[Path, str]) -> None:
    """Strip the alpha channel from an image, overwriting it on disk.

    Args:
        image_path (Union[Path, str]): Path to image.
    """
    # open_image already drops the alpha channel when present.
    rgb_img = open_image(image_path)
    io.imsave(image_path, rgb_img)
    log.info(f"Saving image with no alpha channel at {image_path}")
@gin.configurable
def jpeg_compression(
    image_path: Union[Path, str],
    quality: int = 40,
) -> Path:
    """Add jpeg compression to an image (overwrites image).

    Args:
        image_path (Union[Path, str]): Path to image.
        quality (int, optional): Compression quality. Defaults to 40.

    Returns:
        Path: Path to the (possibly re-suffixed) jpeg image.
    """
    image_path = zpy.files.verify_path(image_path, make=False)
    img = io.imread(image_path)
    # Make sure the output path has a jpeg suffix.
    # NOTE(review): when the suffix differs the original file is left on
    # disk alongside the new .jpeg — confirm callers clean it up.
    if not image_path.suffix == ".jpeg":
        image_path = image_path.with_suffix(".jpeg")
    io.imsave(image_path, arr=img, quality=quality)
    log.info(f"Saving compressed image at {image_path}")
    return image_path
@gin.configurable
def resize_image(
    image_path: Union[Path, str],
    width: int = 640,
    height: int = 480,
) -> Path:
    """Resize an image (overwrites image).

    Args:
        image_path (Union[Path, str]): Path to image.
        width (int, optional): Width of image. Defaults to 640.
        height (int, optional): Height of image. Defaults to 480.

    Returns:
        Path: Path to image.
    """
    img = open_image(image_path)
    resized_img = resize(img, (height, width), anti_aliasing=True)
    io.imsave(image_path, resized_img)
    # BUGFIX: the annotation and docstring promise a Path, but the original
    # implementation fell off the end and returned None.
    return image_path
def pixel_mean_std(
    flat_images: List[np.ndarray],
) -> Dict:
    """Return the pixel mean and std from a flattened images array.

    Args:
        flat_images (List[np.ndarray]): Image pixels in a flattened array

    Returns:
        Dict: Pixel means and std as floats and integers (256)
    """
    # HACK: Incorrect type assumption
    pixels = flat_images[0]
    if np.amax(pixels) > 1:
        # Pixels are on the [0, 256) scale: compute there, then scale down.
        mean_256 = np.mean(pixels, axis=0)
        std_256 = np.std(pixels, axis=0)
        mean = mean_256 / 256
        std = std_256 / 256
    else:
        # Pixels are on the [0, 1] scale: compute there, then scale up.
        mean = np.mean(pixels, axis=0)
        std = np.std(pixels, axis=0)
        mean_256 = mean * 256.0
        std_256 = std * 256.0
    return {
        "mean": mean,
        "std": std,
        "mean_256": mean_256,
        "std_256": std_256,
    }
def flatten_images(
    images: List[np.ndarray],
    max_pixels: int = 500000,
) -> List[np.ndarray]:
    """Flatten a list of images in ndarray form.

    Args:
        images (List[np.ndarray]): List of images in ndarray form.
        max_pixels (int, optional): Maximum number of pixels in the flattened array. Defaults to 500000.

    Returns:
        List[np.ndarray]: List of flattened images.
    """
    # Reshape every (H, W, C) image into an (H*W, C) pixel list; other
    # shapes are skipped.
    per_image_pixels = [
        np.reshape(img, (np.shape(img)[0] * np.shape(img)[1], np.shape(img)[2]))
        for img in images
        if len(np.shape(img)) == 3
    ]
    all_pixels = np.concatenate(per_image_pixels, axis=0)
    # Randomly subsample (with replacement) down to max_pixels rows.
    row_ids = np.random.randint(all_pixels.shape[0], size=max_pixels)
    return [all_pixels[row_ids, :]]
def pad_with(vector, pad_width, iaxis, kwargs):
    """Pad a vector with a constant value (``padder`` kwarg, default 10).

    Callback with the signature required by numpy.pad, see
    https://numpy.org/doc/stable/reference/generated/numpy.pad.html
    """
    fill = kwargs.get("padder", 10)
    # Overwrite the leading and trailing pad regions in place.
    vector[: pad_width[0]] = fill
    vector[-pad_width[1] :] = fill
def binary_mask_to_rle(binary_mask) -> Dict:
    """Converts a binary mask to a RLE (run-length-encoded) dictionary.

    https://stackoverflow.com/questions/49494337/encode-numpy-array-using-uncompressed-rle-for-coco-dataset
    """
    binary_mask = np.asfortranarray(binary_mask)
    counts = []
    # COCO RLE assumes the first run is zeros; emit a zero-length run when
    # the very first (column-major) pixel is foreground.
    for run_index, (pixel_value, run) in enumerate(
        groupby(binary_mask.ravel(order="F"))
    ):
        if run_index == 0 and pixel_value == 1:
            counts.append(0)
        counts.append(sum(1 for _ in run))
    return {"counts": counts, "size": list(binary_mask.shape)}
@gin.configurable
def seg_to_annotations(
    image_path: Union[Path, str],
    remove_salt: bool = True,
    rle_segmentations: bool = False,
    float_annotations: bool = False,
    max_categories: int = 1000,
) -> List[Dict]:
    """Convert a segmentation image into bounding boxes and polygon segmentations.
    Args:
        image_path (Union[Path, str]): Path to image.
        remove_salt (bool, optional): Remove salt when calculating bounding box and polygons. Defaults to True.
        rle_segmentations (bool, optional): Include RLE polygons in annotation dictionaries. Defaults to False.
        float_annotations (bool, optional): Include float (0, 1) bboxes/polygons in annotation dicts. Defaults False.
        max_categories (int, optional): Maximum number of categories allowed in an image. Defaults to 1000.
    Raises:
        ValueError: Too many categories (usually means segmentation image is not single colors)
    Returns:
        List[Dict]: List of annotation dictionaries.
    """
    log.info(f"Extracting annotations from segmentation: {image_path}")
    image_path = zpy.files.verify_path(image_path, make=False)
    img = open_image(image_path)
    img_height, img_width = img.shape[0], img.shape[1]
    # Unique colors represent each unique category
    unique_colors = np.unique(img.reshape(-1, img.shape[2]), axis=0)
    # Store bboxes, seg polygons, and area in annotations list
    annotations = []
    # Loop through each category
    if unique_colors.shape[0] > max_categories:
        raise ValueError(f"Over {max_categories} categories: {unique_colors.shape[0]}")
    for i in range(unique_colors.shape[0]):
        seg_color = unique_colors[i, :]
        log.debug(f"Unique color {seg_color}")
        # Pure black is treated as background, not a category
        if all(np.equal(seg_color, np.zeros(3))):
            log.debug("Color is background.")
            continue
        # Make an image mask for this category
        masked_image = img.copy()
        mask = (img != seg_color).any(axis=-1)
        masked_image[mask] = np.zeros(3)
        masked_image = color.rgb2gray(masked_image)
        # In debug mode, dump each per-category mask next to the input image
        if log.getEffectiveLevel() == logging.DEBUG:
            masked_image_name = (
                str(image_path.stem) + f"_masked_{i}" + str(image_path.suffix)
            )
            masked_image_path = image_path.parent / masked_image_name
            io.imsave(
                masked_image_path, img_as_uint(exposure.rescale_intensity(masked_image))
            )
        if remove_salt:
            # Remove "salt"
            # https://scikit-image.org/docs/dev/api/skimage.morphology
            masked_image = binary_opening(masked_image)
            masked_image = binary_closing(masked_image)
        else:
            masked_image = binary_opening(masked_image)
        # HACK: Pad masked image so segmented objects that extend beyond
        # image are properly contoured
        masked_image = np.pad(masked_image, 1, pad_with, padder=False)
        # RLE encoded segmentation from binary image
        if rle_segmentations:
            rle_segmentation = binary_mask_to_rle(masked_image)
        # Fill in the holes
        filled_masked_image = ndi.binary_fill_holes(masked_image)
        # Get countours for each blob
        contours = measure.find_contours(
            filled_masked_image, 0.01, positive_orientation="low"
        )
        log.debug(f"found {len(contours)} contours for {seg_color} in {image_path}")
        # HACK: Sometimes all you get is salt for an image, in this case
        # do not add any annotation for this category.
        if len(contours) == 0:
            continue
        segmentations = []
        segmentations_float = []
        bboxes = []
        bboxes_float = []
        areas = []
        areas_float = []
        polygons = []
        for contour in contours:
            # Flip from (row, col) representation to (x, y)
            # and subtract the padding pixel
            for j in range(len(contour)):
                row, col = contour[j]
                contour[j] = (col - 1, row - 1)
            # Make a polygon and simplify it
            poly = Polygon(contour)
            poly = poly.simplify(1.0, preserve_topology=True)
            polygons.append(poly)
            # Segmentation (flat [x0, y0, x1, y1, ...] list)
            segmentation = np.array(poly.exterior.coords).ravel().tolist()
            segmentations.append(segmentation)
            # NOTE(review): even indices are x coordinates, yet they are
            # divided by img_height here (and odd/y by img_width) — this
            # looks swapped relative to bbox_float below; confirm intent.
            segmentations_float.append(
                [
                    x / img_height if k % 2 == 0 else x / img_width
                    for k, x in enumerate(segmentation)
                ]
            )
            # Bounding boxes
            x, y, max_x, max_y = poly.bounds
            bbox = (x, y, max_x - x, max_y - y)
            bbox_float = [
                bbox[0] / img_width,
                bbox[1] / img_height,
                bbox[2] / img_width,
                bbox[3] / img_height,
            ]
            bboxes.append(bbox)
            bboxes_float.append(bbox_float)
            # Areas
            areas.append(poly.area)
            areas_float.append(poly.area / (img_width * img_height))
        # Combine the polygons to calculate the bounding box and area
        multi_poly = MultiPolygon(polygons)
        x, y, max_x, max_y = multi_poly.bounds
        bbox = (x, y, max_x - x, max_y - y)
        area = multi_poly.area
        bbox_float = [
            bbox[0] / img_width,
            bbox[1] / img_height,
            bbox[2] / img_width,
            bbox[3] / img_height,
        ]
        area_float = area / (img_width * img_height)
        annotation = {
            "color": tuple(seg_color),
            # COCO standards
            "segmentation": segmentations,
            "bbox": bbox,
            "area": area,
            # List of list versions
            "bboxes": bboxes,
            "areas": areas,
        }
        if rle_segmentations:
            annotation["segmentation_rle"] = rle_segmentation
        if float_annotations:
            annotation["segmentation_float"] = segmentations_float
            annotation["bbox_float"] = bbox_float
            annotation["area_float"] = area_float
            annotation["bboxes_float"] = bboxes_float
            annotation["areas_float"] = areas_float
        annotations.append(annotation)
    return annotations
import logging
from pathlib import Path
from typing import Dict, Union
from datetime import timedelta
import gin
import zpy
log = logging.getLogger(__name__)
@gin.configurable
class VideoSaver(zpy.saver.Saver):
    """Saver that accumulates video annotations at runtime."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # video id -> video dict
        self.videos = {}
        # video name -> video id (names are assumed unique)
        self.video_name_to_id = {}

    @gin.configurable
    def add_video(
        self,
        name: str = "default video",
        style: str = "default",
        output_path: Union[Path, str] = "/tmp/test.avi",
        width: int = 640,
        height: int = 480,
        length: timedelta = 0,
        zero_indexed: bool = True,
        **kwargs,
    ) -> Dict:
        """Register a new video with this Saver.

        Args:
            name (str, optional): Unique video name. Defaults to 'default video'.
            style (str, optional): Type of image in [default, segmenation, depth]. Defaults to 'default'.
            output_path (Union[Path, str], optional): Path to video file. Defaults to '/tmp/test.avi'.
            width (int, optional): Width of video frame. Defaults to 640.
            height (int, optional): Height of video frame. Defaults to 480.
            length (timedelta, optional): Length of video in seconds. Defaults to 0.
            zero_indexed (bool, optional): Whether video id is zero indexed. Defaults to True.

        Returns:
            Dict: The video annotation dictionary.
        """
        video = {
            "name": name,
            "style": style,
            "output_path": str(output_path),
            "relative_path": str(Path(output_path).relative_to(self.output_dir)),
            "width": width,
            "height": height,
            "length": length,
        }
        # Any extra keyword arguments are stored verbatim on the video dict.
        video.update(**kwargs)
        # Ids are assigned sequentially in insertion order.
        new_id = len(self.videos)
        if not zero_indexed:
            new_id += 1
        video["id"] = new_id
        log.debug(f"Adding video: {zpy.files.pretty_print(video)}")
        self.videos[new_id] = video
        self.video_name_to_id[name] = new_id
        return video

    @gin.configurable
    def add_annotation(
        self,
        *args,
        video: str = "default video",
        **kwargs,
    ) -> Dict:
        """Add a new annotation, tagged with the id of the named video.

        Args:
            video (str, optional): Unique video name. Defaults to 'default video'.

        Returns:
            Dict: The annotation dictionary.
        """
        annotation = super().add_annotation(*args, **kwargs)
        video_id = self.video_name_to_id.get(video, None)
        assert video_id is not None, f"Could not find id for video {video}"
        annotation["video_id"] = video_id
        # NOTE: kwargs were already forwarded to super(); they are re-applied
        # here so they also land on the final annotation dict.
        annotation.update(**kwargs)
        self.annotations.append(annotation)
        return annotation

    @gin.configurable
    def output_meta_analysis(self):
        """Perform a full meta analysis, outputting some meta files."""
        # TODO: implement meta-analysis for video datasets
        pass
import logging
from typing import List, Tuple
import bpy
import gin
import mathutils
import numpy as np
import zpy
log = logging.getLogger(__name__)
def kdtree_from_collection(
    collections: List[bpy.types.Collection],
) -> mathutils.kdtree.KDTree:
    """Build a KDTree over the world-space vertices of a collection of objects."""
    # KDTree needs its total size up front, so count vertices in a first pass.
    total_vertices = 0
    for obj in zpy.objects.for_obj_in_collections(collections):
        total_vertices += len(obj.data.vertices)
    kd = mathutils.kdtree.KDTree(total_vertices)
    # Second pass: insert every vertex in world coordinates.
    insert_index = 0
    for obj in zpy.objects.for_obj_in_collections(collections):
        to_world = obj.matrix_world
        for vertex in obj.data.vertices:
            kd.insert(to_world @ vertex.co, insert_index)
            insert_index += 1
    # Balancing is the most expensive operation
    kd.balance()
    return kd
@gin.configurable
def floor_occupancy(
    kdtree: mathutils.kdtree.KDTree,
    x_bounds: Tuple[float],
    y_bounds: Tuple[float],
    z_height: float = 0.0,
    num_voxels: int = 100,
) -> float:
    """Fraction of the XY plane at z_height occupied by nearby geometry."""
    log.info("Calculating floor occupancy ....")
    len_x = abs(x_bounds[1] - x_bounds[0])
    len_y = abs(y_bounds[1] - y_bounds[0])
    # Number of voxels determines number of points in each dimmension
    voxel_side = ((len_x * len_y) / num_voxels) ** (1 / 2)
    points_x = int(len_x / voxel_side)
    points_y = int(len_y / voxel_side)
    # TODO: This can definitely be vectorized better
    x_space, x_step = np.linspace(*x_bounds, num=points_x, retstep=True)
    y_space, y_step = np.linspace(*y_bounds, num=points_y, retstep=True)
    occupancy_grid = np.zeros((points_x, points_y))
    for ix, raw_x in enumerate(x_space):
        for iy, raw_y in enumerate(y_space):
            px, py = float(raw_x), float(raw_y)
            nearest = kdtree.find((px, py, z_height))[0]
            # Cell is occupied when the nearest vertex lies strictly within
            # one grid step of the sample point in both x and y.
            if (px - x_step) < nearest.x < (px + x_step) and (
                py - y_step
            ) < nearest.y < (py + y_step):
                occupancy_grid[ix][iy] = 1.0
    log.info("... Done.")
    log.debug(f"Floor occupancy grid: {str(occupancy_grid)}")
    return float(np.mean(occupancy_grid.copy()))
@gin.configurable
def volume_occupancy(
    kdtree: mathutils.kdtree.KDTree,
    x_bounds: Tuple[float],
    y_bounds: Tuple[float],
    z_bounds: Tuple[float],
    num_voxels: int = 100,
) -> float:
    """Fraction of a 3D volume occupied by nearby geometry."""
    log.info("Calculating volume occupancy ....")
    len_x = abs(x_bounds[1] - x_bounds[0])
    len_y = abs(y_bounds[1] - y_bounds[0])
    len_z = abs(z_bounds[1] - z_bounds[0])
    # Number of voxels determines number of points in each dimmension
    voxel_side = ((len_x * len_y * len_z) / num_voxels) ** (1 / 3)
    points_x = int(len_x / voxel_side)
    points_y = int(len_y / voxel_side)
    points_z = int(len_z / voxel_side)
    # TODO: This can definitely be vectorized better
    x_space, x_step = np.linspace(*x_bounds, num=points_x, retstep=True)
    y_space, y_step = np.linspace(*y_bounds, num=points_y, retstep=True)
    z_space, z_step = np.linspace(*z_bounds, num=points_z, retstep=True)
    occupancy_grid = np.zeros((points_x, points_y, points_z))
    for ix, raw_x in enumerate(x_space):
        for iy, raw_y in enumerate(y_space):
            for iz, raw_z in enumerate(z_space):
                px, py, pz = float(raw_x), float(raw_y), float(raw_z)
                nearest = kdtree.find((px, py, pz))[0]
                # Voxel is occupied when the nearest vertex lies strictly
                # within one grid step of the sample point on every axis.
                if (
                    (px - x_step) < nearest.x < (px + x_step)
                    and (py - y_step) < nearest.y < (py + y_step)
                    and (pz - z_step) < nearest.z < (pz + z_step)
                ):
                    occupancy_grid[ix][iy][iz] = 1.0
    log.info("... Done.")
    log.debug(f"Volume occupancy grid: {str(occupancy_grid)}")
    return float(np.mean(occupancy_grid.copy()))
import logging
import math
from typing import Tuple, Union
import bpy
import bpy_extras
import gin
import mathutils
import zpy
log = logging.getLogger(__name__)
def verify(
    camera: Union[bpy.types.Object, bpy.types.Camera, str],
    check_none=True,
) -> bpy.types.Camera:
    """Return camera given name or typed object.

    Args:
        camera (Union[bpy.types.Object, bpy.types.Camera, str]): Camera object (or it's name)
        check_none (bool, optional): Raise error if a named camera does not exist. Defaults to True.

    Raises:
        ValueError: Named camera does not exist.

    Returns:
        bpy.types.Camera: Camera object.
    """
    if isinstance(camera, str):
        # Keep the original name: the lookup replaces `camera` with None on
        # failure, which previously made the error read "Could not find
        # camera None".
        camera_name = camera
        camera = bpy.data.cameras.get(camera_name)
        if check_none and camera is None:
            raise ValueError(f"Could not find camera {camera_name}.")
    if camera is None:
        # Fall back to the active scene camera. Previously this branch was
        # unreachable under the default check_none=True, because a None
        # argument raised above before reaching it.
        scene = zpy.blender.verify_blender_scene()
        camera = scene.camera
        log.info(f'No camera chosen, using default scene camera "{camera}".')
    return camera
def look_at(
    obj: Union[bpy.types.Object, str],
    location: Union[Tuple[float], mathutils.Vector],
    roll: float = 0,
) -> None:
    """Rotate obj to look at target.

    Based on: https://blender.stackexchange.com/a/5220/12947

    Args:
        obj (Union[bpy.types.Object, str]): Scene object (or it's name) that does the looking (usually a camera)
        location (Union[Tuple[float], mathutils.Vector]): Location (3-tuple or Vector) to be looked at.
        roll (float, optional): The angle of rotation about the axis from obj to target in radians. Defaults to 0.
    """
    obj = zpy.objects.verify(obj)
    if not isinstance(location, mathutils.Vector):
        location = mathutils.Vector(location)
    loc = obj.location
    # direction points from the object to the target
    direction = location - obj.location
    # Track the local -Z axis toward the target with Y up (camera convention).
    quat = direction.to_track_quat("-Z", "Y")
    quat = quat.to_matrix().to_4x4()
    # Apply roll about the local Z axis. Matrix.Rotation takes radians, so no
    # unit conversion happens here (an earlier comment claiming a
    # radians->degrees conversion was incorrect).
    roll_matrix = mathutils.Matrix.Rotation(roll, 4, "Z")
    # remember the current location, since assigning to obj.matrix_world changes it
    loc = loc.to_tuple()
    obj.matrix_world = quat @ roll_matrix
    obj.location = loc
@gin.configurable
def camera_xyz(
    location: Union[Tuple[float], mathutils.Vector],
    camera: Union[bpy.types.Object, bpy.types.Camera, str] = None,
    fisheye_lens: bool = False,
) -> Tuple[float]:
    """Get pixel coordinates of point in camera space.

    - (0, 0) is the bottom left of the camera frame.
    - (1, 1) is the top right of the camera frame.
    - Values outside 0-1 are also supported.
    - A negative βzβ value means the point is behind the camera.

    Args:
        location (mathutils.Vector): Location (3-tuple or Vector) of point in 3D space.
        camera (Union[bpy.types.Object, bpy.types.Camera, str]): Camera in which pixel space exists.
        fisheye_lens (bool, optional): Whether to use fisheye distortion. Defaults to False.

    Returns:
        Tuple[float]: Pixel coordinates of location.
    """
    camera = zpy.camera.verify(camera)
    if not isinstance(location, mathutils.Vector):
        location = mathutils.Vector(location)
    scene = zpy.blender.verify_blender_scene()
    # Project the world-space point into normalized camera view coordinates.
    point = bpy_extras.object_utils.world_to_camera_view(scene, camera, location)
    # TODO: The z point here is incorrect?
    log.debug(f"Point {point}")
    if point[2] < 0:
        log.debug("Point is behind camera")
    # Fix the point based on camera distortion
    if fisheye_lens:
        log.debug("Correcting for fisheye distortion")
        # HACK: There should be a better place to put this
        # NOTE(review): this mutates bpy.data.cameras[0], which is not
        # necessarily the `camera` argument — confirm this is intended.
        bpy.data.cameras[0].lens_unit = "FOV"
        bpy.data.cameras[0].lens = 18.0
        # https://blender.stackexchange.com/questions/40702/how-can-i-get-the-projection-matrix-of-a-panoramic-camera-with-a-fisheye-equisol?noredirect=1&lq=1
        # Note this assumes 180 degree FOV
        cam = bpy.data.cameras[camera.name]
        f = cam.lens
        w = cam.sensor_width
        h = cam.sensor_height
        # Transform the point into the camera's local frame and normalize.
        p = camera.matrix_world.inverted() @ location
        p.normalize()
        # Calculate our angles
        phi = math.atan2(p.y, p.x)
        length = (p.x ** 2 + p.y ** 2) ** (1 / 2)
        theta = math.asin(length)
        # Equisolid projection
        r = 2.0 * f * math.sin(theta / 2)
        u = r * math.cos(phi) / w + 0.5
        v = r * math.sin(phi) / h + 0.5
        # x = u * scene.render.resolution_x
        # y = v * scene.render.resolution_y
        # TODO: The value of point[2] here is not exactly correct ...
        return u, v, point[2]
    else:
        return point[0], point[1], point[2]
def is_child_hit(
    obj: Union[bpy.types.Object, str],
    hit_obj: Union[bpy.types.Object, str],
) -> bool:
    """Whether hit_obj is obj itself or any descendant of obj.

    Args:
        obj (Union[bpy.types.Object, str]): Scene object (or it's name) that might contain a hit child.
        hit_obj (Union[bpy.types.Object, str]): Scene object (or it's name) that was hit

    Returns:
        bool: If the hit object is a child object.
    """
    obj = zpy.objects.verify(obj)
    hit_obj = zpy.objects.verify(hit_obj)
    if obj == hit_obj:
        return True
    # Depth-first search through the object's children.
    return any(is_child_hit(child, hit_obj) for child in obj.children)
def is_visible(
    location: Union[Tuple[float], mathutils.Vector],
    obj_to_hit: Union[bpy.types.Object, str],
    camera: Union[bpy.types.Object, bpy.types.Camera, str] = None,
) -> bool:
    """Cast a ray from the camera toward location to test object visibility.

    Args:
        location (Union[Tuple[float], mathutils.Vector]): Location to shoot out ray towards.
        obj_to_hit (Union[bpy.types.Object, str]): Object that should be hit by ray.
        camera (Union[bpy.types.Object, bpy.types.Camera, str]): Camera where ray originates from.

    Returns:
        bool: Whether the casted ray has hit the object.
    """
    camera = zpy.camera.verify(camera)
    obj_to_hit = zpy.objects.verify(obj_to_hit)
    if not isinstance(location, mathutils.Vector):
        location = mathutils.Vector(location)
    view_layer = zpy.blender.verify_view_layer()
    scene = zpy.blender.verify_blender_scene()
    result = scene.ray_cast(
        depsgraph=view_layer.depsgraph,
        origin=camera.location,
        direction=(location - camera.location),
    )
    # result[0] is whether anything was hit, result[4] is the hit object.
    did_hit = result[0]
    hit_obj = result[4]
    if not did_hit:
        log.debug(f"No raycast hit from camera to {obj_to_hit.name}")
        return False
    if is_child_hit(obj_to_hit, hit_obj):
        # The target itself, or one of its children, was hit.
        log.debug(f"Raycast hit from camera to {obj_to_hit.name}")
        return True
    return False
@gin.configurable
def is_in_view(
    location: Union[Tuple[float], mathutils.Vector],
    camera: Union[bpy.types.Object, bpy.types.Camera, str] = None,
    epsilon: float = 0.05,
) -> bool:
    """Whether a location lands inside the camera frame (within epsilon).

    Args:
        location (Union[Tuple[float], mathutils.Vector]): Location that is visible or not.
        camera (Union[bpy.types.Object, bpy.types.Camera, str]): Camera that wants to see the location.
        epsilon (float, optional): How far outside the view box the point is allowed to be. Defaults to 0.05.

    Returns:
        bool: Whether the location is visible.
    """
    camera = zpy.camera.verify(camera)
    if not isinstance(location, mathutils.Vector):
        location = mathutils.Vector(location)
    x, y, z = camera_xyz(location, camera=camera)
    # Points behind the camera (z < 0) are never in view; x and y may spill
    # past the frame edge by at most epsilon.
    low, high = 0 - epsilon, 1 + epsilon
    return z >= 0 and low <= x <= high and low <= y <= high
@gin.configurable
def camera_xyv(
    location: Union[Tuple[float], mathutils.Vector],
    obj: Union[bpy.types.Object, str],
    camera: Union[bpy.types.Object, bpy.types.Camera, str] = None,
    width: int = 640,
    height: int = 480,
) -> Tuple[int]:
    """Get camera image xyv coordinates of point in scene.

    Keypoint coordinates (x, y) are measured from the top left
    image corner (and are 0-indexed). Coordinates are rounded
    to the nearest pixel to reduce file size. Visibility (v):

        v=0: not labeled (in which case x=y=0)
        v=1: labeled but not visible
        v=2: labeled and visible

    Args:
        location (Union[Tuple[float], mathutils.Vector]): Location (3-tuple or Vector) of point in 3D space.
        obj (Union[bpy.types.Object, str]): Scene object (or it's name) to check for visibility.
        camera (Union[bpy.types.Object, bpy.types.Camera, str]): Camera in which pixel space exists.
        width (int, optional): Width of image. Defaults to 640.
        height (int, optional): Height of image. Defaults to 480.

    Returns:
        Tuple[int]: (X, Y, V)
    """
    camera = zpy.camera.verify(camera)
    obj = zpy.objects.verify(obj)
    if not isinstance(location, mathutils.Vector):
        location = mathutils.Vector(location)
    x, y, z = camera_xyz(location, camera=camera)
    # Assume visible; downgrade below.
    v = 2
    if x < 0 or y < 0 or z < 0:
        v = 1
    # NOTE: the raycast runs unconditionally (it also emits debug logs).
    if not is_visible(location, obj_to_hit=obj, camera=camera):
        v = 1
    # Flip the y axis (bottom-left origin -> top-left origin), then convert
    # normalized (0, 1) coordinates to integer pixels.
    y = 1 - y
    x = int(x * width)
    y = int(y * height)
    log.debug(f"(x, y, v) {(x, y, v)}")
    return x, y, v
from cli.utils import fetch_auth, download_url
from zpy.files import to_pathlib_path
import json
import requests
@fetch_auth
def fetch_sim(name, project, url, auth_headers):
    """fetch sim

    Fetch info on a sim by name from backend.

    Args:
        name (str): name of sim
        project (str): uuid of project
        url (str): backend endpoint
        auth_headers: authentication for backend

    Raises:
        NameError: no sim with this name exists in the project

    Returns:
        dict: first sim matching the given name
    """
    endpoint = f"{url}/api/v1/sims/"
    r = requests.get(
        endpoint, params={"name": name, "project": project}, headers=auth_headers
    )
    if r.status_code != 200:
        r.raise_for_status()
    response = json.loads(r.text)
    # An empty result list previously raised a bare IndexError; raise a
    # descriptive error instead (mirrors fetch_dataset in cli/datasets.py).
    if not response["results"]:
        raise NameError(f"found no sims for name {name}")
    return response["results"][0]
@fetch_auth
def create_sim(name, path, project, url, auth_headers):
    """create sim

    Upload sim object to S3 through ZumoLabs backend and create
    the sim object.

    Args:
        project (str): uuid of parent project
        name (str): name of sim to upload
        path (str): file to upload
        url (str): backend endpoint
        auth_headers: authentication for backend
    """
    endpoint = f"{url}/api/v1/sims/"
    # Use a context manager so the upload file handle is always closed;
    # the old code leaked it.
    with open(path, "rb") as sim_file:
        r = requests.post(
            endpoint,
            data={"name": name, "project": project},
            files={"file": sim_file},
            headers=auth_headers,
        )
    if r.status_code != 201:
        r.raise_for_status()
@fetch_auth
def download_sim(name, path, project, url, auth_headers):
    """download sim

    Download sim object from S3 through ZumoLabs backend.

    Args:
        name (str): name of sim to download
        path (str): output directory
        project (str): uuid of project
        url (str): backend endpoint
        auth_headers: authentication for backend

    Returns:
        str: output file path
    """
    sim = fetch_sim(name, project)
    r = requests.get(f"{url}/api/v1/sims/{sim['id']}/download", headers=auth_headers)
    if r.status_code != 200:
        r.raise_for_status()
    body = json.loads(r.text)
    # e.g. "my_sim-1a2b3c4d.zip"
    name_slug = f"{sim['name'].replace(' ', '_')}-{sim['id'][:8]}.zip"
    output_path = to_pathlib_path(path) / name_slug
    download_url(body["redirect_link"], output_path)
    return output_path
@fetch_auth
def fetch_sims(filters, url, auth_headers):
    """fetch sims

    Fetch sim objects from ZumoLabs backend.

    Args:
        filters (dict): query param filters for API call
        url (str): backend endpoint
        auth_headers: authentication for backend
    """
    r = requests.get(f"{url}/api/v1/sims/", headers=auth_headers, params=filters)
    if r.status_code != 200:
        r.raise_for_status()
    return json.loads(r.text)["results"]
from zpy.files import to_pathlib_path
import os
import yaml
ENDPOINT = "https://ragnarok.zumok8s.org"
CONFIG_FILE = "~/.zpy/config.yaml"
def initialize_config():
    """initialize config

    If CONFIG_FILE doesnt exist write it and put in prod as the endpoint. Also creates
    the ~/.zpy folder if not existing. The config is some variables needed by the cli to
    make validated requests to the backend.
    """
    path = to_pathlib_path(os.path.expanduser(CONFIG_FILE))
    if not path.exists():
        # First run: create ~/.zpy and seed a prod config with no token.
        path.parent.mkdir(parents=True, exist_ok=True)
        write_config({"ENVIRONMENT": "prod", "TOKEN": None, "ENDPOINT": ENDPOINT})
def read_config(file=CONFIG_FILE):
    """read config

    Read zpy cli configuration file.

    Args:
        file (str): path of the configuration file to read (defaults to CONFIG_FILE)

    Returns:
        config: dictionary of current configuration
    """
    path = to_pathlib_path(os.path.expanduser(file))
    with path.open() as f:
        # safe_load is sufficient for these configs (plain scalars/maps)
        # and avoids constructing arbitrary Python objects.
        config = yaml.safe_load(f)
    return config
def write_config(config, file=CONFIG_FILE):
    """write config

    Write zpy cli configuration file.

    Args:
        config (dict): new configuration to write
        file (str): path of the configuration file to write (defaults to CONFIG_FILE)
    """
    path = to_pathlib_path(os.path.expanduser(file))
    with path.open("w") as stream:
        yaml.dump(config, stream)
def add_env(name, endpoint):
    """add environment

    Add a new environment configuration file.

    Args:
        name: name of the environment
        endpoint: endpoint for the new enviroment
    """
    # Each environment lives in its own ~/.zpy/<name>.yaml with a fresh token.
    write_config(
        {"ENVIRONMENT": name, "TOKEN": None, "ENDPOINT": endpoint},
        file=f"~/.zpy/{name}.yaml",
    )
def swap_env(name):
    """swap environment

    Swap the current environment configuration.

    Args:
        name: swap to this env
    """
    # Promote the target env's config to the active config file, then park
    # the previously-active config under its own environment name.
    current = read_config()
    target = read_config(file=f"~/.zpy/{name}.yaml")
    write_config(target)
    write_config(current, file=f"~/.zpy/{current['ENVIRONMENT']}.yaml")
from cli.utils import fetch_auth
import requests
import json
@fetch_auth
def create_transform(name, operation, config, project, url, auth_headers):
    """create transform

    Create a job object on ZumoLabs backend. This will trigger the backend
    to run the job.

    Args:
        name (str): name of the input dataset (also used as the transform name)
        operation (str): transform type
        config (dict): configuration for transform
        project (str): project uuid
        url (str): backend endpoint
        auth_headers: authentication for backend
    """
    # Imported here to avoid a circular import with cli.datasets.
    from cli.datasets import fetch_dataset

    input_dataset = fetch_dataset(name)
    payload = {
        "project": project,
        "operation": operation,
        "name": name,
        "input_dataset": input_dataset["id"],
        "config": json.dumps(config),
    }
    r = requests.post(f"{url}/api/v1/transforms/", data=payload, headers=auth_headers)
    if r.status_code != 201:
        r.raise_for_status()
@fetch_auth
def fetch_transforms(filters, url, auth_headers):
    """fetch transforms

    Fetch transform objects from ZumoLabs backend.

    Args:
        filters (dict): query param filters for API call
        url (str): backend endpoint
        auth_headers: authentication for backend

    Returns:
        list: list of transforms
    """
    r = requests.get(f"{url}/api/v1/transforms/", headers=auth_headers, params=filters)
    if r.status_code != 200:
        r.raise_for_status()
    return json.loads(r.text)["results"]
@fetch_auth
def available_transforms(url, auth_headers):
    """available transforms

    List all transforms available on the backend.

    Args:
        url (str): backend endpoint
        auth_headers: authentication for backend

    Returns:
        list: list of transforms
    """
    r = requests.get(f"{url}/api/v1/transforms/available/", headers=auth_headers)
    if r.status_code != 200:
        r.raise_for_status()
    return json.loads(r.text)
import functools
import math
from copy import deepcopy
from itertools import product
from pathlib import Path
from typing import Union
from urllib.request import urlopen
import click
from tqdm import tqdm
from cli.config import read_config
def parse_filter(str_filter):
    """parse filter

    Parse filter string to field, pattern, regex.

    Valid patterns depend on field but can include exact, startswith, contains,
    iexact, istartswith, icontains. If no pattern or field is provided the defaults
    are name for field and startswith for pattern.

    Examples:
        icontains:hello
        name:icontains:foobar
        state:iexact:ready
        foo

    Args:
        filter (str): filter query

    Return:
        field: field to filter on
        pattern: pattern to apply regex
        regex: string regex for pattern
    """
    parts = str_filter.split(":")
    if len(parts) == 3:
        # field:pattern:regex
        return parts[0], parts[1], parts[2]
    if len(parts) == 2:
        # pattern:regex — field defaults to "name"
        return "name", parts[0], parts[1]
    # bare regex — both field and pattern default
    return "name", "startswith", parts[-1]
def resolve_sweep(sweep_config):
    """resolve sweep

    Resolve a dictionary into a sweep of dictionaries. Gin bindings
    are used to communicate with job code and therefore the sweep is done
    across gin_bindings in the sweep_config.

    Example:
        {'foo': ['a', 'b']} -> {'foo': 'a'} {'foo': 'b'}

    Args:
        sweep_config (dict): dictionary to unfold into sweep

    Returns:
        list: list of configs resolved from sweep config
    """
    bindings = sweep_config["gin_bindings"]
    keys = list(bindings)
    resolved = []
    # Cartesian product over every binding's value list; each combination
    # becomes one concrete config.
    for combo in product(*bindings.values()):
        config = deepcopy(sweep_config)
        config["gin_bindings"] = dict(zip(keys, combo))
        resolved.append(config)
    return resolved
def parse_args(args):
    """parse args

    Used by cli to parse arguments passed to cli calls. Includes
    safe eval function to convert from string to other types.

    Example:
        foo 1 bar 2 -> {'foo': 1, 'bar': 2}
    """

    def _coerce(token):
        # NOTE: eval() runs arbitrary expressions from CLI input; values that
        # fail to evaluate (e.g. bare words) are kept as plain strings.
        try:
            return eval(token)
        except Exception:
            return token

    # Even-indexed tokens are keys, odd-indexed tokens are values.
    return {key: _coerce(value) for key, value in zip(args[::2], args[1::2])}
def download_url(url: str, output_path: Union[Path, str]):
    """download url

    Download from url to given output path and visualize using tqdm.

    Args:
        url (str): url to download
        output_path (Union[Path, str]): path to download file to
    """
    u = urlopen(url)
    total_size = int(u.info()["Content-Length"])
    block_size = 8192
    # Context managers close the output file (and progress bar) even if a
    # read/write raises; the old code leaked the file handle on error.
    with open(output_path, "wb") as fp, tqdm(total=total_size) as pbar:
        while True:
            chunk = u.read(block_size)
            if not chunk:
                break
            fp.write(chunk)
            # Advance by the bytes actually read: the final chunk is usually
            # shorter than block_size, so updating by block_size overshot.
            pbar.update(len(chunk))
def fetch_auth(func):
    """fetch authentication

    Decorator that injects the backend url and authorization headers
    into the wrapped function's keyword arguments.

    Args:
        func: function to wrap

    Returns:
        wrapped function
    """

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        config = read_config()
        headers = {"Authorization": "token {}".format(config["TOKEN"])}
        return func(*args, **kwargs, url=config["ENDPOINT"], auth_headers=headers)

    return wrapper
def use_project(required=False):
    def use_project_inner(func):
        """Inject project uuid into function call. Optionally throw an error if it has not been set.

        Args:
            func: function to wrap

        Returns:
            wrapped function
        """

        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            project = read_config().get("PROJECT", None)
            if project:
                click.echo(f"Using project {project}")
                return func(*args, **kwargs, project=project)
            # No project configured: hard error when required, otherwise
            # call through without a project kwarg.
            if required:
                click.secho(
                    "Project is not set. See `zpy project --help`",
                    fg="red",
                    err=True,
                )
                return
            return func(*args, **kwargs)

        return wrapper

    return use_project_inner
def print_list_as_columns(list_of_strings, num_cols=5, indent_prefix="  "):
    """Format and echo a list of strings into nicely formatted columns.

    Args:
        list_of_strings (list of str): A list of similar strings to format into columns.
        num_cols (int): Desired number of columns.
        indent_prefix (str): String to attach to the beginning of every printed line.

    Returns:
        None
    """
    # Guard: max() below raises ValueError on an empty sequence.
    if not list_of_strings:
        return
    col_width = max(len(string) for string in list_of_strings)
    for start in range(0, len(list_of_strings), num_cols):
        row = list_of_strings[start : start + num_cols]
        # Left-justify every cell to the widest string so columns line up.
        click.echo(indent_prefix + " ".join(cell.ljust(col_width) for cell in row))
import json
import requests
from cli.utils import download_url, fetch_auth, parse_filter
from zpy.files import to_pathlib_path
@fetch_auth
def create_dataset(name, file_path, project, url, auth_headers):
    """create dataset

    Create dataset on ZumoLabs backend which groups files.

    Args:
        name (str): name of dataset
        file_path (str): optional zip file to upload
        project (str): uuid of parent project
        url (str): backend endpoint
        auth_headers: authentication for backend

    Returns:
        dict: the created dataset object
    """
    endpoint = f"{url}/api/v1/datasets/"
    data = {"name": name, "project": project}
    if file_path:
        # Context manager closes the upload handle; the old code leaked it.
        with open(file_path, "rb") as dataset_file:
            r = requests.post(
                endpoint,
                data=data,
                files={"file": dataset_file},
                headers=auth_headers,
            )
    else:
        r = requests.post(
            endpoint,
            data=data,
            headers=auth_headers,
        )
    if r.status_code != 201:
        r.raise_for_status()
    return json.loads(r.text)
@fetch_auth
def generate_dataset(dataset_name, sim_name, count, config, project, url, auth_headers):
    """generate dataset

    Generate files for a dataset on ZumoLabs backend which will launch
    a generation job with specified params.

    Args:
        dataset_name (str): name of dataset
        sim_name (str): name of sim to generate from
        count (int): number of times to run the sim
        config (dict): configration of sim for this dataset
        project (str): project uuid
        url (str): backend endpoint
        auth_headers: authentication for backend
    """
    # Imported here to avoid a circular import with cli.sims.
    from cli.sims import fetch_sim

    sim = fetch_sim(sim_name, project)
    # Create an empty dataset first, then ask the backend to fill it.
    dataset = create_dataset(dataset_name, None, project)
    payload = {
        "sim": sim["id"],
        "amount": count,
        "config": json.dumps(config),
        "project": project,
    }
    r = requests.post(
        f"{url}/api/v1/datasets/{dataset['id']}/generate/",
        data=payload,
        headers=auth_headers,
    )
    if r.status_code != 200:
        r.raise_for_status()
@fetch_auth
def download_dataset(name, path, url, auth_headers):
    """download dataset

    Download dataset object from S3 through ZumoLabs backend.

    Args:
        name (str): name of dataset to download
        path (str): output directory
        url (str): backend endpoint
        auth_headers: authentication for backend

    Returns:
        str: output file path
    """
    dataset = fetch_dataset(name)
    r = requests.get(
        f"{url}/api/v1/datasets/{dataset['id']}/download/", headers=auth_headers
    )
    if r.status_code != 200:
        r.raise_for_status()
    payload = json.loads(r.text)
    output_path = to_pathlib_path(path) / f"{name.replace(' ', '_')}.zip"
    download_url(payload["redirect_link"], output_path)
    return output_path
@fetch_auth
def fetch_datasets(filters, url, auth_headers):
    """fetch datasets

    Fetch dataset objects from the backend matching the given filters.

    Args:
        filters (dict): query param filters for API call

    Returns:
        list: paginated sorted datasets for all types
    """
    r = requests.get(f"{url}/api/v1/datasets/", headers=auth_headers, params=filters)
    if r.status_code != 200:
        r.raise_for_status()
    return json.loads(r.text)["results"]
@fetch_auth
def fetch_dataset(name, url, auth_headers):
    """fetch dataset

    Fetch info on a dataset by name from backend.

    Args:
        name (str): name of dataset
        url (str): backend endpoint
        auth_headers: authentication for backend
    """
    r = requests.get(f"{url}/api/v1/datasets/", params={"name": name}, headers=auth_headers)
    if r.status_code != 200:
        r.raise_for_status()
    response = json.loads(r.text)
    # The name must resolve to exactly one dataset.
    if response["count"] != 1:
        raise NameError(f"found {response['count']} datasets for name {name}")
    return response["results"][0]
@fetch_auth
def filter_datasets(dfilter, project, url, auth_headers):
    """filter datasets

    Filter dataset objects on ZumoLabs backend by given dfilter.
    Parse dfilter using parse_filter.

    Args:
        dfilter (str): filter query for datasets
        project (str): project uuid
        url (str): backend endpoint
        auth_headers: authentication for backend

    Return:
        dict: filtered datasets by dfilter {'id': 'name'}
    """
    field, pattern, regex = parse_filter(dfilter)
    params = {
        "project": project,
        f"{field}__{pattern}": regex,
    }
    matches = {}
    # Walk every page of results; the API returns the next page url (or
    # None) under "next".
    endpoint = f"{url}/api/v1/datasets/"
    while endpoint is not None:
        r = requests.get(endpoint, headers=auth_headers, params=params)
        if r.status_code != 200:
            r.raise_for_status()
        page = json.loads(r.text)
        matches.update({d["id"]: d["name"] for d in page["results"]})
        endpoint = page["next"]
    return matches
import json
import click
import requests
from requests.auth import HTTPBasicAuth
from table_logger import TableLogger
from cli.config import initialize_config, read_config, write_config, add_env, swap_env
from cli.loader import Loader
from cli.utils import parse_args, resolve_sweep, use_project, print_list_as_columns
from zpy.files import read_json, to_pathlib_path
SMALL_WIDTH = 12
MEDIUM_WIDTH = 24
LARGE_WIDTH = 36
UUID_WIDTH = 36
DATETIME_WIDTH = 27
@click.group(context_settings=dict(token_normalize_func=str.lower))
def cli():
    """zpy cli

    Zumo Labs cli which is used to create, get, list, upload objects from
    the Zumo Labs backend (ragnarok).
    """
    # Ensure ~/.zpy/config.yaml exists before any subcommand runs.
    initialize_config()
@cli.command("help")
def cli_help():
    """display help

    This will display help in order to provide users with more information
    on how to use this tool.
    """
    # NOTE: this string is user-facing output; keep wording/links in sync
    # with the README and docs.
    click.echo(
        "zpy - ZumoLabs command line interface\n"
        "\n"
        "zpy is a tool used to list, create, upload, download\n"
        "objects from the ZumoLabs backend (ragnarok)\n"
        "\n"
        "app - https://app.zumolabs.ai\n"
        "github - https://github.com/ZumoLabs/zpy\n"
        "docs - https://github.com/ZumoLabs/zpy/tree/main/docs/cli"
    )
@cli.command("login")
@click.argument("username", required=True)
@click.password_option(help="The login password.")
def login(username, password):
    """login to ragnarok

    This command will update the zpy config with a token that is fetched
    from the backend using account details.

    Accounts can be created at: app.zumolabs.ai

    Args:
        username (str): developer username
        password (str): developer password
    """
    config = read_config()
    r = requests.post(
        f"{config['ENDPOINT']}/auth/login/",
        auth=HTTPBasicAuth(username, password),
    )
    if r.status_code != 200:
        click.secho("Login failed.", err=True, fg="red")
        return
    # Persist the fetched token so subsequent commands are authenticated.
    config["TOKEN"] = r.json()["token"]
    write_config(config)
    click.echo("Login successful!")
@cli.command("config")
def cli_config():
    """display config

    Display current configuration file to developer.
    """
    # Pretty-print the config dict as indented JSON.
    pretty_config = json.dumps(read_config(), indent=2)
    click.echo(f"Zpy cli configuration:\n{pretty_config}")
@cli.command("version")
def version():
    """version

    Display the zpy cli version.
    """
    # Imported lazily so the zpy package is only loaded when this runs.
    import zpy

    click.echo(f"Version: {zpy.__version__}")
# ------- ENV
@cli.group("env")
def env_group():
"""environment configuration.
Configure the environment for backend calls.
"""
pass
@env_group.command("set")
@click.argument("env")
def set_env(env):
    """switch target environment
    This command allows zumo labs developers to swap the endpoint that the cli communicates with.
    Args:
        env (str): new environment for endpoint
    """
    config = read_config()
    old_env, old_endpoint = config["ENVIRONMENT"], config["ENDPOINT"]
    swap_env(env)
    # Re-read the config so the freshly-swapped values can be displayed.
    config = read_config()
    click.echo("Swapped environment:")
    click.echo(f"  {old_env} -> {config['ENVIRONMENT']}")
    click.echo(f"  {old_endpoint} -> {config['ENDPOINT']}")
    # Tokens are per-environment, so the user must authenticate again.
    click.echo("zpy login to fetch token")
@env_group.command("add")
@click.argument("env")
@click.argument("endpoint")
def add_environment(env, endpoint):
    """add a new environment
    This command allows you to add an environment to target with backend calls.
    Args:
        env (str): new environment name identifier
        endpoint (str): endpoint for new environment
    """
    # Plain string literal: the header line has no placeholders, so the
    # stray f-prefix was removed.
    click.echo("Adding environment:")
    click.echo(f"  ENVIRONMENT: {env}")
    click.echo(f"  ENDPOINT: {endpoint}")
    add_env(env, endpoint)
# ------- DATASET
@cli.group("dataset")
def dataset_group():
    """dataset object.
    Dataset is a collection of files.
    """
    # Group container only; see list/get/upload/generate subcommands.
    pass
@dataset_group.command("list")
@click.argument("filters", nargs=-1)
@use_project()
def list_datasets(filters, project=None):
    """list datasets
    List datasets from backend with optional FILTERS. Uses PROJECT set via zpy project command when available.
    """
    from cli.datasets import fetch_datasets
    try:
        # parse_args turns CLI filter tokens into a query dict (see cli.utils).
        filters = parse_args(filters)
        if project:
            filters["project"] = project
    except Exception:
        click.secho(f"Failed to parse filters: {filters}", fg="yellow", err=True)
        return
    try:
        with Loader("Fetching datasets..."):
            datasets = fetch_datasets(filters)
        click.echo("Fetched datasets successfully.")
    except requests.exceptions.HTTPError as e:
        click.secho(f"Failed to fetch datasets {e}.", fg="red", err=True)
        # A 400 body carries backend validation details worth surfacing.
        if e.response.status_code == 400:
            click.secho(str(e.response.json()), fg="red", err=True)
        return
    tbl = TableLogger(
        columns="name,state,files,created_at",
        colwidth={
            "name": LARGE_WIDTH,
            "state": MEDIUM_WIDTH,
            "files": SMALL_WIDTH,
            "created_at": DATETIME_WIDTH,
        },
    )
    for d in datasets:
        tbl(
            d["name"],
            d["state"],
            d["num_files"],
            d["created_at"],
        )
@dataset_group.command("get")
@click.argument("name")
@click.argument(
    "path",
    type=click.Path(exists=True, file_okay=False, writable=True, resolve_path=True),
)
@click.argument("format", default="archive")
def get_dataset(name, path, format):
    """get dataset
    Download dataset of type DTYPE and name NAME to local PATH from backend.
    Args:
        name (str): name of dataset
        path (str): directory to put zipped dataset
        format (str): format for packaging
    """
    # NOTE(review): `format` shadows the builtin and is never passed to
    # download_dataset, and `download_url` is imported but unused below --
    # confirm whether packaging formats are still supported.
    from cli.datasets import download_dataset
    from cli.utils import download_url
    try:
        output_path = download_dataset(name, path)
        click.echo(f"Downloaded dataset '{name}' to {output_path}")
    except requests.exceptions.HTTPError as e:
        click.secho(f"Failed to download dataset: {e}", fg="red", err=True)
        if e.response.status_code == 400:
            click.secho(str(e.response.json()), fg="red", err=True)
    except NameError as e:
        # Presumably raised by download_dataset for an unknown dataset -- confirm.
        click.secho(f"Failed to download dataset: {e}", fg="yellow", err=True)
@dataset_group.command("upload")
@click.argument("name")
@click.argument("path", type=click.Path(exists=True, dir_okay=False, resolve_path=True))
@use_project(required=True)
def upload_dataset(name, path, project=None):
    """upload dataset
    Upload dataset located at PATH to PROJECT and call it NAME. Requires PROJECT to be set via `zpy project`.
    Args:
        name (str): name of dataset
        path (str): path to dataset
        project (str): project uuid
    """
    from cli.datasets import create_dataset
    # Bail out early on a non-zip file: previously the error was printed but
    # the upload still proceeded with an invalid file.
    if to_pathlib_path(path).suffix != ".zip":
        click.secho(f"File {path} must be of type zip", fg="red", err=True)
        return
    try:
        with Loader("Uploading dataset..."):
            create_dataset(name, path, project)
        click.secho(f"Uploaded dataset {path} with name '{name}'", fg="green")
    except requests.exceptions.HTTPError as e:
        click.secho(f"Failed to upload dataset: {e}", fg="red", err=True)
        if e.response.status_code == 400:
            click.secho(str(e.response.json()), fg="red", err=True)
@dataset_group.command("generate")
@click.argument("name")
@click.argument("sim")
@click.argument("number")
@click.argument("args", nargs=-1)
@use_project(required=True)
def create_dataset(name, sim, number, args, project=None):
    """Create a dataset.
    Create a dataset object called NAME. This will trigger the generation of data from SIM with NUMBER of runs given the input ARGS. Requires PROJECT to be set via `zpy project`.
    Args:
        name (str): name of new dataset
        sim (str): name of sim dataset is built with
        number (str): number of datasets to create
        args (List(str)): configuration of sim for this dataset
        project (str): project uuid
    """
    from cli.datasets import generate_dataset
    try:
        # Sim configuration is given as CLI tokens and parsed into a dict.
        dataset_config = parse_args(args)
    except Exception:
        click.secho(f"Failed to parse args: {args}", fg="yellow", err=True)
        return
    try:
        generate_dataset(name, sim, number, dataset_config, project)
        click.secho(
            f"Generating {number} from sim '{sim}' with config {dataset_config}",
            fg="green",
        )
    except requests.exceptions.HTTPError as e:
        click.secho(f"Failed to create dataset: {e}", fg="red", err=True)
        if e.response.status_code == 400:
            click.secho(str(e.response.json()), fg="red", err=True)
    except NameError as e:
        # Presumably raised for an unknown sim name -- confirm in cli.datasets.
        click.secho(f"Failed to create dataset: {e}", fg="yellow", err=True)
# ------- PROJECT
@cli.group("project")
def project_group():
    """Project group
    Project is a container for the rest of the objects.
    """
    # Group container only; see list/create/set/clear subcommands.
    pass
@project_group.command("list")
@click.argument("filters", nargs=-1)
def list_projects(filters):
    """list projects
    List projects from backend with optional FILTERS.
    """
    from cli.projects import fetch_projects
    try:
        # parse_args turns CLI filter tokens into a query dict (see cli.utils).
        filters = parse_args(filters)
    except Exception:
        click.secho(f"Failed to parse filters: {filters}", fg="yellow", err=True)
        return
    try:
        with Loader("Fetching projects..."):
            projects = fetch_projects(filters)
        click.echo("Fetched projects successfully.")
    except requests.exceptions.HTTPError as e:
        click.secho(f"Failed to fetch projects {e}.", fg="red", err=True)
        if e.response.status_code == 400:
            click.secho(str(e.response.json()), fg="red", err=True)
        return
    tbl = TableLogger(
        columns="id,name,account,created_at",
        colwidth={
            "id": UUID_WIDTH,
            "name": LARGE_WIDTH,
            "account": UUID_WIDTH,
            "created_at": DATETIME_WIDTH,
        },
    )
    for p in projects:
        tbl(
            p["id"],
            p["name"],
            p["account"],
            p["created_at"],
        )
@project_group.command("create")
@click.argument("account", type=click.UUID)
@click.argument("name")
def create_project(account, name):
    """Create a project under ACCOUNT called NAME.
    See available accounts: zpy account list
    """
    # The local import shadows this command function inside the body; the
    # click command object itself is unaffected.
    from cli.projects import create_project
    try:
        create_project(account, name)
        click.secho(f"Created project '{name}'", fg="green")
    except requests.exceptions.HTTPError as e:
        click.secho(f"Failed to create project: {e}", fg="red", err=True)
        if e.response.status_code == 400:
            click.secho(str(e.response.json()), fg="red", err=True)
@project_group.command("set")
@click.argument("project_uuid", type=click.UUID)
def set_project(project_uuid):
    """Set project
    Set global PROJECT uuid.
    """
    config = read_config()
    # Remember the previous value (if any) so the switch can be displayed.
    previous = config.get("PROJECT", None)
    config["PROJECT"] = str(project_uuid)
    write_config(config)
    click.echo("Switched project:")
    click.echo(f"  {previous} -> {config['PROJECT']}")
@project_group.command("clear")
def clear_project():
    """Clear project
    Clear global PROJECT uuid.
    """
    config = read_config()
    # Use a pop default so clearing twice (or clearing when no project was
    # ever set) no longer raises KeyError.
    config.pop("PROJECT", None)
    write_config(config)
    click.echo("Cleared global project namespace.")
# ------- SIM
@cli.group("sim")
def sim_group():
    """Sim object
    Sim is a 3D scene which is used to generate images.
    """
    # Group container only; see list/get/upload/logs subcommands.
    pass
@sim_group.command("list")
@click.argument("filters", nargs=-1)
@use_project()
def list_sims(filters, project=None):
    """list sims
    List sims from backend with optional FILTERS. Uses PROJECT set via zpy project command when available.
    """
    from cli.sims import fetch_sims
    try:
        # parse_args turns CLI filter tokens into a query dict (see cli.utils).
        filters = parse_args(filters)
        if project:
            filters["project"] = project
    except Exception:
        click.secho(f"Failed to parse filters: {filters}", fg="yellow", err=True)
        return
    try:
        with Loader("Fetching sims..."):
            sims = fetch_sims(filters)
        click.echo("Fetched sims successfully.")
    except requests.exceptions.HTTPError as e:
        click.secho(f"Failed to fetch sims {e}.", fg="red", err=True)
        if e.response.status_code == 400:
            click.secho(str(e.response.json()), fg="red", err=True)
        return
    tbl = TableLogger(
        columns="name,state,zpy_version,blender_version,created_at",
        colwidth={
            "name": LARGE_WIDTH,
            "state": MEDIUM_WIDTH,
            "zpy_version": MEDIUM_WIDTH,
            "blender_version": SMALL_WIDTH,
            "created_at": DATETIME_WIDTH,
        },
    )
    for s in sims:
        tbl(
            s["name"],
            s["state"],
            s["zpy_version"],
            s["blender_version"],
            s["created_at"],
        )
@sim_group.command("get")
@click.argument("name")
@click.argument(
    "path",
    type=click.Path(exists=True, file_okay=False, writable=True, resolve_path=True),
)
@use_project(required=True)
def get_sim(name, path, project=None):
    """get sim
    Download sim with name NAME from backend.
    Args:
        name (str): name of sim
        path (str): directory to put zipped sim
        project (str): project uuid
    """
    from cli.sims import download_sim
    try:
        output_path = download_sim(name, path, project)
        click.echo(f"Downloaded sim '{name}' to {output_path}")
    except requests.exceptions.HTTPError as e:
        click.secho(f"Failed to download sim: {e}", fg="red", err=True)
        if e.response.status_code == 400:
            click.secho(str(e.response.json()), fg="red", err=True)
    except NameError as e:
        # Presumably raised for an unknown sim name -- confirm in cli.sims.
        click.secho(f"Failed to download sim: {e}", fg="yellow", err=True)
@sim_group.command("upload")
@click.argument("name")
@click.argument("path", type=click.Path(exists=True, dir_okay=False, resolve_path=True))
@use_project(required=True)
def upload_sim(name, path, project=None):
    """upload sim
    Upload sim located at PATH to PROJECT and call it NAME. Requires PROJECT to be set via `zpy project`.
    Args:
        name (str): name of sim
        path (str): path to sim
        project (str): project uuid
    """
    from cli.sims import create_sim
    # Bail out early on a non-zip file: previously the error was printed but
    # the upload still proceeded with an invalid file.
    if to_pathlib_path(path).suffix != ".zip":
        click.secho(f"File {path} must be of type zip", fg="red", err=True)
        return
    try:
        with Loader("Uploading sim..."):
            create_sim(name, path, project)
        click.secho(f"Uploaded sim {path} with name '{name}'", fg="green")
    except requests.exceptions.HTTPError as e:
        click.secho(f"Failed to upload sim: {e}", fg="red", err=True)
        if e.response.status_code == 400:
            click.secho(str(e.response.json()), fg="red", err=True)
@sim_group.command("logs")
@click.argument("name")
@click.argument(
    "path",
    type=click.Path(exists=True, file_okay=False, writable=True, resolve_path=True),
)
def logs_sim(name, path):
    """sim logs
    Download the info/debug/error logs for sim NAME into directory PATH.
    """
    from cli.logs import fetch_logs
    try:
        fetch_logs("sims", name, path)
        click.echo(f"Downloaded {path}/[info/debug/error].log from '{name}'.")
    except requests.exceptions.HTTPError as e:
        click.secho(f"Failed to fetch logs: {e}", fg="red", err=True)
        if e.response.status_code == 400:
            click.secho(str(e.response.json()), fg="red", err=True)
    except NameError as e:
        # Presumably raised for an unknown sim name -- confirm in cli.logs.
        click.secho(f"Failed to fetch logs: {e}", fg="yellow", err=True)
# ------- ACCOUNT
@cli.group("account")
def account_group():
    """Account object
    Accounts are used to interact with the backend.
    """
    # Group container only; see the `list` subcommand.
    pass
@account_group.command("list")
@click.argument("filters", nargs=-1)
def list_accounts(filters):
    """list accounts
    List accounts from backend with optional FILTERS.
    """
    from cli.accounts import fetch_accounts
    try:
        # parse_args turns CLI filter tokens into a query dict (see cli.utils).
        filters = parse_args(filters)
    except Exception:
        click.secho(f"Failed to parse filters: {filters}", fg="yellow", err=True)
        return
    try:
        with Loader("Fetching accounts..."):
            accounts = fetch_accounts(filters)
        click.echo("Fetched accounts successfully.")
    except requests.exceptions.HTTPError as e:
        click.secho(f"Failed to fetch accounts {e}.", fg="red", err=True)
        if e.response.status_code == 400:
            click.secho(str(e.response.json()), fg="red", err=True)
        return
    tbl = TableLogger(
        columns="id,type,email,created_at",
        colwidth={
            "id": UUID_WIDTH,
            "type": LARGE_WIDTH,
            "email": UUID_WIDTH,
            "created_at": DATETIME_WIDTH,
        },
    )
    for p in accounts:
        tbl(
            p["id"],
            p["type"],
            p["email"],
            p["created_at"],
        )
# ------- JOB
@cli.group("job")
def job_group():
    """Job object
    Jobs are used in order to perform operations on a set of datasets.
    """
    # Group container only; see list/create/logs subcommands.
    pass
@job_group.command("list")
@click.argument("filters", nargs=-1)
@use_project()
def list_jobs(filters, project=None):
    """
    list jobs
    List jobs from backend with optional FILTERS. Uses PROJECT set via `zpy project` command when available.
    """
    from cli.jobs import fetch_jobs
    try:
        # parse_args turns CLI filter tokens into a query dict (see cli.utils).
        filters = parse_args(filters)
        if project:
            filters["project"] = project
    except Exception:
        click.secho(f"Failed to parse filters: {filters}", fg="yellow", err=True)
        return
    try:
        with Loader("Fetching jobs..."):
            jobs = fetch_jobs(filters)
        click.echo("Fetched jobs successfully.")
    except requests.exceptions.HTTPError as e:
        click.secho(f"Failed to fetch jobs {e}.", fg="red", err=True)
        if e.response.status_code == 400:
            click.secho(str(e.response.json()), fg="red", err=True)
        return
    tbl = TableLogger(
        columns="state,name,operation,created_at",
        colwidth={
            "state": MEDIUM_WIDTH,
            "name": LARGE_WIDTH,
            "operation": SMALL_WIDTH,
            "created_at": DATETIME_WIDTH,
        },
    )
    for j in jobs:
        tbl(j["state"], j["name"], j["operation"], j["created_at"])
@job_group.command("create")
@click.argument("name")
@click.argument("operation", type=click.Choice(["package", "tvt", "train"]))
@click.option(
    "filters",
    "-f",
    multiple=True,
    help="Key/value pairs separated by spaces. Passed as query params in the API call to filter data sets.",
)
@click.option(
    "configfile",
    "--configfile",
    type=click.Path(exists=True, dir_okay=False, resolve_path=True),
    help="Path to json file",
)
@click.option(
    "sweepfile",
    "--sweepfile",
    type=click.Path(exists=True, dir_okay=False, resolve_path=True),
    help="Path to json file",
)
@use_project(required=True)
def create_job(name, operation, filters, configfile, sweepfile, project=None):
    """create job
    Create a job called NAME within PROJECT to perform OPERATION on a group of datasets defined by the FILTERS
    provided by -f. Requires PROJECT set via `zpy project`.
    """
    from cli.datasets import filter_datasets
    from cli.jobs import create_new_job
    # Stage 1: resolve each -f filter into concrete dataset ids.
    filtered_datasets = []
    for dfilter in filters:
        try:
            with Loader(f"Filtering datasets by '{dfilter}'..."):
                datasets = filter_datasets(dfilter, project)
                count = len(datasets)
            click.secho(f"Found {count} matching '{dfilter}'")
            if count == 0:
                continue
            dataset_names = list(datasets.values())
            print_list_as_columns(dataset_names)
            filtered_datasets.extend(datasets.keys())
        except requests.exceptions.HTTPError as e:
            click.secho(f"Failed to filter datasets {e}", fg="red", err=True)
    # Stage 2: build the list of job configs -- a single config file, a sweep
    # file expanded into many configs, or one empty default config.
    job_configs = []
    if configfile:
        config = read_json(configfile)
        job_configs.append(config)
        click.echo(f"Parsed config file {configfile} : {config}")
    elif sweepfile:
        sweep_config = read_json(sweepfile)
        try:
            configs = resolve_sweep(sweep_config)
        except Exception as e:
            click.secho(
                f"Failed to resolve sweep file {sweepfile} {e}", fg="yellow", err=True
            )
            return
        job_configs.extend(configs)
        click.echo(f"Parsed sweep file {sweepfile} : {sweep_config}")
    else:
        job_configs.append(dict())
    # Stage 3: confirm, then launch one job per config. Jobs after the first
    # get an index suffix to keep names unique.
    click.confirm(f"Launch {len(job_configs)} jobs?", abort=True)
    for i, config in enumerate(job_configs):
        job_name = name if i == 0 else f"{name} {i}"
        try:
            create_new_job(job_name, operation, config, filtered_datasets, project)
            click.secho(
                f"Created {operation} job '{job_name}' with config {config}", fg="green"
            )
        except requests.exceptions.HTTPError as e:
            click.secho(f"Failed to create job: {e}", fg="red", err=True)
            if e.response.status_code == 400:
                click.secho(str(e.response.json()), fg="red", err=True)
    click.echo(f"Finished creating {len(job_configs)} jobs with name '{name}'")
@job_group.command("logs")
@click.argument("name")
@click.argument(
    "path",
    type=click.Path(exists=True, file_okay=False, writable=True, resolve_path=True),
)
def logs_job(name, path):
    """job logs
    Download the info/debug/error logs for job NAME into directory PATH.
    """
    from cli.logs import fetch_logs
    try:
        fetch_logs("jobs", name, path)
        click.echo(f"Downloaded {path}/[info/debug/error].log from '{name}'.")
    except requests.exceptions.HTTPError as e:
        click.secho(f"Failed to fetch logs: {e}", fg="red", err=True)
        if e.response.status_code == 400:
            click.secho(str(e.response.json()), fg="red", err=True)
    except NameError as e:
        # Presumably raised for an unknown job name -- confirm in cli.logs.
        click.secho(f"Failed to fetch logs: {e}", fg="yellow", err=True)
# ------- TRANSFORM
@cli.group("transform")
def transform_group():
    """Transform Operations
    Transforms are used on datasets to output a new dataset.
    """
    # Group container only; see list/dataset subcommands.
    pass
@transform_group.command("list")
@click.argument("filters", nargs=-1)
@use_project()
def list_transforms(filters, project=None):
    """
    list transforms
    List transforms from backend with optional FILTERS. Also displays available TRANSFORMS. Uses PROJECT set via `zpy project` command when available.
    """
    from cli.transforms import fetch_transforms, available_transforms
    try:
        # parse_args turns CLI filter tokens into a query dict (see cli.utils).
        filters = parse_args(filters)
        if project:
            filters["project"] = project
    except Exception:
        click.secho(f"Failed to parse filters: {filters}", fg="yellow", err=True)
        return
    try:
        click.echo(f"Available transforms: {available_transforms()}")
        with Loader("Fetching transforms..."):
            transforms = fetch_transforms(filters)
        click.echo("Fetched transforms successfully.")
    except requests.exceptions.HTTPError as e:
        click.secho(f"Failed to fetch transforms {e}.", fg="red", err=True)
        if e.response.status_code == 400:
            click.secho(str(e.response.json()), fg="red", err=True)
        return
    tbl = TableLogger(
        columns="state,operation,input_dataset,created_at",
        colwidth={
            "state": MEDIUM_WIDTH,
            "operation": SMALL_WIDTH,
            "input_dataset": LARGE_WIDTH,
            "created_at": DATETIME_WIDTH,
        },
    )
    for t in transforms:
        tbl(t["state"], t["operation"], t["input_dataset"], t["created_at"])
@transform_group.command("dataset")
@click.argument("name")
@click.argument("operation")
@click.argument("args", nargs=-1)
@use_project(required=True)
def transform_dataset(name, operation, args, project=None):
    """Transform a dataset.
    Transform a dataset NAME with OPERATION. This will trigger the transformation of this dataset given the input ARGS. Requires PROJECT to be set via `zpy project`.
    Args:
        name (str): name of new dataset
        operation (str): operation to run on dataset
        args (List(str)): configuration of sim for this dataset
        project (str): project uuid
    """
    from cli.transforms import create_transform
    try:
        # Transform configuration is given as CLI tokens parsed into a dict.
        transform_config = parse_args(args)
    except Exception:
        click.secho(f"Failed to parse args: {args}", fg="yellow", err=True)
        return
    try:
        create_transform(name, operation, transform_config, project)
        click.secho(
            f"Running {operation} on dataset '{name}' with config {transform_config}",
            fg="green",
        )
    except requests.exceptions.HTTPError as e:
        click.secho(f"Failed to create transform: {e}", fg="red", err=True)
        if e.response.status_code == 400:
            click.secho(str(e.response.json()), fg="red", err=True)
    except NameError as e:
        # Presumably raised for an unknown dataset/operation -- confirm.
        click.secho(f"Failed to create transform: {e}", fg="yellow", err=True)
import json
import os
import random
import shutil
import unittest
from collections import defaultdict
from pathlib import Path
import zpy.client as zpy
from zpy.client_util import remove_n_extensions, format_dataset, write_json
def test_1(**init_kwargs):
    """In local env, simruns exist for config { "run.padding_style": "square" }"""
    zpy.init(**init_kwargs)
    dataset_config = zpy.DatasetConfig(sim_name="can_v7")
    # The dot in the key is backslash-escaped so it is treated as a literal
    # dot rather than a path separator -- confirm against DatasetConfig.set.
    dataset_config.set("run\\.padding_style", "square")
    print(dataset_config.config)
    previews = zpy.preview(dataset_config)
    urls = [preview["url"] for preview in previews]
    print(json.dumps(urls, indent=4, sort_keys=True))
def test_2(**init_kwargs):
    """In local env, simruns do NOT exist for config { "run.padding_style": "messy" }"""
    zpy.init(**init_kwargs)
    dataset_config = zpy.DatasetConfig("can_v7")
    dataset_config.set("run\\.padding_style", "messy")
    print(dataset_config.config)
    previews = zpy.preview(dataset_config)
    urls = [preview["url"] for preview in previews]
    # Dump both the full preview payloads and just their urls.
    print(json.dumps(previews, indent=4, sort_keys=True))
    print(json.dumps(urls, indent=4, sort_keys=True))
def pretty_print(object):
    """Print *object* as indented JSON, or a notice if it cannot be serialized.

    NOTE: the parameter name shadows the builtin `object`; kept for
    backward compatibility with existing keyword callers.
    """
    try:
        # Serialize once (the previous version serialized twice: a bare
        # json.dumps probe followed by the indented dump).
        serialized = json.dumps(object, indent=4)
    except TypeError:
        print("Unable to serialize the object")
    else:
        print(serialized)
def test_generate(**init_kwargs):
    """Smoke-test zpy.generate with a materialized dataset."""
    zpy.init(**init_kwargs)
    dataset_config = zpy.DatasetConfig("can_v7")
    def datapoint_callback(images, annotations, categories):
        # Currently unused below (commented out in the generate call).
        pretty_print(images)
        pretty_print(annotations)
        pretty_print(categories)
    dataset = zpy.generate(
        dataset_config,
        num_datapoints=39,
        materialize=True,
        # datapoint_callback=datapoint_callback
    )
# https://docs.python.org/3/library/unittest.html#module-unittest
class TestClientUtilMethods(unittest.TestCase):
    """Tests for zpy.client / zpy.client_util helpers.

    NOTE(review): test_generate and test_format_dataset contain hard-coded
    credentials and machine-specific paths -- they are scratch/integration
    tests, not CI-safe unit tests.
    """

    def test_remove_n_extensions(self):
        # remove_n_extensions strips up to n trailing extensions; asking for
        # more than exist strips them all.
        self.assertTrue("/foo" == remove_n_extensions("/foo.rgb.png", 2))
        self.assertTrue("/images" == remove_n_extensions("/images.foo.rgb.png", 3))
        self.assertTrue("/images.rgb" == remove_n_extensions("/images.rgb.png", 1))
        self.assertTrue(
            "/foo/images" == remove_n_extensions("/foo/images.rgb.png", 9001)
        )

    def test_hash(self):
        # NOTE(review): the builtin hash() does not accept dicts (they are
        # unhashable), so the next two lines raise TypeError at runtime --
        # presumably a hashing helper from zpy.client_util was intended here.
        dictA = hash({"foo": 1, "bar": 2})
        dictB = hash({"bar": 2, "foo": 1})
        self.assertEqual(hash(dictA), hash(dictB))
        self.assertEqual(hash(True), hash(True))
        self.assertNotEqual(hash(True), hash(False))
        self.assertNotEqual(hash(1), hash(2))
        self.assertNotEqual(hash([1]), hash([1, 1]))

    def test_generate(self):
        # NOTE(review): hard-coded project uuid and auth token checked into
        # source -- rotate these and load from the environment instead.
        zpy.init(
            project_uuid="feb6e594-55e0-4f87-9e75-5a128221499f",
            auth_token="a4a13763b0dc0017b1fc9af890e9efea58fd072074ab9a169e5dcf0633310f28",
        )
        dataset_config = zpy.DatasetConfig("dumpster_v5.1")
        # NOTE(review): "run\.padding_style" is an invalid escape sequence in
        # a non-raw string (DeprecationWarning); elsewhere this file uses the
        # doubled form "run\\.padding_style".
        dataset_config.set("run\.padding_style", "messy")
        def datapoint_callback(images, annotations, categories):
            pretty_print(images)
            pretty_print(annotations)
            pretty_print(categories)
        zpy.generate(
            dataset_config, num_datapoints=3, datapoint_callback=datapoint_callback
        )

    def test_format_dataset(self):
        # NOTE(review): machine-specific path; this test only works on the
        # original author's machine.
        output_dir = Path("/home/korystiger/Downloads/ktest")
        if output_dir.exists():
            shutil.rmtree(output_dir)
        os.makedirs(output_dir / "train", exist_ok=True)
        os.makedirs(output_dir / "val", exist_ok=True)
        os.makedirs(output_dir / "test", exist_ok=True)
        # Per-split COCO-style accumulators filled by the callback below.
        metadata = {
            tvt_type: {"categories": {}, "images": {}, "annotations": []}
            for tvt_type in ["train", "val", "test"]
        }
        category_counts = {tvt_type: defaultdict(int) for tvt_type in ["train", "val", "test"]}
        def datapoint_callback(images, annotations, categories):
            # Randomly assign each datapoint to a split: ~40% train,
            # ~40% test, ~20% val.
            r = random.random()
            if r < 0.4:
                tvt_type = "train"
            elif r < 0.8:
                tvt_type = "test"
            else:
                tvt_type = "val"
            for image in images:
                # Copy the image into its split directory and record its
                # new location in the split metadata.
                new_path = output_dir / tvt_type / image["id"]
                shutil.copy(image["output_path"], new_path)
                metadata[tvt_type]["images"][image["id"]] = {
                    **image,
                    "output_path": str(new_path),
                    "relative_path": image["id"],
                    "name": image["id"],
                }
                filtered_annotations_by_image_id = [a for a in annotations if a['image_id'] == image['id']]
                for annotation in filtered_annotations_by_image_id:
                    category_counts[tvt_type][annotation['category_id']] += 1
            metadata[tvt_type]["annotations"].extend(annotations)
            for category in categories:
                metadata[tvt_type]["categories"][category["id"]] = category
        # format_dataset("/home/korystiger/Downloads/malibu-3k-0aac7584.zip",
        #                # datapoint_callback=datapoint_callback,
        #                )
        # format_dataset('/home/korystiger/Downloads/can_v714-8c288ec8.zip',
        #                datapoint_callback=datapoint_callback)
        format_dataset('/home/korystiger/Downloads/trailer_empty_v5-f9b7ccb2.zip',
                       datapoint_callback=datapoint_callback)
        # Stamp per-category counts into the metadata and write one
        # annotations.json per split.
        for tvt_type in ["train", "val", "test"]:
            for category_id, count in category_counts[tvt_type].items():
                metadata[tvt_type]['categories'][category_id]['count'] = count
            print(f"Writing {tvt_type} json...")
            path = str(output_dir / tvt_type / "annotations.json")
            blob = metadata[tvt_type]
            write_json(path, blob)
if __name__ == "__main__":
    unittest.main()
# NOTE(review): everything below executes on *import* (it is outside the
# __main__ guard), but never executes when this file runs as a script,
# because unittest.main() exits the process first.
# init_kwargs = {
#     "base_url": "http://localhost:8000",
#     "project_uuid": "aad8e2b2-5431-4104-a205-dc3b638b0dab",
#     "auth_token": "214540cbd525f1ecf2bc52e2ddb7ef76801048e3f55aa4b33a9e501b115a736e",
# }
# NOTE(review): hard-coded auth token checked into source -- rotate it and
# load credentials from the environment instead.
init_kwargs = {
    "base_url": "https://ragnarok.stage.zumok8s.org",
    "project_uuid": "feb6e594-55e0-4f87-9e75-5a128221499f",
    "auth_token": "a19f8a1cef0c1661f7de1fd513d740c499752fc567fc4c6fe6d11fdbce533b65",
}
# init_kwargs = {
#     "base_url": "https://ragnarok.stage.zumok8s.org",
#     "project_uuid": "91419af0-4815-41e7-9b77-5ef8154148c8",  # Compology
#     "auth_token": "a51cacaa01082ba5237b49f74cd6ffa5cf88339345383d97bcadd1f99e5f9a01",
# }
# init_kwargs = {
#     "base_url": "https://ragnarok.zumok8s.org",
#     "project_uuid": "91419af0-4815-41e7-9b77-5ef8154148c8",  # Compology
#     "auth_token": "7c1baae380c14a89b558a2fbf5f1c0ad923e61298c3ec87a0bdae6debbe549cb",
# }
# print("Running test_1:")
# test_1(**init_kwargs)
# print("Running test_2:")
# test_2(**init_kwargs)
# test format dataset
# def datapoint_callback(images, annotations, categories):
#     pretty_print(images)
#     pretty_print(annotations)
#     pretty_print(categories)
# input_path = "/mnt/c/Users/georg/Zumo/Datasets/can_v714-8c288ec8.zip"
# dataset_path = extract_zip(input_path)
# format_dataset(dataset_path, datapoint_callback)
# test_generate(**init_kwargs)
import importlib
import logging
from typing import Tuple
import bpy
import zpy
from bpy.types import Operator
from bpy_extras.io_utils import ImportHelper
log = logging.getLogger(__name__)
# Blender add-on reload idiom: re-import zpy when the add-on script is
# reloaded inside a running Blender session. NOTE(review): since bpy is
# imported above, this condition is always true at module scope -- the check
# is conventionally placed before the imports; confirm intent.
if "bpy" in locals():
    importlib.reload(zpy)
def registerObjectProperties():
    """Properties applied to object."""
    # Every Object gains a `seg` pointer carrying its segmentation metadata.
    bpy.types.Object.seg = bpy.props.PointerProperty(type=SegmentableProperties)
def registerSceneProperties():
    """Properties applied to scenes."""
    # Scene-level collection of all known segmentation categories.
    bpy.types.Scene.categories = bpy.props.CollectionProperty(type=CategoryProperties)
    # Dropdown enum backed by the callbacks _category_items / _category_update.
    bpy.types.Scene.categories_enum = bpy.props.EnumProperty(
        name="Category",
        description="Category for this object.",
        default=None,
        items=_category_items,
        update=_category_update,
    )
class CategoryProperties(bpy.types.PropertyGroup):
    """Segmentation category is a property of one or many objects."""

    # Human-readable category label.
    name: bpy.props.StringProperty(
        name="Category Name",
        description="String name of the category.",
    )
    # Color used when rendering segmentation images for this category.
    color: bpy.props.FloatVectorProperty(
        name="Category Color",
        subtype="COLOR",
        description="Category color for segmentation.",
    )
def _category_items(self, context):
    """Get current categories.

    Builds the (identifier, name, description) tuples that the
    categories_enum EnumProperty expects.
    """
    scene_categories = bpy.context.scene.categories.items()
    return [
        (
            # First item is used for Python access.
            str(index),
            category_name,
            zpy.color.frgb_to_hex(category.color),
        )
        for index, (category_name, category) in enumerate(scene_categories)
    ]
def _category_update(self, context):
    """Update the category."""
    # Apply the category chosen in the dropdown to every selected object.
    if context.selected_objects:
        # Use the value of the category enum dropdown
        category = context.scene.categories[int(context.scene.categories_enum)]
        for obj in zpy.objects.for_obj_in_selected_objs(context):
            zpy.objects.segment(
                obj=obj,
                name=category.name,
                color=category.color,
                as_category=True,
            )
class SegmentableProperties(bpy.types.PropertyGroup):
    """Per-object segmentation data: category and instance name/color."""

    # Category-level (class) segmentation label.
    category_name: bpy.props.StringProperty(
        name="Category Name",
        description="String name of the category.",
        default="",
    )
    category_color: bpy.props.FloatVectorProperty(
        name="Category Color",
        subtype="COLOR",
        description="Category color for segmentation.",
    )
    # Instance-level segmentation label (unique per object instance).
    instance_name: bpy.props.StringProperty(
        name="Instance Name",
        description="String name of the instance.",
        default="",
    )
    instance_color: bpy.props.FloatVectorProperty(
        name="Instance Color",
        subtype="COLOR",
        description="Instance color for segmentation.",
    )
class SegmentInstanceMany(Operator):
    """Segment the selected objects/parts.
    Each object will be segmented as a unique object.
    """

    bl_idname = "object.zpy_segment_instance_many"
    bl_label = "Segment Instance (Many)"
    bl_options = {"REGISTER", "UNDO"}

    @classmethod
    def poll(cls, context):
        # Enabled only when something is selected.
        return len(context.selected_objects) > 0

    def execute(self, context):
        # Show per-object colors in the viewport.
        context.space_data.shading.color_type = "OBJECT"
        # Each object gets its own instance, keyed by its own name.
        for obj in zpy.objects.for_obj_in_selected_objs(context):
            zpy.objects.segment(obj=obj, name=obj.name)
        return {"FINISHED"}
class SegmentInstanceSingle(Operator):
    """Segment the selected objects/parts.
    All objects will be segmented as a single instance.
    """

    bl_idname = "object.zpy_segment_instance_single"
    bl_label = "Segment Instance (Single)"
    bl_options = {"REGISTER", "UNDO"}

    @classmethod
    def poll(cls, context):
        # Enabled only when something is selected.
        return len(context.selected_objects) > 0

    def execute(self, context):
        context.space_data.shading.color_type = "OBJECT"
        # Pick a random color and instance name
        # (all selected objects share the first object's name and one color).
        _name = context.selected_objects[0].name
        _color = zpy.color.random_color(output_style="frgb")
        for obj in zpy.objects.for_obj_in_selected_objs(context):
            zpy.objects.segment(obj=obj, name=_name, color=_color)
        return {"FINISHED"}
class VisualizeInstance(Operator):
    """Visualize the instance colors on objects in scene."""

    bl_idname = "object.zpy_visualize_instance"
    bl_label = "Visualize Instances"
    bl_options = {"REGISTER", "UNDO"}

    def execute(self, context):
        context.space_data.shading.color_type = "OBJECT"
        # Loop through all objects in the scene
        for obj in context.scene.objects:
            if not obj.type == "MESH":
                continue
            context.view_layer.objects.active = obj
            # NOTE(review): a FloatVectorProperty is presumably never None,
            # so the else branch may be dead -- confirm.
            if obj.seg.instance_color is not None:
                obj.color = zpy.color.frgb_to_frgba(obj.seg.instance_color)
            else:
                obj.color = zpy.color.default_color(output_style="frgba")
        return {"FINISHED"}
class VisualizeCategory(Operator):
    """Visualize the category colors on objects in scene."""

    bl_idname = "object.zpy_visualize_category"
    bl_label = "Visualize Categories"
    bl_options = {"REGISTER", "UNDO"}

    def execute(self, context):
        context.space_data.shading.color_type = "OBJECT"
        # Loop through all objects in the scene
        for obj in context.scene.objects:
            if not obj.type == "MESH":
                continue
            context.view_layer.objects.active = obj
            if obj.seg.category_color is not None:
                obj.color = zpy.color.frgb_to_frgba(obj.seg.category_color)
            else:
                # Explicit RGBA output style: obj.color is a 4-float color,
                # matching VisualizeInstance and ResetSegData.
                obj.color = zpy.color.default_color(output_style="frgba")
        return {"FINISHED"}
class ResetSegData(Operator):
    """Reset the segmentation data on the selected objects/parts."""

    bl_idname = "object.zpy_reset_seg_data"
    bl_label = "Reset Segmentation Data"
    bl_options = {"REGISTER", "UNDO"}

    @classmethod
    def poll(cls, context):
        # Enabled only when something is selected.
        return len(context.selected_objects) > 0

    def execute(self, context):
        context.space_data.shading.color_type = "OBJECT"
        for obj in context.selected_objects:
            # Only meshes or empty objects TODO: Why the empty objects
            if not (obj.type == "MESH" or obj.type == "EMPTY"):
                continue
            # Restore names to defaults and colors to the library default.
            obj.seg.instance_name = ""
            obj.seg.instance_color = zpy.color.default_color(output_style="frgb")
            obj.seg.category_name = "default"
            obj.seg.category_color = zpy.color.default_color(output_style="frgb")
            obj.color = zpy.color.default_color(output_style="frgba")
        return {"FINISHED"}
def _reset_categories(context):
    """Reset the scene categories."""
    # Clean out the scene-level category dict
    # (repeatedly remove index 0 until the collection is empty).
    for _ in range(len(context.scene.categories)):
        context.scene.categories.remove(0)
    # Reset all categories
    for obj in zpy.objects.for_obj_in_selected_objs(context):
        obj.seg.category_name = "default"
        obj.seg.category_color = zpy.color.default_color(output_style="frgb")
        obj.color = zpy.color.default_color(output_style="frgba")
def _add_category(context, name: str = None, color: Tuple[float] = None) -> None:
    """Add category to enum category property.

    Args:
        context: Blender context holding the scene category collection.
        name: category label; duplicates are skipped with a warning.
        color: float RGB triple; a random color is chosen when omitted.
    """
    if name in context.scene.categories.keys():
        log.warning(f"Skipping duplicate category {name}.")
        return
    if color is None:
        color = zpy.color.random_color(output_style="frgb")
        log.info(f"Choosing random color for category {name}: {color}")
    # Add category to categories dict
    new_category = context.scene.categories.add()
    new_category.name = name
    new_category.color = color
class CategoriesFromText(Operator):
    """Populate categories from text block."""

    bl_idname = "object.zpy_categories_from_text"
    bl_label = "Categories from Text"

    def execute(self, context):
        # BUG: Clicking "Text" resets all the categories and their colors
        # Fetch (or lazily create) the "categories" text block, one category
        # name per line.
        txt = bpy.data.texts
        if "categories" in txt.keys():
            category_text = txt["categories"]
        else:
            txt.new("categories")
            category_text = txt["categories"]
        assert (
            category_text is not None
        ), "Category text block must exist for segmentation."
        # Activate the categories text block in the text editor
        for area in context.screen.areas:
            if area.type == "TEXT_EDITOR":
                space = area.spaces.active
                space.text = category_text
        # Rebuild the scene category collection from the text lines.
        _reset_categories(context)
        for line in category_text.lines:
            _add_category(context, name=line.body)
        return {"FINISHED"}
class CategoriesFromZUMOJSON(Operator, ImportHelper):
    """Load scene categories from a ZUMO-format JSON file."""
    bl_idname = "object.zpy_categories_from_zumo"
    bl_description = "Categories from Zumo JSON"
    bl_label = "Import"
    filter_glob: bpy.props.StringProperty(default="*.json", options={"HIDDEN"})
    def execute(self, context):
        # ImportHelper fills in self.filepath from the file browser dialog.
        zumo_json = zpy.files.read_json(self.filepath)
        categories = zumo_json.get("categories", None)
        assert categories is not None, "ZUMO JSON does not have categories."
        # Start from a clean slate, then add every category in the file.
        _reset_categories(context)
        for category_entry in categories.values():
            _add_category(
                context,
                name=category_entry.get("name", None),
                color=category_entry.get("color", None),
            )
        return {"FINISHED"}
class SCENE_PT_SegmentPanel(bpy.types.Panel):
"""UI for the addon that is visible in Blender."""
bl_idname = "SCENE_PT_SegmentPanel"
bl_space_type = "VIEW_3D"
bl_region_type = "UI"
bl_label = "Segment"
bl_category = "ZPY"
bl_options = {"DEFAULT_CLOSED"}
def draw(self, context):
layout = self.layout
row = layout.row()
row.label(text="Segment Selected Objects")
row = layout.row()
row.operator(
"object.zpy_segment_instance_single",
text="Single",
icon="USER",
)
row.operator(
"object.zpy_segment_instance_many",
text="Many",
icon="COMMUNITY",
)
row = layout.row()
row.prop(context.scene, "categories_enum", text="")
row = layout.row()
row.label(text="Visualize")
row = layout.row()
row.operator(
"object.zpy_visualize_instance",
text="Visualize Instances",
icon="HIDE_OFF",
)
row = layout.row()
row.operator(
"object.zpy_visualize_category",
text="Visualize Categories",
icon="HIDE_OFF",
)
row = layout.row()
row.label(text="Load Categories")
row = layout.row()
row.operator(
"object.zpy_categories_from_text",
text="Text",
icon="TEXT",
)
row.operator(
"object.zpy_categories_from_zumo",
text="Json",
icon="FILEBROWSER",
)
row = layout.row()
row.label(text="Selected Object Data")
row = layout.row()
row.operator(
"object.zpy_reset_seg_data",
text="Reset Seg Data",
icon="FILE_REFRESH",
)
row = layout.row()
row.label(text="Instance")
row = layout.row()
row.prop(context.object.seg, "instance_name", text="")
row.prop(context.object.seg, "instance_color", text="")
row = layout.row()
row.label(text="Category")
row = layout.row()
row.prop(context.object.seg, "category_name", text="")
row.prop(context.object.seg, "category_color", text="")
row = layout.row() | zpy-zumo | /zpy-zumo-1.4.1rc7.tar.gz/zpy-zumo-1.4.1rc7/zpy_addon/segment_panel.py | segment_panel.py |
import importlib
import logging
from pathlib import Path
import bpy
import zpy
import gin
from bpy.types import Operator
log = logging.getLogger(__name__)
if "bpy" in locals():
importlib.reload(zpy)
class RunOperator(Operator):
    """Launch the run script in Blender's texts.

    Loads the text block named "run" as a module, parses the gin config
    from the "config" text block, then calls the module's run() function.
    """
    bl_idname = "scene.zpy_run"
    bl_label = "Run Sim"
    bl_description = "Launch the run script in Blender's texts."
    bl_category = "ZPY"
    bl_options = {"REGISTER"}
    def execute(self, context):
        # Set the logger levels
        zpy.logging.set_log_levels("info")
        # Get the run text
        run_text = bpy.data.texts.get("run", None)
        if run_text is None:
            raise ValueError(
                'Running a sim requires a run text, could not find in text with name "run".'
            )
        # HACK: Gin will complain when this module is re-initialized
        gin.enter_interactive_mode()
        with gin.unlock_config():
            # Turn the text block into an importable module object.
            run_text_as_module = run_text.as_module()
        # Parse the gin-config text block
        zpy.blender.parse_config("config")
        # Execute the run function inside the run text
        # NOTE(review): assumes the run text defines run(); AttributeError otherwise.
        run_text_as_module.run()
        return {"FINISHED"}
class RenderOperator(Operator):
    """Render out single image (rgb, segmented, depth).

    Writes rgb, instance-segmentation, category-segmentation, and depth
    images for frame 0 into the scene's configured output path.
    """
    bl_idname = "scene.zpy_render"
    bl_label = "Render Frame"
    bl_description = "Render out segmented images."
    bl_category = "ZPY"
    bl_options = {"REGISTER"}
    @classmethod
    def poll(cls, context):
        # TODO: Make sure sim is good to render?
        return True
    def execute(self, context):
        # Object-color shading is required for segmentation colors to render.
        context.space_data.shading.color_type = "OBJECT"
        # Image names (frame index hard-coded to 0 for a single-frame render)
        rgb_image_name = zpy.files.make_rgb_image_name(0)
        cseg_image_name = zpy.files.make_cseg_image_name(0)
        iseg_image_name = zpy.files.make_iseg_image_name(0)
        depth_image_name = zpy.files.make_depth_image_name(0)
        # Output path
        output_path = Path(context.scene.zpy_output_path)
        # Save renders to file
        zpy.render.render(
            rgb_path=output_path / rgb_image_name,
            iseg_path=output_path / iseg_image_name,
            cseg_path=output_path / cseg_image_name,
            depth_path=output_path / depth_image_name,
        )
        return {"FINISHED"}
class SCENE_PT_ExecutePanel(bpy.types.Panel):
    """Sidebar panel with the Render and Run buttons (ZPY tab)."""
    bl_idname = "SCENE_PT_ExecutePanel"
    bl_space_type = "VIEW_3D"
    bl_region_type = "UI"
    bl_label = "Execute"
    bl_category = "ZPY"
    def draw(self, context):
        layout = self.layout
        # One button per row: (operator idname, button label, icon).
        buttons = (
            ("scene.zpy_render", "Render", "FILE_IMAGE"),
            ("scene.zpy_run", "Run (Debug)", "TRIA_RIGHT"),
        )
        for op_idname, label, icon_name in buttons:
            layout.row().operator(op_idname, text=label, icon=icon_name)
import importlib
import logging
from pathlib import Path
import bpy
import zpy
log = logging.getLogger(__name__)
if "bpy" in locals():
importlib.reload(zpy)
def registerSceneProperties():
    """Properties applied to scenes.

    Registers the gin-config path, run.py path, and template directory
    properties on bpy.types.Scene.
    """
    # Changing either path property auto-pulls the file via its update callback.
    bpy.types.Scene.zpy_gin_config_path = bpy.props.StringProperty(
        name="",
        description="Path to a gin config file.",
        default="",
        subtype="FILE_PATH",
        update=_load_gin_config,
    )
    bpy.types.Scene.zpy_runpy_path = bpy.props.StringProperty(
        name="",
        description="Path to run.py file",
        default="",
        subtype="FILE_PATH",
        update=_load_runpy,
    )
    # Where the run.py / config.gin templates are loaded from.
    bpy.types.Scene.zpy_template_dir = bpy.props.StringProperty(
        name="",
        description="Path to script template directory.",
        default=str(zpy.assets.script_template_dir()),
        subtype="DIR_PATH",
    )
def _load_gin_config(self, context) -> None:
    """Load gin config from file.

    Update callback for Scene.zpy_gin_config_path: re-pulls the file
    whenever the path property changes.
    """
    bpy.ops.scene.zpy_load_gin_config()
def _load_runpy(self, context) -> None:
    """Load run.py from file.

    Update callback for Scene.zpy_runpy_path: re-pulls the file whenever
    the path property changes.
    """
    bpy.ops.scene.zpy_load_runpy()
class LoadGinConfigOperator(bpy.types.Operator):
    """Load gin config from file.

    Pulls the file at the scene's gin-config path into a Blender text
    block named "config".
    """
    bl_idname = "scene.zpy_load_gin_config"
    bl_label = "Load gin config from file."
    bl_description = "Load gin config from file."
    bl_category = "ZPY"
    bl_options = {"REGISTER"}
    # Default name of the texts in Blender when loading
    DEFAULT_TEXT_NAME = "config"
    def execute(self, context):
        # NOTE(review): presumably creates/overwrites the text block --
        # see zpy.blender.load_text_from_file for exact semantics.
        zpy.blender.load_text_from_file(
            bpy.path.abspath(context.scene.zpy_gin_config_path),
            text_name=self.DEFAULT_TEXT_NAME,
        )
        return {"FINISHED"}
class PushGinConfigOperator(bpy.types.Operator):
    """Push gin config to file.

    Writes the "config" text block out to the scene's gin config path.
    """
    bl_idname = "scene.zpy_push_gin_config"
    bl_label = "Push gin config to file."
    bl_description = "Push gin config to file."
    bl_category = "ZPY"
    bl_options = {"REGISTER"}
    def execute(self, context):
        text_name = LoadGinConfigOperator.DEFAULT_TEXT_NAME
        config_text = bpy.data.texts.get(text_name, None)
        if config_text is None:
            # Previously bpy.data.texts[...] raised an unhandled KeyError when
            # the text block was never pulled/created; report a readable error.
            self.report({"ERROR"}, f'No text block named "{text_name}" to push.')
            return {"CANCELLED"}
        with open(bpy.path.abspath(context.scene.zpy_gin_config_path), "w") as _file:
            _file.write(config_text.as_string())
        return {"FINISHED"}
class LoadRunpyOperator(bpy.types.Operator):
    """Load run.py from file.

    Pulls the file at the scene's run.py path into a Blender text block
    named "run".
    """
    bl_idname = "scene.zpy_load_runpy"
    bl_label = "Load run.py from file."
    bl_description = "Load run.py from file."
    bl_category = "ZPY"
    bl_options = {"REGISTER"}
    # Default name of the texts in Blender when loading
    DEFAULT_TEXT_NAME = "run"
    def execute(self, context):
        # NOTE(review): presumably creates/overwrites the text block --
        # see zpy.blender.load_text_from_file for exact semantics.
        zpy.blender.load_text_from_file(
            bpy.path.abspath(context.scene.zpy_runpy_path),
            text_name=self.DEFAULT_TEXT_NAME,
        )
        return {"FINISHED"}
class PushRunpyOperator(bpy.types.Operator):
    """Push run.py to file.

    Writes the "run" text block out to the scene's run.py path.
    """
    bl_idname = "scene.zpy_push_runpy"
    bl_label = "Push run.py to file."
    bl_description = "Push run.py to file."
    bl_category = "ZPY"
    bl_options = {"REGISTER"}
    def execute(self, context):
        text_name = LoadRunpyOperator.DEFAULT_TEXT_NAME
        run_text = bpy.data.texts.get(text_name, None)
        if run_text is None:
            # Previously bpy.data.texts[...] raised an unhandled KeyError when
            # the text block was never pulled/created; report a readable error.
            self.report({"ERROR"}, f'No text block named "{text_name}" to push.')
            return {"CANCELLED"}
        with open(bpy.path.abspath(context.scene.zpy_runpy_path), "w") as _file:
            _file.write(run_text.as_string())
        return {"FINISHED"}
class SCENE_PT_ScriptPanel(bpy.types.Panel):
    """UI for the addon that is visible in Blender.

    Script panel: load templates, and pull/push the run.py and gin
    config files to/from their Blender text blocks.
    """
    bl_idname = "SCENE_PT_ScriptPanel"
    bl_space_type = "VIEW_3D"
    bl_region_type = "UI"
    bl_label = "Script"
    bl_category = "ZPY"
    bl_options = {"DEFAULT_CLOSED"}
    def draw(self, context):
        layout = self.layout
        scene = context.scene
        row = layout.row()
        row.operator(
            "scene.zpy_load_templates",
            text="Load Templates",
            icon="TEXT",
        )
        # run.py path field plus Pull/Push buttons sharing one row.
        row = layout.row()
        row.label(text="Run.py Path")
        row = layout.row()
        row.prop(scene, "zpy_runpy_path", expand=True)
        row = layout.row()
        row.operator(
            "scene.zpy_load_runpy",
            text="Pull",
            icon="IMPORT",
        )
        row.operator(
            "scene.zpy_push_runpy",
            text="Push",
            icon="EXPORT",
        )
        # Gin config path field plus Pull/Push buttons sharing one row.
        row = layout.row()
        row.label(text="Gin Config Path")
        row = layout.row()
        row.prop(scene, "zpy_gin_config_path")
        row = layout.row()
        row.operator(
            "scene.zpy_load_gin_config",
            text="Pull",
            icon="IMPORT",
        )
        row.operator(
            "scene.zpy_push_gin_config",
            text="Push",
            icon="EXPORT",
        )
class LoadTemplatesOperator(bpy.types.Operator):
    """Loads templates for run.py and gin config."""
    bl_idname = "scene.zpy_load_templates"
    bl_label = "Loads templates for run.py and gin config."
    bl_description = "Loads templates for run.py and gin config."
    bl_category = "ZPY"
    bl_options = {"REGISTER"}
    def execute(self, context):
        # Delegates to the two template-loading operators defined below.
        bpy.ops.text.zpy_load_ginconfig_template()
        bpy.ops.text.zpy_load_runpy_template()
        return {"FINISHED"}
class TEXT_PT_LoadRunPyTemplateOperator(bpy.types.Operator):
    """Opens the run.py template.

    Loads <template_dir>/run.py into the "run" text block and opens it.
    """
    bl_idname = "text.zpy_load_runpy_template"
    bl_label = "Open zpy run.py template."
    def execute(self, context):
        # Template lives in the scene's configured template directory.
        template_dir = Path(bpy.path.abspath(context.scene.zpy_template_dir))
        template_path = template_dir / "run.py"
        zpy.blender.load_text_from_file(
            template_path,
            text_name=LoadRunpyOperator.DEFAULT_TEXT_NAME,
            open_text=True,
        )
        return {"FINISHED"}
class TEXT_PT_LoadGinConfigTemplateOperator(bpy.types.Operator):
    """Opens the gin config template.

    Loads <template_dir>/config.gin into the "config" text block and opens it.
    """
    bl_idname = "text.zpy_load_ginconfig_template"
    bl_label = "Open gin config template."
    def execute(self, context):
        # Template lives in the scene's configured template directory.
        template_dir = Path(bpy.path.abspath(context.scene.zpy_template_dir))
        template_path = template_dir / "config.gin"
        zpy.blender.load_text_from_file(
            template_path,
            text_name=LoadGinConfigOperator.DEFAULT_TEXT_NAME,
            open_text=True,
        )
        return {"FINISHED"}
def script_template_menu(self, context):
    """Append the zpy entries to Blender's Text Editor templates menu."""
    # Appended to TEXT_MT_templates_py by the addon's register() function.
    self.layout.separator()
    self.layout.operator("text.zpy_load_runpy_template", text="(zpy) Run Script")
    self.layout.operator("text.zpy_load_ginconfig_template", text="(zpy) Config Text")
import importlib
import logging
import subprocess
import sys
from pathlib import Path
import bpy
log = logging.getLogger(__name__)
# Blender add-on metadata, shown in Blender's add-on preferences.
bl_info = {
    "name": "zpy",
    "author": "Zumo Labs",
    # TODO: Keep up to date with $ZPY_VERSION
    "version": (1, 0, 0),
    # TODO: Keep up to date with $BLENDER_VERSION
    "blender": (2, 92, 0),
    "location": "View3D > Properties > zpy",
    "description": "Synthetic data creation tools for Blender.",
    "warning": "",
    "doc_url": "https://github.com/ZumoLabs/zpy/tree/main/README.md",
    "category": "3D View",
}
def install_pip_depenencies():
    """Install pip dependencies required by zpy addon.

    Currently unused -- see the commented-out call below; kept until
    Blender supports addon dependency installation (T71420).
    """
    # NOTE(review): function name has a typo ("depenencies"); kept as-is so
    # the commented-out call site below still matches.
    try:
        log.info("Installing zpy and dependencies...")
        # Upgrade pip with Blender's python
        pip_install = [sys.executable, "-m", "pip", "install"]
        subprocess.run(pip_install + ["--upgrade", "pip"], check=True)
        # Install zpy through PyPI into Blender's python site-package dir
        # NOTE(review): "lib/site-packages" is the Windows layout; on
        # Linux/macOS Blender uses lib/python3.x/site-packages -- confirm.
        pkg_path = Path(sys.executable).parent.parent / "lib" / "site-packages" / "zpy"
        subprocess.run(
            pip_install + ["zpy-zumo", "--target", str(pkg_path)], check=True
        )
    except Exception as e:
        # Best-effort: failure is logged, not raised.
        log.warning(f"Could not install ZPY and dependencies: {e}")
try:
import zpy
except ModuleNotFoundError:
log.exception(
"No zpy module found, please follow developer "
+ "install instructions at https://github.com/ZumoLabs/zpy#install"
)
# TODO: Automatic installation of pip dependencies
# waiting on https://developer.blender.org/T71420
# install_pip_depenencies()
# import zpy
if "bpy" in locals():
log.warning("Reloading zpy_addon files.")
from . import export_panel
from . import output_panel
from . import execute_panel
from . import script_panel
from . import segment_panel
importlib.reload(export_panel)
importlib.reload(output_panel)
importlib.reload(execute_panel)
importlib.reload(script_panel)
importlib.reload(segment_panel)
importlib.reload(zpy)
# All classes registered/unregistered by register()/unregister() below.
# Properties are listed first, followed by operators, then panels and menus.
classes = (
    # Properties
    segment_panel.CategoryProperties,
    segment_panel.SegmentableProperties,
    # Object operators
    segment_panel.SegmentInstanceSingle,
    segment_panel.SegmentInstanceMany,
    segment_panel.ResetSegData,
    # Scene operators
    segment_panel.VisualizeInstance,
    segment_panel.VisualizeCategory,
    segment_panel.CategoriesFromText,
    segment_panel.CategoriesFromZUMOJSON,
    output_panel.OpenOutputDirOperator,
    output_panel.CleanOutputDirOperator,
    execute_panel.RenderOperator,
    execute_panel.RunOperator,
    export_panel.ExportOperator,
    export_panel.OpenExportDirOperator,
    export_panel.CleanUpDirOperator,
    script_panel.LoadGinConfigOperator,
    script_panel.PushGinConfigOperator,
    script_panel.LoadRunpyOperator,
    script_panel.PushRunpyOperator,
    script_panel.LoadTemplatesOperator,
    # Panels
    output_panel.SCENE_PT_OutputPanel,
    execute_panel.SCENE_PT_ExecutePanel,
    segment_panel.SCENE_PT_SegmentPanel,
    script_panel.SCENE_PT_ScriptPanel,
    export_panel.SCENE_PT_ExportPanel,
    # Menus
    script_panel.TEXT_PT_LoadRunPyTemplateOperator,
    script_panel.TEXT_PT_LoadGinConfigTemplateOperator,
)
def register():
    """Register any classes and properties."""
    for cls in classes:
        try:
            log.info(f"Registering class {cls.__name__}")
            bpy.utils.register_class(cls)
        except Exception as e:
            # Keep going so one bad class does not block the whole addon.
            log.warning(f"Exception when registering {cls.__name__}: {e}")
    # Per-module custom properties (object- and scene-level).
    segment_panel.registerObjectProperties()
    segment_panel.registerSceneProperties()
    output_panel.registerSceneProperties()
    export_panel.registerSceneProperties()
    script_panel.registerSceneProperties()
    # Script templates
    bpy.types.TEXT_MT_templates_py.append(script_panel.script_template_menu)
    # Older Blender builds lack this experimental flag; enable only when present.
    if "use_sculpt_vertex_colors" in dir(bpy.context.preferences.experimental):
        bpy.context.preferences.experimental.use_sculpt_vertex_colors = True
def unregister():
    """Unregister any classes and properties."""
    for cls in classes:
        try:
            log.info(f"Un-registering class {cls.__name__}")
            bpy.utils.unregister_class(cls)
        except Exception as e:
            # Keep going so one failure does not block addon teardown.
            log.warning(f"Exception when un-registering {cls.__name__}: {e}")
    # Script templates
    bpy.types.TEXT_MT_templates_py.remove(script_panel.script_template_menu)
# Allow running this file directly from Blender's text editor.
if __name__ == "__main__":
    register()
import importlib
import logging
import bpy
import zpy
from bpy.types import Operator
log = logging.getLogger(__name__)
if "bpy" in locals():
importlib.reload(zpy)
def registerSceneProperties():
    """Properties applied to scenes.

    Registers the output-path property on bpy.types.Scene.
    """
    # Output directory for all rendered artifacts; defaults to a temp dir.
    bpy.types.Scene.zpy_output_path = bpy.props.StringProperty(
        name="Output Path",
        description="Output path for rendered images, annotations, etc.",
        default=str(zpy.files.default_temp_path()),
        subtype="DIR_PATH",
    )
class CleanOutputDirOperator(bpy.types.Operator):
    """Delete everything inside the output directory, keeping the directory."""
    bl_idname = "scene.zpy_cleanup_output_dir"
    bl_label = "Clean Output Dir"
    bl_description = "Clean output dir."
    bl_category = "ZPY"
    bl_options = {"REGISTER"}
    def execute(self, context):
        # Empty the configured output path without removing the folder itself.
        output_dir = context.scene.zpy_output_path
        zpy.files.clean_dir(output_dir, keep_dir=True)
        return {"FINISHED"}
class OpenOutputDirOperator(Operator):
    """Open the system file browser at the configured output directory."""
    bl_idname = "scene.zpy_open_output_dir"
    bl_label = "Open Output Dir"
    bl_description = "Open file browser at output dir."
    bl_category = "ZPY"
    bl_options = {"REGISTER"}
    def execute(self, context):
        # make=True creates the directory first if it does not exist yet.
        target_dir = context.scene.zpy_output_path
        zpy.files.open_folder_in_explorer(target_dir, make=True)
        return {"FINISHED"}
class SCENE_PT_OutputPanel(bpy.types.Panel):
    """Sidebar panel for choosing and managing the output directory."""
    bl_idname = "SCENE_PT_OutputPanel"
    bl_space_type = "VIEW_3D"
    bl_region_type = "UI"
    bl_label = "Output Path"
    bl_category = "ZPY"
    def draw(self, context):
        layout = self.layout
        scene = context.scene
        # Editable path field on its own row.
        layout.row().prop(scene, "zpy_output_path", text="")
        # "Open" and "Clean" buttons side by side on a second row.
        button_row = layout.row()
        button_row.operator(
            "scene.zpy_open_output_dir",
            text="Open",
            icon="FILEBROWSER",
        )
        button_row.operator(
            "scene.zpy_cleanup_output_dir",
            text="Clean",
            icon="PACKAGE",
        )
import importlib
import logging
from pathlib import Path
import bpy
import zpy
log = logging.getLogger(__name__)
if "bpy" in locals():
importlib.reload(zpy)
def registerSceneProperties():
    """Properties applied to scenes.

    Registers sim name/version and export directory properties on
    bpy.types.Scene.
    """
    bpy.types.Scene.zpy_sim_name = bpy.props.StringProperty(
        name="Sim Name",
        description="Name of the scene, must match data portal.",
        default="default",
    )
    bpy.types.Scene.zpy_sim_version = bpy.props.StringProperty(
        name="Sim Version",
        description="Version of the scene, must match data portal.",
        default="0",
    )
    bpy.types.Scene.zpy_export_dir = bpy.props.StringProperty(
        name="Export Directory Path",
        description="Path to directory for exporting packaged zumo scenes.",
        default=str(zpy.files.default_temp_path()),
        subtype="DIR_PATH",
    )
    # Set programmatically by ExportOperator to <export_dir>/<name>_v<version>.
    bpy.types.Scene.zpy_export_path = bpy.props.StringProperty(
        name="Export Path",
        description="Export path for this zumo scene.",
        default=str(zpy.files.default_temp_path()),
        subtype="DIR_PATH",
    )
class OpenExportDirOperator(bpy.types.Operator):
    """Open the system file browser at the export directory."""
    bl_idname = "scene.zpy_open_export_dir"
    bl_label = "Open Export Dir"
    bl_description = "Open file browser at export dir."
    bl_category = "ZPY"
    bl_options = {"REGISTER"}
    def execute(self, context):
        # make=True creates the directory first if it does not exist yet.
        export_dir = context.scene.zpy_export_dir
        zpy.files.open_folder_in_explorer(export_dir, make=True)
        return {"FINISHED"}
class CleanUpDirOperator(bpy.types.Operator):
    """Clean up and package sim into export dir.

    Currently only removes Blender backup files from the export path.
    """
    bl_idname = "scene.zpy_cleanup_sim"
    bl_label = "Clean Up Export Dir"
    bl_description = "Clean up export dir."
    bl_category = "ZPY"
    bl_options = {"REGISTER"}
    def execute(self, context):
        log.info("Cleaning up scene.")
        # Remove any backup blender files
        zpy.files.remove_files_with_suffix(
            path=context.scene.zpy_export_path,
            exts=[".blend1", ".blend2", ".blend3"],
        )
        # TODO: Scene based clean up collections and objects listings (in the text editor)
        # TODO: Remove the custom scene scripts that are not needed for staging (keep run, config, categories for now)
        return {"FINISHED"}
class ExportOperator(bpy.types.Operator):
    """Export sim for ingest to Data Portal.

    Packages the current scene into <export_dir>/<name>_v<version>:
    saves the .blend with packed/unpacked assets, writes _meta.zumo.json,
    and zips the directory for upload.
    """
    bl_idname = "scene.zpy_export_sim"
    bl_label = "Export sim"
    bl_description = "Export sim for ingest to Data Portal."
    bl_category = "ZPY"
    bl_options = {"REGISTER"}
    def execute(self, context):
        # Shows progress cursor in Blender UI.
        log.info("Export Started.")
        bpy.context.window_manager.progress_begin(0, 100)
        # Clean scene before every export
        bpy.ops.scene.zpy_cleanup_sim()
        # Create export directory in the Blender filepath
        export_dir_name = (
            f"{context.scene.zpy_sim_name}_v{context.scene.zpy_sim_version}"
        )
        export_path = Path(context.scene.zpy_export_dir) / export_dir_name
        zpy.files.verify_path(export_path, make=True)
        # Set the scene export path
        context.scene.zpy_export_path = str(export_path)
        # Find missing files before export
        log.info("Export Step 1 of 4: Checking for any missing files.")
        bpy.context.window_manager.progress_update(10)
        _path = zpy.assets.get_asset_lib_path()
        if _path is not None:
            log.info(f"Running find missing files in {_path}")
            bpy.ops.file.find_missing_files(directory=str(_path))
        # Fix all the asset paths by packing them into the .blend
        # file and then un-packing them into a 'textures' folder.
        try:
            bpy.ops.wm.save_as_mainfile(
                filepath=str(export_path / "main.blend"),
                compress=False,
                relative_remap=True,
            )
            log.info("Export Step 2 of 4: Packing files into .blend.")
            bpy.context.window_manager.progress_update(30)
            bpy.ops.file.make_paths_absolute()
            bpy.ops.file.make_paths_relative()
            bpy.ops.file.pack_all()
            bpy.ops.file.unpack_all(method="WRITE_LOCAL")
            # Save again so the re-written asset paths are persisted.
            bpy.ops.wm.save_as_mainfile(
                filepath=str(export_path / "main.blend"),
                compress=False,
                relative_remap=True,
            )
        except Exception as e:
            self.report({"ERROR"}, str(e))
            log.warning(f"Exception when exporting: {e}")
            # BUGFIX: end the progress cursor on the error path too,
            # otherwise the UI is stuck showing progress.
            bpy.context.window_manager.progress_end()
            return {"CANCELLED"}
        log.info("Export Step 3 of 4: Saving meta-information.")
        bpy.context.window_manager.progress_update(70)
        # Output scene information in _meta.zumo
        zpy.files.write_json(
            export_path / "_meta.zumo.json",
            zpy.blender.scene_information(),
        )
        # TODO: Export glTF into zip directory
        # Clean up scene before zipping up
        bpy.ops.scene.zpy_cleanup_sim()
        # Zip up the exported directory for easy upload
        log.info("Export Step 4 of 4: Zipping up package.")
        bpy.context.window_manager.progress_update(90)
        zpy.files.zip_file(
            in_path=export_path,
            zip_path=Path(context.scene.zpy_export_dir) / f"{export_dir_name}.zip",
        )
        log.info("Export Completed.")
        bpy.context.window_manager.progress_end()
        return {"FINISHED"}
class SCENE_PT_ExportPanel(bpy.types.Panel):
    """UI for the addon that is visible in Blender.

    Export panel: sim name/version fields, export path, and the
    export/open/clean buttons.
    """
    bl_idname = "SCENE_PT_ExportPanel"
    bl_space_type = "VIEW_3D"
    bl_region_type = "UI"
    bl_label = "Export"
    bl_category = "ZPY"
    bl_options = {"DEFAULT_CLOSED"}
    def draw(self, context):
        layout = self.layout
        scene = context.scene
        row = layout.row()
        row.operator(
            "scene.zpy_export_sim",
            text="Export Sim",
            icon="EXPORT",
        )
        row = layout.row()
        row.prop(scene, "zpy_sim_name", text="Name")
        row = layout.row()
        row.prop(scene, "zpy_sim_version", text="Version")
        row = layout.row()
        row.label(text="Export Path")
        row = layout.row()
        row.prop(scene, "zpy_export_dir", text="")
        # "Open" and "Clean" buttons share the last row.
        row = layout.row()
        row.operator(
            "scene.zpy_open_export_dir",
            text="Open",
            icon="FILEBROWSER",
        )
        row.operator(
            "scene.zpy_cleanup_sim",
            text="Clean",
            icon="PACKAGE",
        )
# Zpy
[](https://travis-ci.org/sfstpala/zpy)
[](https://coveralls.io/r/sfstpala/zpy)
[](https://pypi.python.org/pypi/zpy)
Quickly encrypt files with your ssh identity.
Note: **This is experimental software!**
## Links
- [**Download**](https://pypi.python.org/pypi/zpy)
- [**Changelog**](CHANGELOG.md)
- [**Documentation**](doc/)
- [**Builds on Travis-CI**](https://travis-ci.org/sfstpala/zpy)
## Installation
You can install zpy using pip:
pip3 install zpy
See also:
- [**Getting Started Guide**](doc/getting_started.md)
## Development setup
Install the prerequisites:
sudo apt-get install build-essential curl python3-dev
Then run the test suite:
make test
After running `make`, you can run zpy by typing `bin/zpy`.
See also:
- [**Hacking Guide**](doc/hacking.md)
## Copyright notice
Copyright (C) 2015 Stefano Palazzo <[email protected]>
This program comes with ABSOLUTELY NO WARRANTY;
This is free software, and you are welcome to redistribute it
under certain conditions; see [COPYING](COPYING) for details.
| zpy | /zpy-0.5.0.tar.gz/zpy-0.5.0/README.md | README.md |
# Zpy Changelog
## [Release 0.4.0](https://github.com/sfstpala/zpy/tree/0.4.0)
- Clarified help text on the --identity flag, which can read either a private or a public key file.
## [Release 0.3.0](https://github.com/sfstpala/zpy/tree/0.3.0)
- Introduces protocol version 2, which adds a second random key for authentication (see [Theory of Operation](doc/theory_of_operation.md))
- Files encrypted with this release can not be decrypted with earlier releases, but older files are backwards compatible
## [Release 0.2.0](https://github.com/sfstpala/zpy/tree/0.2.0)
- Added more documentation
## [Release 0.1.1](https://github.com/sfstpala/zpy/tree/0.1.1)
- Release tags are signed
- Zpy is now [available on PyPI](https://pypi.python.org/pypi/zpy)
- Added DESCRIPTION.md for PyPI
## [Release 0.1.0](https://github.com/sfstpala/zpy/tree/0.1.0)
- Initial release
| zpy | /zpy-0.5.0.tar.gz/zpy-0.5.0/CHANGELOG.md | CHANGELOG.md |
# Best Practices
## Determining if Zpy is right for you
Zpy encrypts data at rest using your ssh private key. This has implications for the
kind of security zpy provides.
- If your private key is compromised, your encrypted data is compromised too
- If you lose your private key, your data is lost too
Zpy is also not meant for communication. If you want to send encrypted files to
someone else, Zpy is not the right tool.
Consider using [GPG](https://www.gnupg.org/gph/en/manual.html) instead.
If you know about or are interested in Cryptography, read the [Theory of Operation](theory_of_operation.md).
### What should I use it for?
Again, Zpy is new, experimental software. Do not use it for anything critical.
If you are versed in cryptography, look at the source code and read the documentation
before you use it.
Here are some use cases for which zpy should work well:
- Backing up private keys, API credentials, application specific passwords
- Backing up configuration files with secrets in them
- Storing encrypted secrets on Google Drive or similar services
- Encrypting files before copying them onto removable media
### Other tools to consider
- If you are communicating with someone via email, use [GNU Privacy Guard](https://www.gnupg.org/gph/en/manual.html)
- If you are worried about your laptop being lost or stolen, use [full disk encryption](https://ssd.eff.org/en/glossary/full-disk-encryption)
- If you want to chat securely, use [Cryptocat](https://crypto.cat/) or another [OTR](https://otr.cypherpunks.ca/) client
- If you want to store passwords securely, use [KeePass](http://keepass.info/)
## Miscellaneous
- You should clone this repository on the machine where your private key is stored in case there is a problem with PyPI or GitHub.
| zpy | /zpy-0.5.0.tar.gz/zpy-0.5.0/doc/best_practices.md | best_practices.md |
# Getting Started
Please note: you are testing experimental software.
See also: [Best Practices](best_practices.md)
## Installation
To install zpy, you need Python 3.4 or later.
### Debian 8, Ubuntu 14.04 or later
# apt-get install python3-pip
# pip3 install zpy
### Fedora 22
# dnf install python3-pip
# pip3 install zpy
### OS X (Homebrew)
$ brew install python3
$ pip3 install zpy
## Encrypting Files
To encrypt a file, simply type:
zpy encrypt secrets.txt
You can also pipe text into zpy:
echo "attack at dawn" | zpy encrypt
Note that the output is Base64 encoded. If you are encrypting large files,
use the `--raw` flag to output raw bytes instead.
zpy encrypt -r secrets.sqlite > secrets.sqlite.zpy
The location of your identity defaults to `~/.ssh/id_rsa`. If you want to
use another identity, use the `--identity` flag:
zpy -i ~/.ssh/my_other_id encrypt secrets.txt
If your private key is encrypted with a password, you will be prompted
to enter it when you execute these commands. You can use `ssh-add` to
cache your passphrase with `ssh-agent`.
## Decrypting Files
Decrypting works the same as encrypting.
zpy decrypt secrets.txt.zpy
The `--raw` flag is not available for decryption. Zpy will automatically
determine whether the input is binary or not.
zpy decrypt secrets.sqlite.zpy > secrets.sqlite
If you have used a different identity for the encryption, provide it in
the same way:
cat secrets.txt.zpy | zpy -i ~/.ssh/my_other_id decrypt
## Updating Zpy
You should regularly check for updates. Zpy encrypted files have a version
number that ensures new versions of the program can decrypt old files, but it
will only use the newest protocol for encrypting files.
To update zpy, run:
pip install --upgrade zpy
You can find the [Changelog on GitHub](https://github.com/sfstpala/zpy/blob/master/CHANGELOG.md).
| zpy | /zpy-0.5.0.tar.gz/zpy-0.5.0/doc/getting_started.md | getting_started.md |
# Hacking
## Setting up the development environment
Zpy is written in Python 3. To set up your development environment, you need:
- Python 3 (>= 3.4)
- The Python 3 headers (e.g. `python3-dev` on Ubuntu)
- Curl, Make, gcc or clang
The compiler and headers are required to build [PyCrypto](https://github.com/dlitz/pycrypto) from source.
On Ubuntu, type:
sudo apt-get install build-essential curl python3-dev
The Makefile has everything you need to build a VirtualEnv so that you don't have to install anything else
on your system:
make
make test
make clean
The VirtualEnv is installed in the current directory. The python interpreter is `bin/python` and zpy is
available at `bin/zpy`.
To install a release version of the program from source, build the Wheel and install it using pip:
bin/python setup.py bdist_wheel
sudo python3 install dist/zpy*.whl
## Tests
Note that `make test` will fail if the code coverage is not 100%. The `test` target also runs various checks.
The tests are included with the module at `zpy.tests`.
To run the test suite exactly as it runs on Travis-CI:
tox
To run the test suite without checking code coverage:
bin/python setup.py test
| zpy | /zpy-0.5.0.tar.gz/zpy-0.5.0/doc/hacking.md | hacking.md |
Zpy shell
=================

**Next level command line shell with script languages, like python or js. Work in shell with your favorite language.**
```
(Zpy) pwd | "Current folder %s" % z | cat
Current folder /Users/XXXX/pytho-nal
(Zpy) j request = require('request')
(Zpy) j ['New York', 'Paris', 'London', 'Berlin', 'San Francisco']
|[for] j request(`http://weathers.co/api.php?city=${z}`, (err,res,body) => sync(JSON.parse(body).data))
|j z.map((data) => `${data.location} has temperature ${data.temperature} Β°F`)
|[for] echo $z >> weather.txt
['', '', '', '', '']
(Zpy) cat weather.txt
New York has temperature -7 Β°F
Paris has temperature -7 Β°F
London has temperature -7 Β°F
Berlin has temperature 4 Β°F
San Francisco has temperature 24 Β°F
```
### Demo

Combine Python and JavaScript together in the terminal is really easy! Just look at [Full Video](https://asciinema.org/a/3fam2wma6o16onjx01xdod0fe) with additional features!
### Pipeline
Zpy ideology says - pipelines make work in the terminal great again! The pipeline plays the major role in zpy. If you want to use every opportunity of Zpy, you should know a few things about the pipeline. The input command will be split by the pipeline character, each token will be evaluated by the shell, python or js interpreter, and the tokens will be chained into one chain. Zpy passes the previous token's evaluation result as stdin to the next token, and you have access to the z-variable if the token does not expect stdin. So Zpy pipes work like standard unix pipes.
### Syntax
If you want to use Zpy you should know a few rules.
* Command will be evaluated by the **unix system** if you add the **`** symbol at the beginning of the token, or your command begins with one of the [142 linux commands](http://www.mediacollege.com/linux/command/linux-command.html)
* Command will be evaluated by the [**javascript**](#javascript) language if you add `j` at the beginning of the token.
* Command will be evaluated by [**Chain Pool**](#chain-pool) if you add specific characters like `for` at the beginning of the line.
* Command will be evaluated by [**python**](#python) command **in any other case**(by default just evaluate python code - python is default language)
#### From Python to Unix
```
(Zpy) "\n".join(["Zpy so awesome #review #1e%s"%i for i in range(10)]) | grep "5\|6\|8"
Zpy so awesome #review #1e5
Zpy so awesome #review #1e6
Zpy so awesome #review #1e8
```
Generate an array with text joined with numbers from zero to ten, and join it using the `\n` character so that we can use it in a **unix pipe**, because grep consumes data split by the `\n` character. Filter the results using the **grep** command, showing only strings which contain the digits 5, 6 or 8.
```
(Zpy) "%s.%s" % ('index','php') | cat $z
cat: index.php: No such file or directory
(Zpy) "%s.%s" % ('index','cpp') | touch $z
```
Generate "index.php" as z-value, and send it to next pipe. Last pipe will be evaluated by unix system, we have access to z-variable as like path variable or stdin. So you can write `$z` to access variable `...|touch $z` or stdin `...|grep "index"`.
#### From Unix to Python
```
(Zpy) ls | z.split('\n') | filter(lambda x : 'index' in x, z) | list(z)
['index.py']
```
Get current files, convert it into array and filter it by some condition
We have access to z-variable as `z`.
#### From (Unix or Python) to Js and back
As you can see above, we have access to the `z` variable from unix or python — just use the `z` or `$z` variables; this rule works the same way in js.
```
(Zpy) 'http://weathers.co/api.php?city=New+York' | j req(z, (err,res,body) => sync(body)) | j JSON.parse(z).data | "Today is %s and current temperature %s" % (z['date'], z['temperature'])
Today is 03-12-2017 and current temperature -14
```
`python` -> `javascript`-> `javascript`-> `python`
Get current temperature and date from weather.co.
**Note** here we use `sync` function from javascript, this command will send data from **Async** function call (see description in javascript section).
#### Salad of languages
```
(Zpy) j [1,2,3,4].map((e) => `js ${e}`) | ["python + %s" %x for x in z] | "\n".join(z) | sed -e 's/$/ + bash = Zpy/'
python + js 1 + bash = Zpy
python + js 2 + bash = Zpy
python + js 3 + bash = Zpy
python + js 4 + bash = Zpy
```
How about dah? `javascript` -> `python` -> `bash`
### Requirements
* Python 3
* pip3
* compgen
* nodejs or any other js runtime.
### Install
Install via pip
```
pip3 install zpyshell
```
Install from github sources:
```
git clone [email protected]:albertaleksieiev/zpy.git
cd zpy;pip3 install -r requirements.txt
```
If you want use power of js, install [nodejs](https://nodejs.org/en/).
### Run
If you install zpy via pip just run in terminal
```
$ zpy
```
But if you install from sources, navigate to repository root folder and run it like python script
```
python3 Zpy/main.py
```
### Test
```
python3 tests/main_test.py
```
-----
## Languages
Currently zpy support 3 languages
* [Python](#python)
* [Javascript](#javascript)
* Unix shell script
* [Chain Pool](#chain-pool) (additional language)
### More languages
Now Zpy supports python and js, but in the first release, we will add more languages!
## Python
Zpy written in python, so python it's the first language which was be added and supported.
* [Imports](#python-imports)
* [Default imports](#default-imports)
* [Own modules](#adding-new-module)
* [Create python module](#1create-python-module)
* [Import module](#2add-module-to-zpy)
* [Advanced usage pipe and module](#3processing-input-from-pipe)
### Python Imports
If you want to import some modules into zpy, just add `~` at the beginning and type your import command.
```
(Zpy) ~import random,os
(Zpy) ~from PIL import Image
(Zpy) find /Users/XXXX/Pictures -name "*.jpg" | z.split('\n') | z[random.randint(0,len(z))] | Image.open(z).show()
```
Show random Image from your Pictures folder.
**Note: change /Users/XXXX/Pictures to your folder with images**
```
(Zpy) ~import os
(Zpy) pwd | os.listdir(z)
['__pycache__', 'a.txt', 'index.py', 'linux-command-to-list-all-available-commands-and-aliases', 'README.md', 'Zpy']
(Zpy) ~from Zpy.Utils import get_linux_commands
['adduser', 'arch', 'awk', 'bc', 'cal', 'cat', 'chdir', 'chgrp', 'chkconfig', 'chmod', 'chown', 'chroot', 'cksum', 'clear', 'cmp', 'comm', 'cp', 'cron', 'crontab', 'csplit', 'cut', 'date', 'dc', 'dd', 'df', 'diff', 'diff3', 'dir', 'dircolors', 'dirname', 'du', 'echo', 'ed', 'egrep', 'eject', 'env', 'expand', 'expr', 'factor', 'FALSE', 'fdformat', 'fdisk', 'fgrep', 'find', 'fmt', 'fold', 'format', 'free', 'fsck', 'gawk', 'grep', 'groups', 'gzip', 'head', 'hostname', 'id', 'info', 'install', 'join', 'kill', 'less', 'ln', 'locate', 'logname', 'lpc', 'lpr', 'lprm', 'ls', 'man', 'mkdir', 'mkfifo', 'mknod', 'more', 'mount', 'mv', 'nice', 'nl', 'nohup', 'passwd', 'paste', 'pathchk', 'pr', 'printcap', 'printenv', 'printf', 'ps', 'pwd', 'quota', 'quotacheck', 'quotactl', 'ram', 'rcp', 'rm', 'rmdir', 'rpm', 'rsync', 'screen', 'sdiff', 'sed', 'select', 'seq', 'shutdown', 'sleep', 'sort', 'split', 'su', 'sum', 'symlink', 'sync', 'tac', 'tail', 'tar', 'tee', 'test', 'time', 'touch', 'top', 'traceroute', 'tr', 'TRUE', 'tsort', 'tty', 'umount', 'uname', 'unexpand', 'uniq', 'units', 'unshar', 'useradd', 'usermod', 'users', 'uuencode', 'uudecode', 'vdir', 'watch', 'wc', 'whereis', 'which', 'who', 'whoami', 'xargs', 'yes']
```
Print all linux commands defined in zpy.
#### Default imports
If you don't want import general modules like `os` every time when you launch zpy, you can use **default imports**
You just need execute zpy method `add_def_imports`.
```
(Zpy) zpy.add_def_imports("numpy","import numpy as np")
(Zpy) zpy.get_def_imports()
numpy => import numpy as np
```
Done! When you launch Zpy, this modules will be imported automatically. Let's try evaluate something.
```
(Zpy) np.arange(20)
[ 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19]
(Zpy) np.arange(20) | np.std
5.76628129734
```
**Note** Here we use np.std without input arguments, Zpy will pass z-value as 1 argument to function and evaluate it.
Function will be evaluated with z parameter as argument by default, if return type of evaluation is function.
#### Modules
Zpy has some cool things, like modules! A module is your own script which will be imported by default. Zpy has its own zpy module.
```
(Zpy) zpy
<Zpy.languages.zpy.zpy object at 0x10268d2e8>
```
zpy is just python class, which can storage some information (like scripts).
zpy Methods :
- get_scripts() - return list of scripts
- add_script(name) - Currying `add_new_script` method, returns `add_new_script(name=name)`
- add_new_script(name, script) - create new script
- remove_script(name) - remove script
- eval(name, input='') - eval script and send input
- eval_with_input(name) - Currying `eval` method, returns `eval(name=name)`
- last_zcommand() - return last z-command. **Note** after the `last_zcommand()` method is evaluated and the value returns, the last z-command will be `last_zcommand()`
- add_module(module_name, path_to_module_file_py) - add module, it will be available from zpy like `module_name`
- get_modules() - returns all modules
- remove_module(name) - remove module by name, **file will not be deleted**
- as_table(data) - trying to convert string to table data
```
(Zpy) zpy.get_scripts()
(Zpy) zpy.add_new_script("ls", "ls -lah")
(Zpy) zpy.get_scripts()
ls => ls -lah
(Zpy) zpy.eval('ls')
total 408
drwxr-xr-x 9 albert staff 306B Feb 27 22:29 .
drwxr-xr-x 33 albert staff 1.1K Feb 24 22:47 ..
drwxr-xr-x 8 albert staff 272B Feb 27 22:36 .idea
-rw-r--r-- 1 albert staff 6.1K Feb 27 22:13 README.md
drwxr-xr-x 7 albert staff 238B Feb 27 22:35 Zpy
-rw-r--r-- 1 albert staff 685B Feb 27 22:25 index.py
-rw-r--r-- 1 albert staff 182K Feb 1 20:00 linux-command-to-list-all-available-commands-and-aliases
-rw-r--r-- 1 albert staff 36B Feb 27 15:47 random.file
-rw-r--r-- 1 albert staff 24B Feb 27 22:13 zpy.conf
```
Some advanced stuff
```
(Zpy) ~import requests, json
(Zpy) requests.get('http://finance.google.com/finance/info?client=ig&q=NSE:HDFC') | z.text | z.replace('//','') | json.loads(z)[0] | z['pcls_fix']
1375.7
(Zpy) zpy.last_zcommand()
requests.get('http://finance.google.com/finance/info?client=ig&q=NSE:HDFC') | z.text | z.replace('//','') | json.loads(z)[0] | z['pcls_fix']
(Zpy) zpy.last_zcommand()
zpy.last_zcommand()
(Zpy) requests.get('http://finance.google.com/finance/info?client=ig&q=NSE:HDFC') | z.text | z.replace('//','') | json.loads(z)[0] | z['pcls_fix']
1375.7
(Zpy) zpy.last_zcommand() | zpy.add_script("Get stock NSE:HDFC")
(Zpy) zpy.eval('Get stock NSE:HDFC')
1375.7
```
#### Adding new module
You may want to add the module to Zpy functionality written in python, in Zpy you can do this in few steps
##### 1)Create python module
```
(Zpy) pwd
/path
(Zpy) cd to
(Zpy) pwd
/path/to
(Zpy) ['def square(a):','\treturn a * a'] | "\n".join(z) | cat > some_module.py
(Zpy) cat some_module.py
def square(a):
return a * a
```
##### 2)Add module to Zpy
Run zpy method `add_module` from zpy python module.
```
(Zpy) zpy.add_module("some_model","/path/to/some_module.py")
```
Or edit **zpy.conf** file - add name and py file location to [MODULE] section :
```
....
[MODULE]
....
some_model = /path/to/some_module.py
```
*zpy.conf*
And try evaluate method from some_module
```
(Zpy) some_module.square(4)
16
```
##### 3)Processing input from pipe
Passing pipe output to your module function - really easy. You just need declare `zpy_input` in your function argument list :
```
def square(a):
return a * a
def square_from_pipe(zpy_input):
return square(zpy_input)
```
*some_module.py*
```
(Zpy) 12 | some_module.square_from_pipe
144
```
Also, we can use currying. If we want to implement a pow function, we should pass 2 variables - the base value and the exponent value. But a pipe can send only 1 variable; we can pass them as a string array and parse them inside our function **OR** we can use currying.
```
import math
def square(a):
return a * a
def square_from_pipe(zpy_input):
return square(zpy_input)
def power(base, exponent=None):
if exponent is None:
def currying_function(zpy_input):
return math.pow(base, zpy_input)
return currying_function
else:
return math.pow(base, exponent)
```
*some_module.py*
Universal function power is done! Let's test it
```
(Zpy) some_module.power(2)(2)
4.0
(Zpy) some_module.power(2)(4)
16.0
(Zpy) 5 | some_module.power(2)
32.0
```
## Javascript
Javascript one of the most popular language ever, so zpy work with them. You can use nodejs with some features like File System I/O, or other JS runtime. Special thanks for [PyExecJS](https://github.com/doloopwhile/PyExecJS)! Zpy use this module inside, so you can see the full list of available runtimes in the link above.
Everybody knows javascript uses async functions; this is a problem because pipes do not work asynchronously. This problem is solved by using the [`sync`](#sync-function) or [`sync_err`](#sync_err) functions.
* [Syntax](#js-syntax)
* [Imports](#js-imports)
* [Default imports](#js-default-imports)
* [Async](#async-to-sync)
* [`sync` function](#sync-function)
* [`sync_err` function](#sync_err)
### JS Syntax
Command will be evaluated by **javascript** command if you add `j` at begining of token.
```
(Zpy) j 2 + 3
5
```
### JS Imports
If you want to import some modules into a js chain, use `require`.
```
npm i request --global
...
(Zpy) j request = require('request')
Added new requirements : { [['request', 'request']] }
```
**Note** your modules should be installed globally
##### JS Default imports
If you want import general modules like `request` every time when you launch Zpy, you can use **default imports**.
You just need execute method `add_def_imports` from `zjs` object.
```
(Zpy) zjs.add_def_imports('request', 'require("request")')
(Zpy) zjs.get_def_imports()
request => require("request")
```
Done!
### Async to Sync
As I wrote above, we should have the ability to go from an async function to a sync function — but why can't we use async without any modification? If we want to make a request and, after the request, send data to the next chain, the request should be finished before we go to the next chain.
```
(Zpy) j [1,2,3].join("-") | z.split("-")
['1', '2', '3'] //It's work great! Cause js chain not async.
(Zpy) j request('http://weathers.co/api.php?city=New+York', (err,res,body) => "")
{'method': 'GET', 'uri': {'auth': None, 'path': '/api.php?city=New+York', 'host': 'weathers.co', 'hostname': 'weathers.co', 'hash': None, 'query': 'city=New+York', 'protocol': 'http:', 'port': 80, 'search': '?city=New+York', 'href': 'http://weathers.co/api.php?city=New+York', 'pathname': '/api.php', 'slashes': True}, 'headers': {'host': 'weathers.co'}}
```
As we can see, the result of evaluating the request function is an object with request properties like href, headers, host etc. Zpy tries to convert it to JSON format. If the result of evaluation is a `function` and this function has the attribute `skip_print='zkip_'`, zpy skips the result of evaluation (this is helpful in async function calls). **Zpy does not return the evaluation result if it is a func and the func has the property `skip_print='zkip_'`, or if we use a `sync` or `sync_err` function call.**
#### `sync` function
The `sync` command will send data from an **Async** function call and finish the current chain. It's really easy to use.
```
(Zpy) j request('http://weathers.co/api.php?city=New+York', (err,res,body) => sync(body)) | "Python retreive requests results from js `%s`" %z
Python retreive requests results from js `{"apiVersion":"1.0", "data":{ "location":"New York", "temperature":"-14", "skytext":"Light snow", "humidity":"64", "wind":"7.56 km/h", "date":"03-12-2017", "day":"Sunday" } }`
```
#### `sync_err`
Throw error from async function call.
```
(Zpy) j setTimeout(function(){ sync_err('SOME ERROR') },200)
Traceback (most recent call last):
File ....
...
execjs._exceptions.ProgramError: SOME ERROR
```
## Chain Pool
Chain pool is a programming language which has a lot of useful functions inside Zpy. To start using functions like `[for]`, just type this keyword after the pipe character `|`, like `|[for]`. Square brackets `[]` indicate the *Chain pool* function `for`. Chain pool takes **stdin** as input and does some work with it; you enter that work after the `|[CHAIN_FUNCTION]` keyword, and you can use your **favorite language** with a Chain pool function.
### `[for]` function
The `[for]` function iterates through every item in an *array* or *data split by the `\n` character* as stdin and evaluates every iterated item with any other language.
#### Syntax
**`[`** `for` **`]`** `any other language command`, where `[]` is *Chain pool* syntax, `for` - for function.
![Chain pool [for] function](https://raw.githubusercontent.com/albertaleksieiev/zpy/content/img/Chain%20Pool%20%5Bfor%5D%20function.jpg)
Here we generate data by python, simply by typing array initialization keyword, after that we use `[for]` keyword, split this array to 2 stdin arguments, evaluate shell function `ls` for every argument ('folder1' and 'folder2') and finally join result into array, and send into next chain. And in the last chain we just concatenate array by `,` character.
```
(Zpy) ['.', '..'] |[for] ls $z | ','.join(z)
LICENSE.txt
README.md
Zpy ...., parent_folder content
```
Code for diagram above. Generate array, use *Chain pool* function `for` and join results by using ',' character.
```
(Zpy) ~import numpy as np
(Zpy) np.arange(10).reshape(2,5).tolist() |[for] [for] z**2
[[0, 1, 4, 9, 16], [25, 36, 49, 64, 81]]
```
Iterate every row and column and change every value, by `power` function.
#### Examples
```
(Zpy) ~import os
(Zpy) pwd | os.listdir(z) | "Files divided by commma %s" % ",".join(z)
Files divided by commma .idea,__pycache__,a.txt,index.py,linux-command-to-list-all-available-commands-and-aliases,README.md,Zpy
```
Get current directory using shell command, pipe into python code as z-variable and print result of last chain
```
(Zpy) ~from terminaltables import AsciiTable, SingleTable
(Zpy) ls -lah | z.split('\n') | [' '.join(x.split()).split(' ') for x in z] | SingleTable(z).table
ββββββββββββββ¬βββββ¬βββββββββ¬ββββββββ¬βββββββ¬ββββββ¬ββββ¬ββββββββ¬βββββββββββββββββ
β total β 8 β β β β β β β β
ββββββββββββββΌβββββΌβββββββββΌββββββββΌβββββββΌββββββΌββββΌββββββββΌβββββββββββββββββ€
β drwxr-xr-x β 4 β albert β staff β 136B β Mar β 4 β 23:32 β . β
β drwxr-xr-x β 10 β albert β staff β 340B β Mar β 4 β 23:34 β .. β
β -rw-r--r-- β 1 β albert β staff β 0B β Mar β 4 β 23:32 β empty_file.txt β
β -rw-r--r-- β 1 β albert β staff β 9B β Mar β 4 β 23:32 β not_empy.txt β
β β β β β β β β β β
ββββββββββββββ΄βββββ΄βββββββββ΄ββββββββ΄βββββββ΄ββββββ΄ββββ΄ββββββββ΄βββββββββββββββββ
```
Convert ugly result after evaluation `ls -lah` to great table!
**Note** This functionality available inside zpy module `ls -lah | zpy.as_table`
```
(Zpy) ['http://google.com','http://yandex.ru'] |[for] j request(z, (err,res,body) => sync(body))
```
```
(Zpy) `wget -qO- http://example.com | z.split(" ") | filter(lambda x : "head" in x,z) | list(z)
['html>\n<html>\n<head>\n', '\n</head>\n\n<body>\n<div>\n']
(Zpy) `wget -qO- http://example.com | z.split(" ") | filter(lambda x : "head" in x,z) | list(z) | "Total size : %s" % len(z)
Total size : 2
```
Download content from page and count current entrance word 'head'
```
(Zpy) find ./ -name "*.py" | z.split("\n")[:2]
['.//index.py', './/Zpy/languages/LanguageAnalyzer.py']
(Zpy) find ./ -name "*.py" | z.split("\n")[:2] | "\n".join(z) |grep "an"
.//Zpy/languages/LanguageAnalyzer.py
```
First evaluation will find file in current directory and get first 2 results. Second evaluation do the same plus filter results by shell command `grep`
```
(Zpy) ~import re
(Zpy) "https://www.reddit.com/r/books/" | `wget -qO- $z | re.findall(r"Book[^\.].*?",z,re.IGNORECASE) | "COUNT : %s" % len(z)
COUNT : 645
```
```
(Zpy) ~import uuid
(Zpy) uuid.uuid4() | str(z) | cat > random.file
(Zpy) cat random.file
7ff48f51-b31d-44c2-9aaf-428a63099739
```
### **Danger tricks** (Do not evaluate)
```
(Zpy) ~from Zpy.Utils import get_linux_commands
(Zpy) ~import random,os
(Zpy) get_linux_commands() | z[random.randint(0,len(z))] | os.system(z)
staff com.apple.sharepoint.group.1 everyone localaccounts _appserverusr admin _appserveradm _lpadmin _appstore _lpoperator _develope...
```
Get all shell commands declared in Zpy, and execute random one
```
(Zpy) ~import random,os
(Zpy) ['you are lucky','displaysleepnow','lock'] | z[random.randint(0,len(z))] | os.system("pmset %s" %z)
0
```
If you run on OSX, 33% nothing happens
### License
The module is available as open source under the terms of the [MIT License](http://opensource.org/licenses/MIT).
| zpyshell | /zpyshell-0.1.2.0.zip/zpyshell-0.1.2.0/README.md | README.md |
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.