max_stars_repo_path (string, 4-245 chars) | max_stars_repo_name (string, 7-115 chars) | max_stars_count (int64, 101-368k) | id (string, 2-8 chars) | content (string, 6-1.03M chars) |
---|---|---|---|---|
qt__pyqt__pyside__pyqode/test_sql_fetchMore__QSqlTableModel_QSqlQueryModel/main__QSqlQueryModel.py | gil9red/SimplePyScripts | 117 | 12767950 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
from PyQt5.QtWidgets import QApplication, QTableView
from PyQt5.QtSql import QSqlDatabase, QSqlQueryModel, QSqlQuery
db = QSqlDatabase.addDatabase('QSQLITE')
db.setDatabaseName('database.sqlite')
if not db.open():
raise Exception(db.lastError().text())
TABLE = 'word2emoji'
query = QSqlQuery()
query.exec(f'SELECT COUNT(*) FROM {TABLE}')
query.next()
TABLE_ROW_COUNT = query.value(0)
def update_window_title():
mw.setWindowTitle(f'{model.rowCount()} / {TABLE_ROW_COUNT}')
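# Hypothetical helper (not part of the original script): QSqlQueryModel loads rows
# lazily in 256-row batches, so model.rowCount() starts below TABLE_ROW_COUNT.
# Calling fetchMore() in a loop would force every remaining row to load:
def fetch_all_rows(model):
    while model.canFetchMore():
        model.fetchMore()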
app = QApplication([])
model = QSqlQueryModel()
model.rowsInserted.connect(update_window_title)
model.setQuery(f"SELECT * FROM {TABLE}")
mw = QTableView()
mw.setEditTriggers(QTableView.NoEditTriggers)
mw.setModel(model)
mw.resize(600, 480)
mw.show()
update_window_title()
app.exec()
|
dbaas/maintenance/admin/database_maintenance_task.py | didindinn/database-as-a-service | 303 | 12767963 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.contrib import admin
from django.core.urlresolvers import reverse
from django.utils.html import format_html
from ..models import DatabaseMaintenanceTask
class DatabaseMaintenanceTaskAdmin(admin.ModelAdmin):
list_select_related = None
search_fields = ("database__name", "task__id", "task__task_id")
list_filter = [
"database__team", "status",
]
exclude = ("task", "can_do_retry")
actions = None
list_display = (
"database", "database_team", "current_step", "friendly_status",
"maintenance_action", "link_task", "started_at", "finished_at"
)
readonly_fields = (
"database", "link_task", "started_at", "finished_at",
"current_step", "status", "maintenance_action"
)
ordering = ["-started_at"]
def friendly_status(self, maintenance_task):
html_waiting = '<span class="label label-warning">Waiting</span>'
html_running = '<span class="label label-success">Running</span>'
html_error = '<span class="label label-important">Error</span>'
html_success = '<span class="label label-info">Success</span>'
html_rollback = '<span class="label label-info">Rollback</span>'
html_status = ''
if maintenance_task.status == DatabaseMaintenanceTask.WAITING:
html_status = html_waiting
elif maintenance_task.status == DatabaseMaintenanceTask.RUNNING:
html_status = html_running
elif maintenance_task.status == DatabaseMaintenanceTask.ERROR:
html_status = html_error
elif maintenance_task.status == DatabaseMaintenanceTask.SUCCESS:
html_status = html_success
elif maintenance_task.status == DatabaseMaintenanceTask.ROLLBACK:
html_status = html_rollback
return format_html(html_status)
friendly_status.short_description = "Status"
def database_team(self, maintenance_task):
return maintenance_task.database.team.name
database_team.short_description = "Team"
def link_task(self, maintenance_task):
url = reverse(
'admin:notification_taskhistory_change',
args=[maintenance_task.task.id]
)
return format_html(
'<a href="{}">{}</a>', url, maintenance_task.task.id
)
link_task.short_description = "Task"
def has_delete_permission(self, request, obj=None):
return False
def has_add_permission(self, request, obj=None):
return False
def maintenance_action(self, maintenance_task):
raise NotImplementedError()
maintenance_action.short_description = "Action"
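# Note: this ModelAdmin is presumably registered elsewhere in the project; a standard
# Django registration (illustrative only, not taken from this file) would look like:
#     admin.site.register(DatabaseMaintenanceTask, DatabaseMaintenanceTaskAdmin)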
|
components/isceobj/StripmapProc/runCoherence.py | vincentschut/isce2 | 1,133 | 12767967 |
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Copyright 2012 California Institute of Technology. ALL RIGHTS RESERVED.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# United States Government Sponsorship acknowledged. This software is subject to
# U.S. export control laws and regulations and has been classified as 'EAR99 NLR'
# (No [Export] License Required except when exporting to an embargoed country,
# end user, or in support of a prohibited end use). By downloading this software,
# the user agrees to comply with all applicable U.S. export laws and regulations.
# The user has the responsibility to obtain export licenses, or other export
# authority as may be required before exporting this software to any 'EAR99'
# embargoed foreign country or citizen of those countries.
#
# Author: <NAME>
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
import logging
import operator
import sys
import isceobj
from iscesys.ImageUtil.ImageUtil import ImageUtil as IU
from mroipac.correlation.correlation import Correlation
logger = logging.getLogger('isce.insar.runCoherence')
## mapping from algorithm method to Correlation instance method name
CORRELATION_METHOD = {
'phase_gradient' : operator.methodcaller('calculateEffectiveCorrelation'),
'cchz_wave' : operator.methodcaller('calculateCorrelation')
}
def runCoherence(self, method="phase_gradient"):
logger.info("Calculating Coherence")
# Initialize the amplitude
# resampAmpImage = self.insar.resampAmpImage
# ampImage = isceobj.createAmpImage()
# IU.copyAttributes(resampAmpImage, ampImage)
# ampImage.setAccessMode('read')
# ampImage.createImage()
#ampImage = self.insar.getResampOnlyAmp().copy(access_mode='read')
# Initialize the flattened interferogram
topoflatIntFilename = self.insar.topophaseFlatFilename
intImage = isceobj.createIntImage()
#widthInt = self.insar.resampIntImage.getWidth()
widthInt = self.insar.topophaseFlatFilename.getWidth()
intImage.setFilename(topoflatIntFilename)
intImage.setWidth(widthInt)
intImage.setAccessMode('read')
intImage.createImage()
# Create the coherence image
cohFilename = topoflatIntFilename.replace('.flat', '.cor')
cohImage = isceobj.createOffsetImage()
cohImage.setFilename(cohFilename)
cohImage.setWidth(widthInt)
cohImage.setAccessMode('write')
cohImage.createImage()
cor = Correlation()
cor.configure()
cor.wireInputPort(name='interferogram', object=intImage)
#cor.wireInputPort(name='amplitude', object=ampImage)
cor.wireOutputPort(name='correlation', object=cohImage)
cohImage.finalizeImage()
intImage.finalizeImage()
#ampImage.finalizeImage()
try:
CORRELATION_METHOD[method](cor)
except KeyError:
print("Unrecognized correlation method")
sys.exit(1)
pass
return None
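# The CORRELATION_METHOD table above dispatches on the method name via
# operator.methodcaller; a standalone sketch of the same pattern (illustrative only):
#     call = operator.methodcaller('calculateCorrelation')
#     call(cor)  # equivalent to cor.calculateCorrelation()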
|
api/tests/opentrons/protocol_engine/actions/test_action_dispatcher.py | anuwrag/opentrons | 235 | 12767978 |
"""Tests for the protocol engine's ActionDispatcher."""
from decoy import Decoy
from opentrons.protocol_engine.actions import (
ActionDispatcher,
ActionHandler,
PlayAction,
)
def test_sink(decoy: Decoy) -> None:
"""It should send all actions to the sink handler."""
action = PlayAction()
sink = decoy.mock(cls=ActionHandler)
subject = ActionDispatcher(sink=sink)
subject.dispatch(action)
decoy.verify(sink.handle_action(action))
def test_add_handler(decoy: Decoy) -> None:
"""It should actions to handlers before the sink."""
action = PlayAction()
handler_1 = decoy.mock(cls=ActionHandler)
handler_2 = decoy.mock(cls=ActionHandler)
sink = decoy.mock(cls=ActionHandler)
subject = ActionDispatcher(sink=sink)
subject.add_handler(handler_1)
subject.add_handler(handler_2)
subject.dispatch(action)
decoy.verify(
handler_1.handle_action(action),
handler_2.handle_action(action),
sink.handle_action(action),
)
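# Illustrative sketch (an assumption, not the real opentrons implementation): a minimal
# dispatcher that would satisfy the two tests above by calling handlers in order and
# the sink last.
class _SketchActionDispatcher:
    def __init__(self, sink):
        self._sink = sink
        self._handlers = []

    def add_handler(self, handler):
        self._handlers.append(handler)

    def dispatch(self, action):
        for handler in self._handlers:
            handler.handle_action(action)
        self._sink.handle_action(action)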
|
core/scansf.py | faslan1234/socialfish | 2,970 | 12767980 |
import nmap
import requests
def nScan(ip):
nm = nmap.PortScanner()
nm.scan(ip, arguments="-F")
for host in nm.all_hosts():
ports = []
protocols = []
states = []
for proto in nm[host].all_protocols():
protocols.append(proto)
lport = nm[host][proto].keys()
for port in lport:
ports.append(port)
states.append(nm[host][proto][port]['state'])
po = []
for p in ports:
n = {
"Port": str(p),
"Name": nm[host][proto][p]['name'],
"Reason": nm[host][proto][p]['reason'],
"State": nm[host][proto][p]['state']
}
po.append(n)
return po
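# Example usage sketch (assumes python-nmap and the nmap binary are installed, and that
# you are authorized to scan the target host):
#     for entry in nScan("127.0.0.1"):
#         print(entry["Port"], entry["State"], entry["Name"])
|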
warehouse/classifiers/models.py | fairhopeweb/warehouse | 3,103 | 12767982 |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sqlalchemy import CheckConstraint, Column, Integer, Text
from warehouse import db
from warehouse.utils.attrs import make_repr
class Classifier(db.ModelBase):
__tablename__ = "trove_classifiers"
__table_args__ = (
CheckConstraint(
"classifier not ilike 'private ::%'",
name="ck_disallow_private_top_level_classifier",
),
)
__repr__ = make_repr("classifier")
id = Column(Integer, primary_key=True, nullable=False)
classifier = Column(Text, unique=True)
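# Hypothetical usage sketch (assumes a SQLAlchemy session from the surrounding app; not
# part of this module):
#     session.query(Classifier).order_by(Classifier.classifier).all()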
|
tests/data/okta/adminroles.py | sckevmit/cartography | 2,322 | 12767997 |
LIST_ASSIGNED_USER_ROLE_RESPONSE = """
[
{
"id": "IFIFAX2BIRGUSTQ",
"label": "Application Administrator",
"type": "APP_ADMIN",
"status": "ACTIVE",
"created": "2019-02-06T16:17:40.000Z",
"lastUpdated": "2019-02-06T16:17:40.000Z",
"assignmentType": "USER",
"_links": {
"assignee": {
"href": "http://{yourOktaDomain}/api/v1/users/00ur32Vg0fvpyHZeQ0g3"
}
}
},
{
"id": "JBCUYUC7IRCVGS27IFCE2SKO",
"label": "Help Desk Administrator",
"type": "HELP_DESK_ADMIN",
"status": "ACTIVE",
"created": "2019-02-06T16:17:40.000Z",
"lastUpdated": "2019-02-06T16:17:40.000Z",
"assignmentType": "USER",
"_links": {
"assignee": {
"href": "http://{yourOktaDomain}/api/v1/users/00ur32Vg0fvpyHZeQ0g3"
}
}
}
]
"""
LIST_ASSIGNED_GROUP_ROLE_RESPONSE = """
[
{
"id": "IFIFAX2BIRGUSTQ",
"label": "Application Administrator",
"type": "APP_ADMIN",
"status": "ACTIVE",
"created": "2019-02-27T14:48:59.000Z",
"lastUpdated": "2019-02-27T14:48:59.000Z",
"assignmentType": "GROUP",
"_links": {
"assignee": {
"href": "http://{yourOktaDomain}/api/v1/groups/00gsr2IepS8YhHRFf0g3"
}
}
},
{
"id": "JBCUYUC7IRCVGS27IFCE2SKO",
"label": "Help Desk Administrator",
"type": "HELP_DESK_ADMIN",
"status": "ACTIVE",
"created": "2019-02-06T16:17:40.000Z",
"lastUpdated": "2019-02-06T16:17:40.000Z",
"assignmentType": "GROUP",
"_links": {
"assignee": {
"href": "http://{yourOktaDomain}/api/v1/users/00ur32Vg0fvpyHZeQ0g3"
}
}
}
]
"""
|
stonesoup/metricgenerator/__init__.py | Red-Portal/Stone-Soup-1 | 157 | 12768007 |
# -*- coding: utf-8 -*-
from .base import MetricGenerator
__all__ = ['MetricGenerator']
|
scripts/ui/common.py | Hiwatts/facebook360_dep | 221 | 12768030 |
#!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
"""Common functions used across the UI tabs.
The UI shares several common functions across its tabs. Unlike dep_util, this file
contains functions that specifically reference elements in the tab. This means, if
further extension of the UI is pursued, this file should be reserved for common
functions that are *explicitly* tied to the UI and dep_util for functions that could
be used in contexts outside the UI.
"""
import collections
import datetime
import glob
import os
import shutil
import subprocess
import sys
from PyQt5 import QtCore, QtWidgets
dir_scripts = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
dir_root = os.path.dirname(dir_scripts)
sys.path.append(dir_root)
sys.path.append(os.path.join(dir_scripts, "aws"))
sys.path.append(os.path.join(dir_scripts, "render"))
sys.path.append(os.path.join(dir_scripts, "util"))
import dep_util
import glog_check as glog
import scripts.render.config as config
from log_reader import LogReader
from scripts.aws.create import (
get_staging_info,
get_render_pid,
has_render_flag,
run_ssh_command,
)
from scripts.aws.util import AWSUtil
from scripts.render.network import LAN
from scripts.util.system_util import (
get_flags,
get_flags_from_flagfile,
image_type_paths,
run_command,
)
from slider_image_thresholds import SliderWidget
script_dir = os.path.dirname(os.path.realpath(__file__))
scripts_dir = os.path.abspath(os.path.join(script_dir, os.pardir))
dep_dir = os.path.join(scripts_dir, os.pardir)
dep_bin_dir = os.path.join(dep_dir, "build", "bin")
dep_res_dir = os.path.join(dep_dir, "res")
dep_flags_dir = os.path.join(dep_res_dir, "flags")
os.makedirs(dep_flags_dir, exist_ok=True)
source_root = os.path.join(dep_dir, "source")
depth_est_src = os.path.join(source_root, "depth_estimation")
render_src = os.path.join(source_root, "render")
render_scripts = os.path.join(scripts_dir, "render")
type_color_var = "color_variance"
type_fg_mask = "fg_mask"
threshold_sliders = {
# attr: type, printed name, slider index, max value, default value
"noise": [type_color_var, "Noise variance", 1, 1.5e-3, 4e-5],
"detail": [type_color_var, "Detail variance", 2, 2e-2, 1e-3],
"blur": [type_fg_mask, "Blur radius", 1, 20, 2],
"closing": [type_fg_mask, "Closing size", 2, 20, 4],
"thresh": [type_fg_mask, "Threshold", 3, 1, 3e-2],
}
def init(parent):
"""Sets up all the UI global internals (logs, data, and flags) and any
tab specific components.
Args:
parent (App(QDialog)): Object corresponding to the parent UI element.
"""
parent.is_refreshing_data = True
parent.initialize_paths()
parent.set_default_top_level_paths()
parent.setup_logs()
parent.setup_data()
parent.setup_flags()
if "retrieve_missing_flagfiles" in dir(parent):
parent.retrieve_missing_flagfiles()
if "add_default_flags" in dir(parent):
parent.add_default_flags()
if "setup_thresholds" in dir(parent):
parent.setup_thresholds()
if "add_data_type_validators" in dir(parent):
parent.add_data_type_validators()
if "setup_farm" in dir(parent):
parent.setup_farm()
if "update_run_button_text" in dir(parent):
parent.update_run_button_text()
parent.is_refreshing_data = False
def setup_aws_config(parent):
"""Sets up the configuration of the Kubernetes cluster.
Args:
parent (App(QDialog)): Object corresponding to the parent UI element.
"""
if parent.parent.is_aws:
create_flagfile = os.path.join(
parent.path_flags, parent.app_name_to_flagfile[parent.app_aws_create]
)
if os.path.exists(create_flagfile):
create_flags = get_flags_from_flagfile(create_flagfile)
if "cluster_size" in create_flags:
spin_num_workers = getattr(
parent.dlg, f"spin_{parent.tag}_farm_num_workers", None
)
spin_num_workers.setValue(int(create_flags["cluster_size"]))
if "instance_type" in create_flags:
dd_ec2 = getattr(parent.dlg, f"dd_{parent.tag}_farm_ec2", None)
dd_ec2.setCurrentText(create_flags["instance_type"])
def setup_farm(parent):
"""Sets up the UI to interact with a LAN cluster.
Args:
parent (App(QDialog)): Object corresponding to the parent UI element.
"""
initialize_farm_groupbox(parent)
ip_begin, _ = parent.parent.ui_flags.master.rsplit(".", 1)
parent.lan = LAN(f"{ip_begin}.255")
def get_tooltip(parent, app_name):
"""Gets the help tooltip display of a binary.
Args:
parent (App(QDialog)): Object corresponding to the parent UI element.
app_name (str): Name of the binary.
Returns:
str: Help from the binary.
"""
dir = scripts_dir if app_name.endswith(".py") else dep_bin_dir
tooltip = dep_util.get_tooltip(os.path.join(dir, app_name))
if not tooltip:
parent.log_reader.log_warning(f"Cannot get tooltip for: {app_name}")
return tooltip
def initialize_paths(parent):
"""Initializes paths for scripts and flags depending on the specified tab.
Args:
parent (App(QDialog)): Object corresponding to the parent UI element.
"""
tag = parent.tag
parent.app_name_to_flagfile = {}
if tag in ["bg", "depth", "export"]:
parent.app_name = "render/render.py"
if tag in ["depth", "export"]:
parent.app_aws_clean = "aws/clean.py"
parent.app_aws_create = "aws/create.py"
parent.app_name_to_flagfile[parent.app_aws_clean] = "clean.flags"
if tag == "calibrate":
parent.app_name = "Calibration"
parent.flagfile_basename = "calibration.flags"
elif tag == "bg":
parent.flagfile_basename = "render_background.flags"
elif tag == "depth":
parent.flagfile_basename = "render_depth.flags"
parent.app_name_to_flagfile[parent.app_aws_create] = "aws_create_video.flags"
elif tag == "export":
parent.flagfile_basename = "render_export.flags"
parent.app_name_to_flagfile[parent.app_aws_create] = "aws_create_export.flags"
parent.app_aws_download_meshes = "aws/download_meshes.py"
parent.app_name_to_flagfile[
parent.app_aws_download_meshes
] = "download_meshes.flags"
parent.app_name_to_flagfile[parent.app_name] = parent.flagfile_basename
parent.tooltip = get_tooltip(parent, parent.app_name)
parent.is_refreshing_data = False
parent.is_process_killed = False
parent.threshs_tooltip = "Click and drag to pan, scroll to zoom in and out"
parent.script_dir = script_dir
def setup_logs(parent):
"""Sets up logging system for dialog on the specified tab.
Args:
parent (App(QDialog)): Object corresponding to the parent UI element.
Returns:
LogReader: Reader configured for the current tab.
"""
tag = parent.tag
qt_text_edit = getattr(parent.dlg, f"text_{tag}_log", None)
qt_tab_widget = getattr(parent.dlg, f"w_{tag}_preview", None)
tab_idx = qt_tab_widget.count() - 1 # log is always the last tab
ts = dep_util.get_timestamp("%Y%m%d%H%M%S.%f")
name = parent.__class__.__name__
log_file = os.path.join(parent.path_logs, f"{name}_{ts}")
log_reader = LogReader(qt_text_edit, parent, log_file)
log_reader.set_tab_widget(qt_tab_widget, tab_idx)
return log_reader
def setup_flagfile_tab(parent):
"""Sets up the flags according to the corresponding flagfile on the specified tab.
Args:
parent (App(QDialog)): Object corresponding to the parent UI element.
"""
tag = parent.tag
dlg = parent.dlg
qt_text_edit = getattr(dlg, f"text_{tag}_flagfile_edit", None)
qt_btn_save = getattr(dlg, f"btn_{tag}_flagfile_save", None)
qt_text_edit.textChanged.connect(parent.on_changed_flagfile_edit)
qt_btn_save.clicked.connect(parent.save_flag_file)
qt_btn_save.setEnabled(False)
def setup_file_explorer(parent):
"""Creates the file explorer rooted on the specified tab.
Args:
parent (App(QDialog)): Object corresponding to the parent UI element.
"""
dlg = parent.dlg
parent.fs_tree = dlg.tree_file_explorer
path = parent.path_project
parent.fs_model, parent.fs_tree = dep_util.setup_file_explorer(parent.fs_tree, path)
parent.fs_tree.clicked.connect(lambda: preview_file(parent))
def preview_file(parent):
"""Displays the file and its label on the specified tab.
Args:
parent (App(QDialog)): Object corresponding to the parent UI element.
"""
dlg = parent.dlg
frame = dlg.label_preview_image
label = dlg.label_preview_path
project = parent.path_project
prefix = f"{project}/"
dep_util.preview_file(parent.fs_model, parent.fs_tree, frame, label, prefix)
def switch_ui_elements_for_processing(parent, gb, state):
"""Switches element interaction when processing on the specified tab.
Args:
parent (App(QDialog)): Object corresponding to the parent UI element.
gb (QtWidgets.QGroupBox): Group box for the tab.
state (str): Identifier of the callback state.
"""
# Buttons
parent.update_buttons(gb)
# Switch all other sections, except the file explorer
dlg = parent.dlg
for gbi in dlg.findChildren(QtWidgets.QGroupBox):
if gbi != gb and not gbi.objectName().endswith("_file_explorer"):
gbi.setEnabled(state)
# Switch current group box elements
prefixes = ["cb_", "dd_", "val_", "label_"]
dep_util.switch_objects_prefix(gb, prefixes, state)
# Switch tabs that are not image preview or log
for w in dlg.findChildren(QtWidgets.QWidget):
name = w.objectName()
ignore = name.endswith("_preview") or name.endswith("_log")
if name.startswith("tab_") and not ignore:
w.setEnabled(state)
# Switch other sections
for s in parent.parent.sections:
if s != parent:
dep_util.set_tab_enabled(parent.dlg.w_steps, s.tag, state)
def cancel_process(parent):
"""Stops a running process on the specified tab.
Args:
parent (App(QDialog)): Object corresponding to the parent UI element.
"""
running_render = False # Render has to be explicitly killed since it runs detached
if parent.is_farm and parent.is_aws:
processes = parent.log_reader.get_processes()
for process in processes:
if process == "run_aws_create" or process.startswith("run_export"):
running_render = True
if running_render:
aws_util = AWSUtil(
parent.path_aws_credentials, region_name=parent.parent.aws_util.region_name
)
_, ip_staging = get_staging_info(aws_util, parent.path_aws_ip_file)
if ip_staging:
render_pid = get_render_pid(parent.path_aws_key_fn, ip_staging)
if render_pid is not None:
run_ssh_command(
parent.path_aws_key_fn, ip_staging, f"kill -9 {render_pid}"
)
parent.log_reader.kill_all_processes()
parent.is_process_killed = True
if "reset_run_button_text" in dir(parent):
parent.reset_run_button_text()
def is_cloud_running_process(parent):
"""Checks if a render process is being run on the cloud"""
key_fn = parent.path_aws_key_fn
if not parent.is_aws or not parent.is_farm or not os.path.isfile(key_fn):
return False
aws_util = AWSUtil(
parent.path_aws_credentials, region_name=parent.parent.aws_util.region_name
)
_, ip_staging = get_staging_info(
aws_util, parent.path_aws_ip_file, start_instance=False
)
if not ip_staging:
return False
tag = parent.tag
if tag not in ["depth", "export"]:
return False
flag = "run_depth_estimation"
value = tag == "depth"
return has_render_flag(key_fn, ip_staging, flag, value)
def sync_with_s3(parent, gb, subdirs):
"""Synchronizes data from the local directory to S3.
Args:
parent (App(QDialog)): Object corresponding to the parent UI element.
gb (QtWidgets.QGroupBox): Group box for the tab.
subdirs (list[str]): Local path to be synced.
"""
run_silently = not parent.parent.ui_flags.verbose
cmds = []
parent.log_reader.log_notice(f"Syncing frames with S3...")
for subdir in subdirs:
local = os.path.join(config.DOCKER_INPUT_ROOT, subdir)
remote = os.path.join(parent.parent.ui_flags.project_root, subdir)
if "_levels" in subdir:
locals = [
os.path.join(local, f"level_{l}") for l in range(len(config.WIDTHS))
]
else:
locals = [local]
# Tar frames
tar_app_path = os.path.join(scripts_dir, "util", "tar_frame.py")
for local_i in locals:
frames = dep_util.get_frame_list(local_i)
if not frames:
if not run_silently:
print(glog.yellow(f"No frames found for S3 syncing in {local_i}"))
continue
for frame in frames:
cmds.append(f"python3.7 {tar_app_path} --src={local_i} --frame={frame}")
cmds.append(f"aws s3 sync {local} {remote} --exclude '*' --include '*.tar'")
p_id = f"sync_results_s3_{parent.tag}"
cmd_and = " && ".join(cmds)
cmd = f'/bin/sh -c "{cmd_and}"'
start_process(parent, cmd, gb, p_id, run_silently)
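# For reference, the shell command assembled above takes roughly this shape
# (paths are illustrative, not taken from a real run):
#     /bin/sh -c "python3.7 .../util/tar_frame.py --src=<level_dir> --frame=000000 && \
#         aws s3 sync <local> <remote> --exclude '*' --include '*.tar'"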
def on_process_finished(parent, p_id):
"""Callback event handler for a process completing on the specified tab.
Args:
parent (App(QDialog)): Object corresponding to the parent UI element.
p_id (str): PID of the completed process.
"""
if not p_id or p_id.startswith("run"):
parent.log_reader.remove_processes()
else:
parent.log_reader.remove_process(p_id)
parent.refresh_data()
if p_id.startswith("run") and "_export_" not in p_id:
if "update_frame_names" in dir(parent):
parent.update_frame_names()
if "sync_with_s3" in dir(parent) and not parent.is_process_killed:
if parent.parent.is_aws:
parent.sync_with_s3()
if len(parent.log_reader.get_processes()) == 0:
# Re-enable UI elements
switch_ui_elements_for_processing(parent, parent.log_reader.gb, True)
# We may have data to enable other tabs
if p_id.startswith("run"):
[s.refresh_data() for s in parent.parent.sections if s != parent]
if "update_run_button_text" in dir(parent):
parent.update_run_button_text()
parent.is_process_killed = False
def populate_dropdown(parent, gb, dd):
"""Populates a dropdown on the specified tab.
Args:
parent (App(QDialog)): Object corresponding to the parent UI element.
gb (QtWidgets.QGroupBox): Group box for the tab.
dd (QtWidgets.QComboBox): Dropdown UI element.
"""
project = parent.parent.path_project
t = dep_util.remove_prefix(gb.objectName(), "gb_")
dd_prev_text = dd.currentText() if dd.count() > 0 else ""
tag = dep_util.remove_prefix(dd.objectName(), f"dd_{t}_")
ps = parent.get_files(tag)
dep_util.populate_dropdown(dd, ps, f"{project}/")
dep_util.update_qt_dropdown(dd, dd_prev_text, add_if_missing=False)
def populate_dropdowns(parent, gb, dd_first=None):
"""Populates the dropdowns on the specified tab.
Args:
parent (App(QDialog)): Object corresponding to the parent UI element.
gb (QtWidgets.QGroupBox): Group box for the tab.
dd_first (list[QtWidgets.QGroupBox], optional): Dropdowns to populate first.
"""
if not dd_first:
dd_first = []
for dd in dd_first:
populate_dropdown(parent, gb, dd)
for dd in gb.findChildren(QtWidgets.QComboBox):
if dd not in dd_first:
populate_dropdown(parent, gb, dd)
def refresh_data(parent):
"""Updates UI elements to be in sync with data on disk on the specified tab.
Args:
parent (App(QDialog)): Object corresponding to the parent UI element.
"""
tag = parent.tag
dlg = parent.dlg
tab = getattr(dlg, f"t_{tag}", None)
if tag in ["bg", "depth", "export"]:
parent.path_rig_json = get_calibrated_rig_json(parent)
if tag == "depth":
parent.update_bg_checkbox()
# This locks the dropdown callbacks while we re-populate them
parent.is_refreshing_data = True
for gb in tab.findChildren(QtWidgets.QGroupBox):
gb.setEnabled(True)
parent.populate_dropdowns(gb)
parent.update_buttons(gb)
if "flagfile_fn" in dir(parent):
sync_data_and_flagfile(parent, parent.flagfile_fn)
parent.disable_tab_if_no_data()
parent.is_refreshing_data = False
def update_flagfile_edit(parent, flagfile_fn, switch_to_flag_tab=False):
"""Updates the edit box for the flagfile on the specified tab.
Args:
parent (App(QDialog)): Object corresponding to the parent UI element.
flagfile_fn (str): Name of the flagfile.
switch_to_flag_tab (bool, optional): Whether or not to switch tabs after updating.
"""
if not os.path.isfile(flagfile_fn):
return
tag = parent.tag
dlg = parent.dlg
text = getattr(dlg, f"text_{tag}_flagfile_edit", None)
preview = getattr(dlg, f"w_{tag}_preview", None)
text.setPlainText(open(flagfile_fn).read())
if switch_to_flag_tab:
preview.setCurrentIndex(1)
def update_data_or_flags(
parent, flagfile_fn, flagfile_from_data, switch_to_flag_tab=False
):
"""Updates the flagfile from the UI elements or vice versa on the specified tab.
Args:
parent (App(QDialog)): Object corresponding to the parent UI element.
flagfile_fn (str): Name of the flagfile.
flagfile_from_data (bool): Whether to load the flagfile from the data (True) or
vice versa (False).
switch_to_flag_tab (bool, optional): Whether or not to switch tabs after updating.
"""
if not flagfile_fn:
return
flags = get_flags_from_flagfile(flagfile_fn)
if flagfile_from_data:
parent.update_flags_from_data(flags)
else:
parent.update_data_from_flags(flags)
if flagfile_from_data:
# Overwrite flag file
sorted_flags = collections.OrderedDict(sorted(flags.items()))
dep_util.write_flagfile(flagfile_fn, sorted_flags)
# Refresh flagfile edit window
parent.update_flagfile_edit(flagfile_fn, switch_to_flag_tab)
def sync_data_and_flagfile(
parent, flagfile_fn, set_label=True, switch_to_flag_tab=False
):
"""Synchronizes displayed UI elements and contents of the flagfile.
Args:
parent (App(QDialog)): Object corresponding to the parent UI element.
flagfile_fn (str): Name of the flagfile.
set_label (bool, optional): Whether or not to update the flagfile label in the UI.
switch_to_flag_tab (bool, optional): Whether or not to switch tabs after updating.
"""
tag = parent.tag
dlg = parent.dlg
label = getattr(dlg, f"label_{tag}_flagfile_path", None)
flagfile = os.path.basename(flagfile_fn)
label.setText(flagfile)
# flag file to data first, then data to flag file for missing info
flagfile_from_data = False
parent.update_data_or_flags(flagfile_fn, flagfile_from_data, switch_to_flag_tab)
parent.update_data_or_flags(flagfile_fn, not flagfile_from_data, switch_to_flag_tab)
def disable_tab_if_no_data(parent, btn_run):
"""Prevents navigation to the tab if the required data is not present on the specified tab.
Args:
parent (App(QDialog)): Object corresponding to the parent UI element.
btn_run (QtWidgets.QPushButton): UI button for tab switch.
"""
if not btn_run.isEnabled():
dep_util.set_tab_enabled(parent.dlg.w_steps, parent.tag, enabled=False)
def setup_project(parent, mkdirs=False):
"""Retrieves any missing flagfiles and sets the default flags on the specified tab.
Args:
parent (App(QDialog)): Object corresponding to the parent UI element.
mkdirs (bool, optional): Whether or not to make the defined directories.
"""
parent.is_refreshing_data = True
parent.log_reader.log_header()
parent.refresh_data()
parent.is_refreshing_data = False
def save_flag_file(parent, flagfile_fn):
"""Saves flagfile from the UI to disk on the specified tab.
Args:
parent (App(QDialog)): Object corresponding to the parent UI element.
flagfile_fn (str): Name of the flagfile.
"""
if not os.path.isfile(flagfile_fn):
return
tag = parent.tag
dlg = parent.dlg
text_edit = getattr(dlg, f"text_{tag}_flagfile_edit", None)
btn_save = getattr(dlg, f"btn_{tag}_flagfile_save", None)
with open(flagfile_fn, "w") as f:
f.write(text_edit.toPlainText())
f.close()
# Disable save button
btn_save.setEnabled(False)
# Update corresponding groupbox
flagfile_from_data = False # flagfile to data
parent.update_data_or_flags(flagfile_fn, flagfile_from_data)
def update_flagfile(parent, flagfile_fn):
"""Updates the edit box for the flagfile on the specified tab.
Args:
parent (App(QDialog)): Object corresponding to the parent UI element.
flagfile_fn (str): Name of the flagfile.
"""
parent.update_data_or_flags(flagfile_fn, flagfile_from_data=True)
def retrieve_missing_flagfiles(parent):
"""Copies the missing flagfiles to project for local modification on the specified tab.
Args:
parent (App(QDialog)): Object corresponding to the parent UI element.
"""
tag = parent.tag
if tag == "calibrate":
ff_base = "calibration.flags"
elif tag in ["bg", "depth", "export"]:
ff_base = "render.flags"
ffs_expected = [[ff_base, parent.flagfile_fn]]
if tag in ["depth", "export"]:
ff_aws_create = os.path.join(
parent.path_flags, parent.app_name_to_flagfile[parent.app_aws_create]
)
ffs_expected.append(["aws_create.flags", ff_aws_create])
for ff_src_rel, ff_dst_abs in ffs_expected:
if not os.path.isfile(ff_dst_abs):
ff_src_abs = os.path.join(dep_flags_dir, ff_src_rel)
os.makedirs(os.path.dirname(ff_dst_abs), exist_ok=True)
shutil.copyfile(ff_src_abs, ff_dst_abs)
update_flagfile(parent, ff_dst_abs)
def add_default_flags(parent):
"""Retrieves the default flags to the local flagfile on the specified tab from
either the source or scripts binaries.
Args:
parent (App(QDialog)): Object corresponding to the parent UI element.
"""
default_flags = {}
tag = parent.tag
if tag in ["bg", "depth"]:
default_flags.update(
{
os.path.join(depth_est_src, "DerpCLI.cpp"): {
"max_depth_m",
"min_depth_m",
"resolution",
"var_high_thresh",
"var_noise_floor",
}
}
)
if tag == "depth":
default_flags.update(
{
os.path.join(render_scripts, "setup.py"): {"do_temporal_filter"},
os.path.join(depth_est_src, "TemporalBilateralFilter.cpp"): {
"time_radius"
},
os.path.join(render_src, "GenerateForegroundMasks.cpp"): {
"blur_radius",
"morph_closing_size",
"threshold",
},
}
)
elif tag == "export":
default_flags.update(
{
os.path.join(render_src, "SimpleMeshRenderer.cpp"): {"width"},
os.path.join(render_src, "ConvertToBinary.cpp"): {"output_formats"},
}
)
flagfile_fn = os.path.join(parent.path_flags, parent.flagfile_basename)
flags = get_flags_from_flagfile(flagfile_fn)
for source in default_flags:
if os.path.isfile(source):
source_flags = get_flags(source)
else:
continue
desired_flags = default_flags[source]
for source_flag in source_flags:
flag_name = source_flag["name"]
# Only add the default flag if not already present in current flags
if flag_name in desired_flags:
if flag_name not in flags or flags[flag_name] == "":
flags[flag_name] = source_flag["default"]
# Add run flags
if tag == "bg":
flags["run_generate_foreground_masks"] = False
flags["run_precompute_resizes"] = True
flags["run_depth_estimation"] = True
flags["run_convert_to_binary"] = False
flags["run_fusion"] = False
flags["run_simple_mesh_renderer"] = False
flags["use_foreground_masks"] = False
elif tag == "depth":
flags["run_depth_estimation"] = True
flags["run_precompute_resizes"] = True
flags["run_precompute_resizes_foreground"] = True
flags["run_convert_to_binary"] = False
flags["run_fusion"] = False
flags["run_simple_mesh_renderer"] = False
elif tag == "export":
flags["run_generate_foreground_masks"] = False
flags["run_precompute_resizes"] = False
flags["run_precompute_resizes_foreground"] = False
flags["run_depth_estimation"] = False
# Overwrite flag file
sorted_flags = collections.OrderedDict(sorted(flags.items()))
dep_util.write_flagfile(flagfile_fn, sorted_flags)
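# The flagfile written here is presumably plain gflags-style text, one flag per line, e.g.:
#     --min_depth_m=0.5
#     --run_depth_estimation=true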
def get_calibrated_rig_json(parent):
"""Finds calibrated rig in the project.
Args:
parent (App(QDialog)): Object corresponding to the parent UI element.
Returns:
str: Name of the calibrated rig (assumes the rig contains "_calibrated.json").
"""
has_log_reader = "log_reader" in dir(parent)
ps = dep_util.get_files_ext(parent.path_rigs, "json", "calibrated")
if len(ps) == 0:
if has_log_reader:
parent.log_reader.log_warning(f"No rig files found in {parent.path_rigs}")
return ""
if len(ps) > 1:
ps_str = "\n".join(ps)
if has_log_reader:
parent.log_reader.log_warning(
f"Too many rig files found in {parent.path_rigs}:\n{ps_str}"
)
return ""
return ps[0]
def update_run_button_text(parent, btn):
"""Updates the text of the Run button depending on the existance of a process
running on the cloud
"""
text_run_btn = "Run"
if is_cloud_running_process(parent):
text_run_btn = "Re-attach"
btn.setText(text_run_btn)
def update_buttons(parent, gb, ignore=None):
"""Enables buttons and dropdowns according to whether or not data is present on the specified tab.
Args:
parent (App(QDialog)): Object corresponding to the parent UI element.
gb (QtWidgets.QGroupBox): Group box for the tab.
ignore (list[QtWidgets.QGroupBox], optional): Buttons to not update.
Returns:
tuple[bool, bool, bool]: Whether the UI is currently running a process, whether all
dropdowns have a selection, and whether all line edits have a value.
"""
if not ignore:
ignore = []
has_all_dropdowns = True
for dd in gb.findChildren(QtWidgets.QComboBox):
if not dd.currentText() and dd not in ignore:
has_all_dropdowns = False
break
has_all_values = True
for v in gb.findChildren(QtWidgets.QLineEdit):
if v.objectName() and not v.text() and v not in ignore:
has_all_values = False
break
is_running = parent.log_reader.is_running()
for btn in gb.findChildren(QtWidgets.QPushButton):
btn_name = btn.objectName()
if btn in ignore:
continue
if btn_name.endswith("_run"):
btn.setEnabled(not is_running and has_all_dropdowns and has_all_values)
elif btn_name.endswith("_cancel"):
btn.setEnabled(is_running)
elif btn_name.endswith("_threshs"):
btn.setEnabled(not is_running and has_all_dropdowns)
elif btn_name.endswith("_view"):
btn.setEnabled(not is_running)
elif btn_name.endswith("_download_meshes"):
btn.setEnabled(not is_running)
return is_running, has_all_dropdowns, has_all_values
def on_changed_dropdown(parent, gb, dd):
"""Callback event handler for changed dropdown on the specified tab.
Args:
parent (App(QDialog)): Object corresponding to the parent UI element.
gb (QtWidgets.QGroupBox): Group box for the tab.
dd (QtWidgets.QComboBox): Dropdown UI element.
"""
if not parent.is_refreshing_data:
name = dd.objectName()
if not name.endswith(
"_farm_ec2"
): # farm_ec2 dropdowns are not used in flagfile
parent.update_flagfile(parent.flagfile_fn)
# Check if we need to update the threshold image
if name.endswith(("_camera", "_frame_bg", "_first")):
# Check if we are already in a threshold tab, else default to color variance
tag = parent.tag
tab_widget = getattr(parent.dlg, f"w_{tag}_preview", None)
tab_idx = tab_widget.currentIndex()
if tab_widget.widget(tab_idx).objectName().endswith("_fg_mask"):
type = type_fg_mask
else:
type = type_color_var
if "run_thresholds" in dir(parent):
parent.run_thresholds(type)
def on_changed_line_edit(parent, gb, le):
"""Callback event handler for changed line edit on the specified tab.
Args:
parent (App(QDialog)): Object corresponding to the parent UI element.
gb (QtWidgets.QGroupBox): Group box for the tab.
le (_): Ignore
"""
if not parent.is_refreshing_data:
parent.update_buttons(gb)
parent.update_flagfile(parent.flagfile_fn)
def setup_groupbox(gb, callbacks):
"""Sets up callbacks for any groupboxes on the specified tab.
Args:
gb (QtWidgets.QGroupBox): Group box for the tab.
callbacks (dict[QtWidgets.QGroupBox, func : QEvent -> _]): Callbacks for the UI elements.
"""
if gb.isCheckable() and gb in callbacks:
gb.toggled.connect(callbacks[gb])
def setup_checkboxes(gb, callbacks):
"""Sets up callbacks for any checkboxes on the specified tab.
Args:
gb (QtWidgets.QGroupBox): Group box for the tab.
callbacks (dict[QtWidgets.QGroupBox, func : QEvent -> _]): Callbacks for the UI elements.
"""
for cb in gb.findChildren(QtWidgets.QCheckBox):
if cb in callbacks:
cb.stateChanged.connect(callbacks[cb])
def setup_dropdowns(parent, gb):
"""Sets up callbacks for any dropdowns on the specified tab.
Args:
parent (App(QDialog)): Object corresponding to the parent UI element.
gb (QtWidgets.QComboBox): Group box for the tab.
"""
if "on_changed_dropdown" in dir(parent):
for dd in gb.findChildren(QtWidgets.QComboBox):
dd.currentTextChanged.connect(
lambda state, y=gb, z=dd: parent.on_changed_dropdown(y, z)
)
dd.activated.connect(
lambda state, y=gb, z=dd: parent.on_changed_dropdown(y, z)
)
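# Note on the y=gb, z=dd defaults above: binding the loop variables as lambda default
# arguments avoids Python's late-binding closure pitfall, where every callback would
# otherwise see the last (gb, dd) pair from the loop. Standalone sketch:
#     fns = [lambda i=i: i for i in range(3)]
#     assert [f() for f in fns] == [0, 1, 2]  # without i=i, each call would return 2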
def setup_lineedits(parent, gb):
"""Sets up callbacks for any line edits on the specified tab.
Args:
parent (App(QDialog)): Object corresponding to the parent UI element.
gb (QtWidgets.QGroupBox): Group box for the tab.
"""
if "on_changed_line_edit" in dir(parent):
for le in gb.findChildren(QtWidgets.QLineEdit):
le.textChanged.connect(
lambda state, y=gb, z=le: parent.on_changed_line_edit(y, z)
)
def setup_buttons(parent, gb, callbacks):
"""Sets up callbacks for any buttons on the specified tab.
Args:
parent (App(QDialog)): Object corresponding to the parent UI element.
gb (QtWidgets.QGroupBox): Group box for the tab.
callbacks (dict[QtWidgets.QPushButton, func : QEvent -> _]): Callbacks for the UI elements.
"""
for btn in gb.findChildren(QtWidgets.QPushButton):
if btn in callbacks:
callback = callbacks[btn]
else:
name = btn.objectName()
callback = None
if name.endswith("_refresh"):
callback = parent.refresh
elif name.endswith("_run"):
callback = parent.run_process
elif name.endswith("_cancel"):
callback = parent.cancel_process
elif name.endswith("_threshs"):
callback = parent.run_thresholds
elif name.endswith("_logs"):
callback = parent.get_logs
else:
parent.log_reader.log_error(f"Cannot setup button {name}")
if callback:
btn.clicked.connect(callback)
def on_changed_preview(parent):
"""Callback event handler for changed image previews on the specified tab.
Args:
parent (App(QDialog)): Object corresponding to the parent UI element.
"""
tag = parent.tag
tab_widget = getattr(parent.dlg, f"w_{tag}_preview", None)
tab_idx = tab_widget.currentIndex()
tab_name = tab_widget.widget(tab_idx).objectName()
if "_threshs_" in tab_name:
if tab_name.endswith("_fg_mask"):
type = type_fg_mask
else:
type = type_color_var
if not parent.is_refreshing_data:
parent.run_thresholds(type)
def setup_preview(parent):
"""Creates preview window in the UI and connects a callback on the specified tab.
Args:
parent (App(QDialog)): Object corresponding to the parent UI element.
"""
tag = parent.tag
dlg = parent.dlg
btn_log_clear = getattr(dlg, f"btn_{tag}_log_clear", None)
text_log = getattr(dlg, f"text_{tag}_log", None)
preview = getattr(dlg, f"w_{tag}_preview", None)
btn_log_clear.clicked.connect(lambda: text_log.clear())
preview.setCurrentIndex(0)
if "on_changed_preview" in dir(parent):
preview.currentChanged.connect(parent.on_changed_preview)
def setup_data(parent, callbacks=None):
"""Sets up callbacks and initial UI element statuses on the specified tab.
Args:
parent (App(QDialog)): Object corresponding to the parent UI element.
callbacks (dict[QtWidgets.QGroupBox, func : QEvent -> _]): Callbacks for the UI elements.
"""
tag = parent.tag
dlg = parent.dlg
tab = getattr(dlg, f"t_{tag}", None)
if not callbacks:
callbacks = {}
for gb in tab.findChildren(QtWidgets.QGroupBox):
setup_groupbox(gb, callbacks)
setup_checkboxes(gb, callbacks)
setup_dropdowns(parent, gb)
setup_lineedits(parent, gb)
setup_buttons(parent, gb, callbacks)
# Preview tabs
setup_preview(parent)
def update_noise_detail(parent, noise, detail):
"""Updates noise/detail thresholds interaction on the specified tab.
Args:
parent (App(QDialog)): Object corresponding to the parent UI element.
noise (float): Noise threshold.
detail (float): Detail threshold.
"""
# Modify flagfile
parent.update_data_or_flags(
parent.flagfile_fn, flagfile_from_data=True, switch_to_flag_tab=False
)
# Update flagfile edit window
parent.update_flagfile_edit(parent.flagfile_fn, switch_to_flag_tab=False)
def update_fg_masks_thresholds(parent, blur, closing, thresh):
"""Updates thresholds and display for the foreground masking on the specified tab.
Args:
parent (App(QDialog)): Object corresponding to the parent UI element.
blur (int, optional): Gaussian blur radius.
closing (int, optional): Closure (for sealing holes).
thresh (int, optional): Threshold applied to segment foreground and background
"""
# Modify flagfile
parent.update_data_or_flags(
parent.flagfile_fn, flagfile_from_data=True, switch_to_flag_tab=False
)
# Update flagfile edit window
parent.update_flagfile_edit(parent.flagfile_fn, switch_to_flag_tab=False)
def log_missing_image(parent, path_color, cam_id, frame):
"""Prints a warning if an image cannot be located.
Args:
parent (App(QDialog)): Object corresponding to the parent UI element.
path_color (str): Path to the directory with color images.
cam_id (str): Name of the camera.
frame (str): Name of the frame (0-padded, six digits).
"""
parent.log_reader.log_warning(f"Cannot find frame {cam_id}/{frame} in {path_color}")
def update_thresholds_color_variance(parent, path_color, labels=None):
"""Updates the displayed thresholds for color variance on the specified tab.
Args:
parent (App(QDialog)): Object corresponding to the parent UI element.
path_color (str): Path to the directory with color images.
labels (list[str], optional): Labels used to filter UI elements to update.
"""
labels = labels if labels is not None else ("_frame_bg", "_first")
dlg = parent.dlg
for dd in parent.dlg.findChildren(QtWidgets.QComboBox):
name = dd.objectName()
if name.endswith(labels):
frame = dd.currentText()
elif name.endswith("_camera"):
cam_id = dd.currentText()
image_path = dep_util.get_level_image_path(path_color, cam_id, frame)
if not image_path:
log_missing_image(parent, path_color, cam_id, frame)
return
tag = parent.tag
w_image = getattr(dlg, f"w_{tag}_threshs_image_{type_color_var}", None)
# Foreground masks are generated at the finest level of the pyramid
res = max(config.WIDTHS)
w_image.color_var.set_image(image_path, res)
noise = float(parent.slider_noise.get_label_text())
detail = float(parent.slider_detail.get_label_text())
project = parent.parent.path_project
fn = dep_util.remove_prefix(image_path, f"{project}/")
getattr(dlg, f"label_{tag}_threshs_filename_{type_color_var}", None).setText(fn)
# Force update
w_image.update_thresholds(noise=noise, detail=detail)
def update_thresholds_fg_mask(parent, paths_color):
"""Updates thresholds and display for the foreground masking using values from UI
on the specified tab."
Args:
parent (App(QDialog)): Object corresponding to the parent UI element.
paths_color (list[str]): Paths to the directory with color images.
"""
dlg = parent.dlg
frames = [None] * 2
for dd in parent.dlg.findChildren(QtWidgets.QComboBox):
name = dd.objectName()
if name.endswith("_frame_bg"):
frames[0] = dd.currentText()
elif name.endswith("_first"):
frames[1] = dd.currentText()
elif name.endswith("_camera"):
cam_id = dd.currentText()
bg_image_path = dep_util.get_level_image_path(paths_color[0], cam_id, frames[0])
if not bg_image_path:
log_missing_image(parent, paths_color[0], cam_id, frames[0])
return
fg_image_path = dep_util.get_level_image_path(paths_color[1], cam_id, frames[1])
if not fg_image_path:
log_missing_image(parent, paths_color[1], cam_id, frames[1])
return
tag = parent.tag
w_image = getattr(dlg, f"w_{tag}_threshs_image_{type_fg_mask}", None)
# Foreground masks are generated at the finest level of the pyramid
res = max(config.WIDTHS)
w_image.fg_mask.set_images(bg_image_path, fg_image_path, res)
blur = float(parent.slider_blur.get_label_text())
closing = float(parent.slider_closing.get_label_text())
thresh = float(parent.slider_thresh.get_label_text())
project = parent.parent.path_project
fn_bg = dep_util.remove_prefix(bg_image_path, f"{project}/")
fn_fg = dep_util.remove_prefix(fg_image_path, f"{project}/")
getattr(dlg, f"label_{tag}_threshs_filename_{type_fg_mask}", None).setText(
f"{fn_bg} vs {fn_fg}"
)
# Force update
w_image.update_thresholds(blur=blur, closing=closing, thresh=thresh)
def run_thresholds_after_wait(parent, type):
"""Computes the threshold and displays after a delay on the specified tab.
Args:
parent (App(QDialog)): Object corresponding to the parent UI element.
type (Union[ColorVariance, ForegroundMask]): Instance where thresholds
can be run.
"""
# Apply flag file values in case it had unsaved changes
parent.save_flag_file()
tag = parent.tag
dlg = parent.dlg
label = getattr(dlg, f"label_{tag}_threshs_tooltip_{type}", None)
label.setToolTip(parent.threshs_tooltip)
getattr(dlg, f"w_{tag}_threshs_image_{type}", None).set_zoom_level(0)
if type == type_color_var:
parent.setup_thresholds_color_variance()
parent.update_thresholds_color_variance()
elif type == type_fg_mask:
parent.setup_thresholds_fg_masks()
parent.update_thresholds_fg_mask()
def run_thresholds(parent, type):
"""Runs thresholding based on values in the UI and update UI display on the specified tab.
Args:
parent (App(QDialog)): Object corresponding to the parent UI element.
type (Union[ColorVariance, ForegroundMask]): Instance where thresholds are run.
"""
tag = parent.tag
tab_widget = getattr(parent.dlg, f"w_{tag}_preview", None)
dep_util.switch_tab(tab_widget, f"_threshs_{type}")
# HACK: if we try to draw on a widget too quickly after switching tabs the resulting image
# does not span all the way to the width of the widget. We can wait a few milliseconds to
# let the UI "settle"
parent.timer = QtCore.QTimer(parent.parent)
parent.timer.timeout.connect(lambda: parent.run_thresholds_after_wait(type))
parent.timer.setSingleShot(True)
parent.timer.start(10) # 10ms
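# Design note: the same delayed call could also be written with the static helper
# QtCore.QTimer.singleShot(10, lambda: parent.run_thresholds_after_wait(type)); keeping
# the timer on `parent` makes it possible to restart or cancel it later if needed.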
def output_has_images(output_dirs):
"""Whether or not outputs already have results.
Args:
output_dirs (list[str]): List of directories where outputs will be saved.
Returns:
bool: Whether or not the output directories all have at least one valid file.
"""
for d in output_dirs:
if dep_util.get_first_file_path(d):
return True
return False
def run_process_check_existing_output(parent, gb, app_name, flagfile_fn, p_id):
"""Run terminal process and raise on failure.
Args:
parent (App(QDialog)): Object corresponding to the parent UI element.
gb (QtWidgets.QGroupBox): Group box for the tab.
app_name (str): Name of the binary being executed.
flagfile_fn (str): Name of the flagfile.
p_id (str): PID name of the process to be run.
"""
tag = parent.tag
cb_recompute = getattr(parent.dlg, f"cb_{tag}_recompute", None)
if cb_recompute is not None:
needs_rename = cb_recompute.isChecked()
if needs_rename:
# Rename current output directories using timestamp and create new empty ones
ts = dep_util.get_timestamp()
for d in parent.output_dirs:
if not os.path.isdir(d):
continue
d_dst = f"{d}_{ts}"
parent.log_reader.log_notice(
f"Saving copy of {d} to {d_dst} before re-computing"
)
shutil.move(d, d_dst)
os.makedirs(d, exist_ok=True)
run_process(parent, gb, app_name, flagfile_fn, p_id, not needs_rename)
def start_process(parent, cmd, gb, p_id, run_silently=False):
"""Runs a terminal process and disables UI element interaction.
Args:
parent (App(QDialog)): Object corresponding to the parent UI element.
cmd (str): Command to run in the terminal.
gb (QtWidgets.QGroupBox): Group box for the tab.
p_id (str): PID name of the process being started.
"""
if not run_silently:
parent.log_reader.log(f"CMD: {cmd}")
parent.log_reader.gb = gb
parent.log_reader.setup_process(p_id)
parent.log_reader.start_process(p_id, cmd)
# Switch to log tab
tag = parent.tag
tab_widget = getattr(parent.dlg, f"w_{tag}_preview", None)
dep_util.switch_tab(tab_widget, "_log")
# Disable UI elements
parent.switch_ui_elements_for_processing(False)
def run_process(
parent, gb, app_name=None, flagfile_fn=None, p_id="run", overwrite=False
):
"""Runs an application on the terminal, using the associated flagfile.
Args:
parent (App(QDialog)): Object corresponding to the parent UI element.
gb (QtWidgets.QGroupBox): Group box for the tab.
app_name (str, optional): Name of the binary being executed.
flagfile_fn (str, optional): Name of the flagfile to supply to the binary. this
will default to the flagfile associated with the binary if unspecified.
p_id (str, optional): PID name of the process being started.
overwrite (bool, optional): Whether or not to overwrite the local flagfile on disk.
"""
# Apply flag file values in case it had unsaved changes
parent.save_flag_file()
if not app_name:
app_name = parent.app_name
is_py_script = app_name.endswith(".py")
dir = scripts_dir if is_py_script else dep_bin_dir
app_path = os.path.join(dir, app_name)
if not os.path.isfile(app_path):
parent.log_reader.log_warning(f"App doesn't exist: {app_path}")
return
if not flagfile_fn:
flagfile_fn = parent.flagfile_fn
if output_has_images(parent.output_dirs) and not overwrite:
run_process_check_existing_output(parent, gb, app_name, flagfile_fn, p_id)
return
cmd = f'{app_path} --flagfile="{flagfile_fn}"'
if is_py_script:
cmd = f"python3.7 -u {cmd}"
start_process(parent, cmd, gb, p_id)
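# For reference, the assembled command looks roughly like (illustrative paths only):
#     python3.7 -u <scripts_dir>/render/render.py --flagfile="<project>/flags/render_depth.flags"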
def update_thresholds(parent, gb, type):
"""Updates the displayed thresholds for either color variance or foreground masks.
Args:
parent (App(QDialog)): Object corresponding to the parent UI element.
gb (QtWidgets.QGroupBox): Group box for the tab.
type (Union[ColorVariance, ForegroundMask]): Instance where thresholds
can be run.
"""
if type == type_color_var:
noise = parent.slider_noise.get_label_text()
detail = parent.slider_detail.get_label_text()
parent.update_noise_detail(noise, detail)
elif type == type_fg_mask:
blur = parent.slider_blur.get_label_text()
closing = parent.slider_closing.get_label_text()
thresh = parent.slider_thresh.get_label_text()
parent.update_fg_masks_thresholds(blur, closing, thresh)
# Update buttons
parent.update_buttons(gb)
def on_state_changed_partial_360(parent):
"""Callback event handler for changed "partial coverage" checkbox on the specified tab.
Args:
parent (App(QDialog)): Object corresponding to the parent UI element.
"""
if not parent.is_refreshing_data:
parent.update_flagfile(parent.flagfile_fn)
def on_state_changed_recompute(parent):
"""Callback event handler for changed "recompute" checkbox on the specified tab.
Args:
parent (App(QDialog)): Object corresponding to the parent UI element.
"""
if not parent.is_refreshing_data:
parent.update_flagfile(parent.flagfile_fn)
def on_state_changed_use_bg(parent, gb):
"""Callback event handler for changed "use background" checkbox on the specified tab.
Args:
parent (App(QDialog)): Object corresponding to the parent UI element.
gb (QtWidgets.QGroupBox): Group box for the tab.
"""
if not parent.is_refreshing_data:
parent.update_buttons(gb)
parent.update_flagfile(parent.flagfile_fn)
def on_state_changed_farm(parent, state):
"""Callback event handler for changed "AWS" checkbox on the specified tab.
Args:
parent (App(QDialog)): Object corresponding to the parent UI element.
state (str): Identifier of the callback state.
"""
parent.is_farm = state > 0
if not parent.is_refreshing_data:
if "update_frame_range_dropdowns" in dir(parent):
parent.update_frame_range_dropdowns()
if "update_run_button_text" in dir(parent):
parent.update_run_button_text()
parent.update_flagfile(parent.flagfile_fn)
def setup_thresholds(parent, types):
"""Sets necessary thresholds apps on the specified tab.
Args:
parent (App(QDialog)): Object corresponding to the parent UI element.
type (Union[ColorVariance, ForegroundMask]): Instance where thresholds
can be run.
"""
tag = parent.tag
dlg = parent.dlg
for attr in threshold_sliders:
type, printed, num, max, default = threshold_sliders[attr]
if type in types:
name = getattr(dlg, f"label_{tag}_threshs_{num}_name_{type}", None)
hs = getattr(dlg, f"hs_{tag}_threshs_{num}_{type}", None)
label = getattr(dlg, f"label_{tag}_threshs_{num}_{type}", None)
slider = SliderWidget(type, attr, name, printed, hs, label, max, default)
setattr(parent, f"slider_{attr}", slider)
for type in types:
w_image = getattr(dlg, f"w_{tag}_threshs_image_{type}", None)
w_viewer = getattr(dlg, f"w_{tag}_image_viewer_{type}", None)
w_image.set_image_viewer(w_viewer)
def setup_thresholds_color_variance(parent):
"""Sets color variance thresholds apps on the specified tab.
Args:
parent (App(QDialog)): Object corresponding to the parent UI element.
"""
for slider in [parent.slider_noise, parent.slider_detail]:
slider.setup(callback=parent.on_changed_slider)
def setup_thresholds_fg_masks(parent):
"""Sets up the default thresholds on foreground masks on the specified tab.
Args:
parent (App(QDialog)): Object corresponding to the parent UI element.
"""
for slider in [parent.slider_blur, parent.slider_closing, parent.slider_thresh]:
slider.setup(callback=parent.on_changed_slider)
def update_data_from_flags(
parent,
flags,
dropdowns=None,
values=None,
checkboxes=None,
labels=None,
prefix=None,
):
"""Updates UI elements from the flags on the specified tab.
Args:
parent (App(QDialog)): Object corresponding to the parent UI element.
flags (dict[str, _]): Flags and their corresponding values.
dropdowns (list[QtWidgets.QComboBox], optional): Dropdowns in the tab.
values (dict[QtWidgets.QLineEdit, _], optional): Map from UI elements to values.
checkboxes (list[QtWidgets.QCheckBox], optional): Checkboxes in the tab.
labels (list[QtWidgets.QLabel], optional): Labels in the tab.
prefix (str, optional): Prefix to append to values in the population of tab values.
"""
if not dropdowns:
dropdowns = []
if not values:
values = []
if not checkboxes:
checkboxes = []
if not labels:
labels = []
flagfile = parent.flagfile_basename
if not prefix:
prefix = f"{parent.parent.path_project}/"
for key, dd in dropdowns:
error = dep_util.update_qt_dropdown_from_flags(flags, key, prefix, dd)
if error:
parent.log_reader.log_warning(f"{flagfile}: {error}")
for key, val in values:
dep_util.update_qt_lineedit_from_flags(flags, key, prefix, val)
for key, cb in checkboxes:
error = dep_util.update_qt_checkbox_from_flags(flags, key, prefix, cb)
if error:
parent.log_reader.log_warning(f"{flagfile}: {error}")
for key, label in labels:
dep_util.update_qt_label_from_flags(flags, key, prefix, label)
def get_notation(parent, attr):
"""Gets standard format for attribute on the specified tab.
Args:
parent (App(QDialog)): Object corresponding to the parent UI element.
attr (str): Name of the attribute.
Returns:
str: Format string corresponding to the display notation.
"""
if attr in ["noise", "detail", "thresh"]:
notation = "{:.3e}"
elif attr in ["blur", "closing"]:
notation = "{:d}"
else:
parent.log_reader.log_error(f"Invalid slider attr: {attr}")
return notation
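# e.g. get_notation(parent, "noise") returns "{:.3e}", so 4e-05 is rendered as "4.000e-05",
# while "blur" uses "{:d}" and is shown as a plain integer.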
def on_changed_slider(parent, slider, value):
"""Callback event handler for changes to a slider UI element on the specified tab.
Args:
parent (App(QDialog)): Object corresponding to the parent UI element.
slider (QtWidgets.QSlider): Slider UI element.
value (int/float): Value of the slider element.
"""
type = slider.type
attr = slider.attr
notation = get_notation(parent, attr)
if notation == "{:d}":
value = int(value)
slider.set_label(value, notation)
tag = parent.tag
w_image = getattr(parent.dlg, f"w_{tag}_threshs_image_{type}", None)
if w_image.update_thresholds(**{attr: value}):
# Update thresholds in flagfile
parent.update_thresholds(type)
def initialize_farm_groupbox(parent):
"""Sets up the farm render box for the project path, i.e. AWS is displayed if
    rendering on an S3 project path and LAN if on an SMB drive.
Args:
parent (App(QDialog)): Object corresponding to the parent UI element.
"""
tag = parent.tag
dlg = parent.dlg
gb_farm = getattr(dlg, f"gb_{tag}_farm", None)
grid_s3 = getattr(dlg, f"w_{tag}_farm_s3", None)
grid_lan = getattr(dlg, f"w_{tag}_farm_lan", None)
parent.is_aws = parent.parent.is_aws
parent.is_lan = parent.parent.is_lan
if not parent.is_aws and not parent.is_lan:
gb_farm.hide()
elif parent.is_aws:
grid_lan.hide()
elif parent.is_lan:
grid_s3.hide()
parent.ec2_instance_types_cpu = []
parent.ec2_instance_types_gpu = []
if parent.is_aws:
# Get list of EC2 instances
client = parent.parent.aws_util.session.client("ec2")
ts = client._service_model.shape_for("InstanceType").enum
ts = [t for t in ts if not t.startswith(config.EC2_UNSUPPORTED_TYPES)]
parent.ec2_instance_types_cpu = [t for t in ts if t.startswith("c")]
parent.ec2_instance_types_gpu = [t for t in ts if t.startswith(("p", "g"))]
# Check if flagfile has farm attributes
flagfile_fn = os.path.join(parent.path_flags, parent.flagfile_basename)
flags = get_flags_from_flagfile(flagfile_fn)
parent.is_farm = False
for farm_attr in ["master", "workers", "cloud"]:
if flags[farm_attr] != "":
parent.is_farm = True
break
call_force_refreshing(parent, gb_farm.setChecked, parent.is_farm)
def show_resources(parent):
"""Displays resources used in the container.
Args:
parent (App(QDialog)): Object corresponding to the parent UI element.
Returns:
str: Resources (memory and CPU) being used.
"""
return run_command("top -b -n 1")
def show_aws_resources(parent):
"""Displays resources used across the AWS cluster.
Args:
parent (App(QDialog)): Object corresponding to the parent UI element.
Returns:
        str: Resources (memory and CPU) being used in the farm.
"""
return "\n".join(parent.parent.aws_util.ec2_get_running_instances())
def get_aws_workers():
"""Get names of the instances in the AWS farm.
Returns:
        list[str]: Instance IDs of EC2 instances in the farm.
"""
with open(config.DOCKER_AWS_WORKERS) as f:
lines = f.readlines()
return lines
def set_aws_workers(workers):
"""Sets names of the instances in the AWS farm.
Args:
workers (list[str]): Instance IDs of EC2 instances in the farm.
"""
with open(config.DOCKER_AWS_WORKERS, "w") as f:
f.writelines([worker.id for worker in workers])
def popup_ec2_dashboard_url(parent):
"""Displays a link to the EC2 dashboard in a popup on the specified tab.
Args:
parent (App(QDialog)): Object corresponding to the parent UI element.
"""
region = parent.parent.aws_util.region_name
prefix = f"{region}." if region else ""
url = f"https://{prefix}console.aws.amazon.com/ec2#Instances"
dep_util.popup_message(parent.parent, url, "EC2 Dashboard")
def popup_logs_locations(parent):
"""Displays the path to local logs in a popup on the specified tab.
Args:
parent (App(QDialog)): Object corresponding to the parent UI element.
"""
logs = [parent.log_reader.log_file]
logs_workers = glob.iglob(f"{parent.path_logs}/Worker-*", recursive=False)
for log in logs_workers:
ts_log = datetime.datetime.fromtimestamp(os.path.getmtime(log))
if ts_log > parent.parent.ts_start:
logs.append(log)
project = parent.parent.path_project
logs = [dep_util.remove_prefix(l, f"{project}/") for l in logs]
dep_util.popup_message(parent.parent, "\n".join(logs), "Logs")
def run_process_aws(parent, gb, p_id=None):
"""Runs the process to create a cluster on AWS and perform the render job.
Args:
parent (App(QDialog)): Object corresponding to the parent UI element.
gb (QtWidgets.QGroupBox): Group box for the tab.
"""
flags = {}
flags["key_dir"] = os.path.dirname(parent.path_aws_key_fn)
flags["key_name"] = os.path.splitext(os.path.basename(parent.path_aws_key_fn))[0]
flags["csv_path"] = parent.path_aws_credentials
flags["ec2_file"] = parent.path_aws_ip_file
spin_num_workers = getattr(parent.dlg, f"spin_{parent.tag}_farm_num_workers", None)
flags["cluster_size"] = int(spin_num_workers.value())
flags["region"] = parent.parent.aws_util.region_name
dd_ec2 = getattr(parent.dlg, f"dd_{parent.tag}_farm_ec2", None)
flags["instance_type"] = dd_ec2.currentText()
flags["tag"] = parent.tag
# Overwrite flag file
app_name = parent.app_aws_create
flagfile_fn = os.path.join(parent.path_flags, parent.app_name_to_flagfile[app_name])
dep_util.write_flagfile(flagfile_fn, flags)
if not p_id:
p_id = "run_aws_create"
run_process(parent, gb, app_name, flagfile_fn, p_id)
def on_download_meshes(parent, gb):
"""Downloads meshes from S3. This is a no-op if not an S3 project.
Args:
parent (App(QDialog)): Object corresponding to the parent UI element.
gb (QtWidgets.QGroupBox): Group box for the tab.
"""
if not parent.parent.is_aws:
return
subdir = image_type_paths["video_bin"]
flags = {}
flags["csv_path"] = parent.path_aws_credentials
flags["local_dir"] = os.path.join(config.DOCKER_INPUT_ROOT, subdir)
flags["s3_dir"] = os.path.join(parent.parent.ui_flags.project_root, subdir)
flags["verbose"] = parent.parent.ui_flags.verbose
flags["watch"] = True # NOTE: watchdog sometimes gets stale file handles in Windows
# Overwrite flag file
app_name = parent.app_aws_download_meshes
flagfile_fn = os.path.join(parent.path_flags, parent.app_name_to_flagfile[app_name])
dep_util.write_flagfile(flagfile_fn, flags)
p_id = "download_meshes"
run_process(parent, gb, app_name, flagfile_fn, p_id)
def on_terminate_cluster(parent, gb):
"""Terminates a running AWS cluster. This is a no-op if no cluster is up.
Args:
parent (App(QDialog)): Object corresponding to the parent UI element.
gb (QtWidgets.QGroupBox): Group box for the tab.
"""
flags = {}
flags["key_dir"] = os.path.dirname(parent.path_aws_key_fn)
flags["key_name"] = os.path.splitext(os.path.basename(parent.path_aws_key_fn))[0]
flags["csv_path"] = parent.path_aws_credentials
flags["ec2_file"] = parent.path_aws_ip_file
flags["region"] = parent.parent.aws_util.region_name
# Overwrite flag file
flagfile_fn = os.path.join(
parent.path_flags, parent.app_name_to_flagfile[parent.app_aws_clean]
)
dep_util.write_flagfile(flagfile_fn, flags)
app_name = parent.app_aws_clean
p_id = "terminate_cluster"
run_process(parent, gb, app_name, flagfile_fn, p_id)
def get_workers(parent):
"""Finds workers in a LAN farm.
Args:
parent (App(QDialog)): Object corresponding to the parent UI element.
Returns:
list[str]: IPs of workers in the local farm.
"""
if parent.parent.ui_flags.master == config.LOCALHOST:
return []
else:
return parent.lan.scan()
def call_force_refreshing(parent, fun, *args):
already_refreshing = parent.is_refreshing_data
if not already_refreshing:
parent.is_refreshing_data = True
fun(*args)
if not already_refreshing:
parent.is_refreshing_data = False
|
src/test/pythonFiles/definition/three.py | ChaseKnowlden/vscode-jupyter | 615 | 12768054 | import two
two.ct().fun() |
nndet/inference/ensembler/base.py | joeranbosma/nnDetection | 242 | 12768066 | <filename>nndet/inference/ensembler/base.py
"""
Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from os import PathLike
from pathlib import Path
from abc import ABC, abstractmethod
from collections import defaultdict
from typing import Any, Callable, Dict, Optional, Sequence, Tuple, Union, TypeVar
import torch
from nndet.io.load import save_pickle
from nndet.utils.tensor import to_numpy
from nndet.utils.info import maybe_verbose_iterable
class BaseEnsembler(ABC):
ID = "abstract"
def __init__(self,
properties: Dict[str, Any],
parameters: Dict[str, Any],
device: Optional[Union[torch.device, str]] = None,
**kwargs):
"""
Base class to containerize and ensemble the predictions of a single case.
Call :method:`process_batch` to add batched predictions of a case
to the ensembler and :method:`add_model` to signal the next model
if multiple models are used.
Args:
            properties: properties of the patient/case (e.g. transpose axes)
parameters: parameters for ensembling
device: device to use for internal computations
**kwargs: parameters for ensembling
Notes:
Call :method:`add_model` before adding predictions.
"""
self.model_current = None
self.model_results = {}
self.model_weights = {}
self.properties = properties
self.case_result: Optional[Dict] = None
self.parameters = parameters
self.parameters.update(kwargs)
if device is None:
self.device = torch.device("cpu")
elif isinstance(device, str):
self.device = torch.device(device)
elif isinstance(device, torch.device):
self.device = device
else:
raise ValueError(f"Wrong type {type(device)} for device argument.")
@classmethod
def from_case(cls,
case: Dict,
properties: Optional[Dict] = None,
parameters: Optional[Dict] = None,
**kwargs,
):
"""
Primary way to instantiate this class. Automatically extracts all
properties and uses a default set of parameters for ensembling.
Args:
case: case which is predicted
properties: Additional properties. Defaults to None.
parameters: Additional parameters. Defaults to None.
"""
return cls(properties=properties, parameters=parameters, **kwargs)
def add_model(self,
name: Optional[str] = None,
model_weight: Optional[float] = None,
) -> str:
"""
        This function signals the ensembler to add a new model for internal
        processing.
Args:
            name: Name of the model. If None, the current number of models is used.
model_weight: Optional weight for this model. Defaults to None.
"""
if name is None:
name = len(self.model_weights) + 1
if name in self.model_results:
raise ValueError(f"Invalid model name, model {name} is already present")
if model_weight is None:
model_weight = 1.0
self.model_weights[name] = model_weight
self.model_results[name] = defaultdict(list)
self.model_current = name
return name
@abstractmethod
@torch.no_grad()
def process_batch(self, result: Dict, batch: Dict):
"""
Process a single batch
Args:
result: predictions to save and ensemble
batch: input batch used for predictions (for additional meta data)
Raises:
NotImplementedError: Overwrite this function in subclasses for the
specific use case.
Warnings:
Make sure to move cached values to the CPU after they have been
processed.
"""
raise NotImplementedError
@abstractmethod
@torch.no_grad()
def get_case_result(self, restore: bool = False) -> Dict[str, torch.Tensor]:
"""
Retrieve the results of a single case
Args:
restore: restores predictions in original image space
Raises:
NotImplementedError: Overwrite this function in subclasses for the
specific use case.
Returns:
Dict[str, torch.Tensor]: the result of a single case
"""
raise NotImplementedError
def update_parameters(self, **parameters: Dict):
"""
Update internal parameters used for ensembling the results
Args:
parameters: parameters to update
"""
self.parameters.update(parameters)
@classmethod
@abstractmethod
def sweep_parameters(cls) -> Tuple[Dict[str, Any], Dict[str, Sequence[Any]]]:
"""
Return a set of parameters which can be used to sweep ensembling
parameters in a postprocessing step
Returns:
Dict[str, Any]: default state to start with
Dict[str, Sequence[Any]]]: Defines the values to search for each
parameter
"""
raise NotImplementedError
def save_state(self,
target_dir: Path,
name: str,
**kwargs,
):
"""
Save case result as pickle file. Identifier of ensembler will
be added to the name
Args:
target_dir: folder to save result to
name: name of case
**kwargs: data to save
"""
kwargs["properties"] = self.properties
kwargs["parameters"] = self.parameters
kwargs["model_current"] = self.model_current
kwargs["model_results"] = self.model_results
kwargs["model_weights"] = self.model_weights
kwargs["case_result"] = self.case_result
with open(Path(target_dir) / f"{name}_{self.ID}.pt", "wb") as f:
torch.save(kwargs, f)
def load_state(self, base_dir: PathLike, case_id: str) -> Dict:
"""
        Load a previously saved ensembler state for the given case from ``base_dir``
"""
ckp = torch.load(str(Path(base_dir) / f"{case_id}_{self.ID}.pt"))
self._load(ckp)
return ckp
def _load(self, state: Dict):
for key, item in state.items():
setattr(self, key, item)
@classmethod
def from_checkpoint(cls, base_dir: PathLike, case_id: str):
ckp = torch.load(str(Path(base_dir) / f"{case_id}_{cls.ID}.pt"))
t = cls(
properties=ckp["properties"],
parameters=ckp["parameters"],
)
t._load(ckp)
return t
@classmethod
def get_case_ids(cls, base_dir: PathLike):
return [c.stem.rsplit(f"_{cls.ID}", 1)[0]
for c in Path(base_dir).glob(f"*_{cls.ID}.pt")]
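# Illustrative usage sketch (hypothetical concrete subclass `MyEnsembler`; the
# iterable of (result, batch) pairs is assumed to come from a prediction loop):
#
#   ensembler = MyEnsembler.from_case(case, properties=props, parameters=params)
#   ensembler.add_model("fold0", model_weight=1.0)
#   for result, batch in predictions:
#       ensembler.process_batch(result, batch)
#   final = ensembler.get_case_result(restore=True)
#   ensembler.save_state(target_dir, name=case_id)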
class OverlapMap:
def __init__(self, data_shape: Sequence[int]):
"""
Handler for overlap map
Args:
data_shape: spatial dimensions of data (
no batch dim and no channel dim!)
"""
self.overlap_map: torch.Tensor = \
torch.zeros(*data_shape, requires_grad=False, dtype=torch.float)
def add_overlap(self, crop: Sequence[slice]):
"""
Increase values of :param:`self.overlap_map` inside of crop
Args:
crop: defines crop. Negative values are assumed to be outside
of the data and thus discarded
"""
# discard leading indexes which could be due to batches and channels
if len(crop) > self.overlap_map.ndim:
crop = crop[-self.overlap_map.ndim:]
# clip crop to data shape
slicer = []
for data_shape, crop_dim in zip(tuple(self.overlap_map.shape), crop):
start = max(0, crop_dim.start)
stop = min(data_shape, crop_dim.stop)
slicer.append(slice(start, stop, crop_dim.step))
self.overlap_map[slicer] += 1
def mean_num_overlap_of_box(self, box: Sequence[int]) -> float:
"""
Extract mean number of overlaps from a bounding box area
Args:
box: defines bounding box (x1, y1, x2, y2, (z1, z2))
Returns:
            float: mean number of overlaps
"""
slicer = [slice(int(box[0]), int(box[2])), slice(int(box[1]), int(box[3]))]
if len(box) == 6:
slicer.append(slice(int(box[4]), int(box[5])))
return torch.mean(self.overlap_map[slicer].float()).item()
def mean_num_overlap_of_boxes(self, boxes: torch.Tensor) -> torch.Tensor:
"""
Extract mean number of overlaps from a bounding box area
Args:
boxes: defines multiple bounding boxes (x1, y1, x2, y2, (z1, z2))
[N, dim * 2]
Returns:
Tensor: mean number of overlaps per box [N]
"""
return torch.tensor(
[self.mean_num_overlap_of_box(box) for box in boxes]).to(
dtype=torch.float, device=boxes.device)
def avg(self) -> torch.Tensor:
"""
        Compute the median over all overlaps
"""
return self.overlap_map.float().median()
def restore_mean(self, val):
"""
Generate a new overlap map filled with the specified value
"""
        # fill a fresh map with the requested value; assigning a bare float here
        # would break subsequent tensor operations on overlap_map
        self.overlap_map = torch.full_like(self.overlap_map, float(val))
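# Illustrative example (made-up shapes): count crop coverage and query it per box.
#
#   overlap = OverlapMap((128, 128))
#   overlap.add_overlap([slice(0, 64), slice(0, 64)])
#   overlap.add_overlap([slice(32, 96), slice(32, 96)])
#   overlap.mean_num_overlap_of_box([0, 0, 64, 64])  # mean count inside that box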
def extract_results(source_dir: PathLike,
target_dir: PathLike,
ensembler_cls: Callable,
restore: bool,
**params,
) -> None:
"""
Compute case result from ensembler and save it
Args:
source_dir: directory which contains the saved predictions/state from
the ensembler class
target_dir: directory to save results
ensembler_cls: ensembler class for prediction
        restore: if true, the results are converted into the original image
space
"""
Path(target_dir).mkdir(parents=True, exist_ok=True)
for case_id in maybe_verbose_iterable(ensembler_cls.get_case_ids(source_dir)):
ensembler = ensembler_cls.from_checkpoint(base_dir=source_dir, case_id=case_id)
ensembler.update_parameters(**params)
pred = to_numpy(ensembler.get_case_result(restore=restore))
save_pickle(pred, Path(target_dir) / f"{case_id}_{ensembler_cls.ID}.pkl")
BaseEnsemblerType = TypeVar('BaseEnsemblerType', bound=BaseEnsembler)
|
analytics/ot-iou/utils.py | xwu2git/Smart-City-Sample | 126 | 12768067 |
class BBUtil(object):
def __init__(self,width,height):
super(BBUtil, self).__init__()
self.width=width
self.height=height
def xywh_to_tlwh(self, bbox_xywh):
x,y,w,h = bbox_xywh
xmin = max(int(round(x - (w / 2))),0)
ymin = max(int(round(y - (h / 2))),0)
return [xmin,ymin,int(w),int(h)]
def tlwh_to_xyxy(self, bbox_tlwh):
x,y,w,h = bbox_tlwh
x1 = max(int(x),0)
x2 = min(int(x+w),self.width-1)
y1 = max(int(y),0)
y2 = min(int(y+h),self.height-1)
return [x1,y1,x2,y2]
def xywh_to_xyxy(self, bbox_xywh):
x,y,w,h = bbox_xywh
x1 = max(int(x-w/2),0)
x2 = min(int(x+w/2),self.width-1)
y1 = max(int(y-h/2),0)
y2 = min(int(y+h/2),self.height-1)
return [x1,y1,x2,y2]
def xyxy_to_tlwh(self, bbox_xyxy):
x1,y1,x2,y2 = bbox_xyxy
t = x1
l = y1
w = int(x2-x1)
h = int(y2-y1)
return [t,l,w,h]
def float_to_int(self,bbox_xyxy):
x1,y1,x2,y2 = bbox_xyxy
return [int(x1*self.width), int(y1*self.height), int(x2*self.width), int(y2*self.height)]
def int_to_float(self,bbox_xyxy):
x1,y1,x2,y2 = [float(item) for item in bbox_xyxy]
return [x1/self.width, y1/self.height, x2/self.width, y2/self.height]
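# Illustrative usage (made-up numbers) for converting between the supported box
# formats on a 640x480 frame:
#
#   bb = BBUtil(640, 480)
#   bb.xywh_to_xyxy([320, 240, 100, 50])   # center/size -> clipped corner coords
#   bb.int_to_float([100, 100, 200, 200])  # pixel corners -> normalized [0, 1] coords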
|
plato/agent/component/nlg/slot_filling_nlg.py | avmi/plato-research-dialogue-system | 899 | 12768070 | <filename>plato/agent/component/nlg/slot_filling_nlg.py
"""
Copyright (c) 2019-2020 Uber Technologies, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__author__ = "<NAME>"
from plato.agent.component.nlg.nlg import NLG
import random
"""
SlotFillingNLG is a simple template-based nlg, designed to work for
Slot-Filling applications. The purpose of this class is to provide a quick way
of running Conversational Agents, sanity checks, and to aid debugging.
"""
class SlotFillingNLG(NLG):
def __init__(self, args=None):
"""
Nothing to initialize. We need the args to support use by the Generic
Agent.
"""
super(SlotFillingNLG, self).__init__()
def initialize(self, args):
"""
Nothing to do here
:param args:
:return:
"""
pass
def generate_output(self, args=None):
"""
Select the appropriate template given the acts in the arguments and
generate the output utterance.
:param args: a dictionary of arguments that contain the dialogue acts
:return: the output utterance
"""
if not args:
print('WARNING! SlotFillingNLG called without arguments!')
return ''
if 'args' in args:
dacts = args['args']
elif 'dacts' not in args:
print('WARNING! SlotFillingNLG called without dacts!')
return ''
else:
dacts = args['dacts']
system = True
if 'system' in args:
system = bool(args['system'])
response = ''
for dact in dacts:
if dact.intent == 'request':
if dact.params and dact.params[0].slot:
if system:
response += 'Which ' + \
dact.params[0].slot + \
' do you prefer?'
else:
response += 'What is the ' + dact.params[0].slot + '?'
else:
response += 'Which one?'
elif dact.intent in ['inform', 'offer']:
for dact_item in dact.params:
if system:
if dact_item.slot == 'name' and \
dact_item.value == 'not found':
response += 'Sorry, I cannot find such an item. '
else:
if not dact_item.value:
response += 'its ' + \
dact_item.slot + \
' is unknown, '
elif dact_item.slot == 'name' and \
len(dact.params) > 1:
response += dact_item.value + ' '
elif dact_item.slot in ['food', 'cuisine']:
response += 'is serving ' + \
dact_item.value + \
' food, '
elif dact_item.slot == 'endorsement':
response += 'is ' + dact_item.value + ', '
else:
response += 'its ' + \
dact_item.slot + \
' is ' + dact_item.value + ', '
else:
if dact.intent == 'offer':
if dact_item.value:
response += dact_item.slot + ' is ' + \
dact_item.value + ', '
else:
response += dact_item.slot + ' is unknown, '
else:
r = random.random()
if r < 0.33:
response += 'I prefer ' + dact_item.value + \
' ' + dact_item.slot + ', '
elif r < 0.66:
response += 'um i want ' + dact_item.value + \
' ' + dact_item.slot + ', '
else:
response += dact_item.value + ' ' + \
dact_item.slot + ' please, '
if response:
# Trim trailing comma and space
response = response[:-2]
elif dact.intent == 'bye':
response += 'Thank you, goodbye'
elif dact.intent == 'deny':
response += 'No'
elif dact.intent == 'negate':
response += 'No '
if dact.params and dact.params[0].slot and \
dact.params[0].value:
response += dact.params[0].slot + \
' is not ' + dact.params[0].value
elif dact.intent == 'ack':
response += 'Ok'
elif dact.intent == 'affirm':
response += 'Yes '
if dact.params and dact.params[0].slot and \
dact.params[0].value:
response += dact.params[0].slot + \
' is ' + dact.params[0].value
elif dact.intent == 'thankyou':
response += 'Thank you'
elif dact.intent == 'reqmore':
response += 'Can you tell me more?'
elif dact.intent == 'repeat':
response += 'Can you please repeat?'
elif dact.intent == 'restart':
response += 'Can we start over?'
elif dact.intent == 'expl-conf':
response += 'Alright '
if dact.params and dact.params[0].slot and \
dact.params[0].value:
response += dact.params[0].slot + \
' is ' + dact.params[0].value
elif dact.intent == 'select':
response += 'Which one do you prefer '
if dact.params and dact.params[0].slot:
response += 'for ' + dact.params[0].slot
elif dact.intent == 'reqalts':
response += 'Is there anything else?'
elif dact.intent in ['confirm', 'confirm-domain']:
response += 'So is '
if dact.params and dact.params[0].slot and \
dact.params[0].value:
response += dact.params[0].slot + \
' ' + dact.params[0].value
elif dact.intent == 'canthelp':
response += 'Sorry, I cannot help you with that.'
elif dact.intent == 'welcomemsg':
response += 'Hello, how may I help you?'
elif dact.intent == 'hello':
response = 'Hi'
elif dact.intent == 'welcome':
response += random.choice(['Hi, how can I help you today?',
'Speak, human.'])
elif dact.intent == 'na':
response += '(no system response)'
else:
response += 'SlotFillingNLG %s' % dact
response += ' '
response = response.replace('addr', 'address')
response = response.replace('pricerange', 'price range')
response = response.replace('postcode', 'post code')
response = response.replace('dontcare', 'any')
return response
def train(self, data):
"""
Nothing to do here.
:param data:
:return:
"""
pass
def save(self, path=None):
"""
Nothing to do here.
:param path:
:return:
"""
pass
def load(self, path):
"""
Nothing to do here.
:param path:
:return:
"""
pass
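# Illustrative call (hypothetical dialogue acts; shapes inferred from generate_output
# above): each act exposes .intent and .params, and each param a .slot / .value.
#
#   nlg = SlotFillingNLG()
#   utterance = nlg.generate_output({'dacts': dacts, 'system': True})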
|
pdftotree/TreeVisualizer.py | zviri/pdftotree | 329 | 12768077 | from typing import Tuple
from pdfminer.pdfdocument import PDFDocument
from pdfminer.pdfpage import PDFPage
from pdfminer.pdfparser import PDFParser
try:
from IPython import get_ipython
if "IPKernelApp" not in get_ipython().config:
raise ImportError("console")
except (AttributeError, ImportError):
from wand.display import display
else:
from IPython.display import display
from wand.color import Color
from wand.drawing import Drawing
from wand.image import Image
class TreeVisualizer:
"""
Object to display bounding boxes on a pdf document
"""
def __init__(self, pdf_file):
"""
:param pdf_path: directory where documents are stored
:return:
"""
self.pdf_file = pdf_file
def display_boxes(self, tree, html_path, filename_prefix, alternate_colors=False):
"""
Displays each of the bounding boxes passed in 'boxes' on images of the pdf
pointed to by pdf_file
boxes is a list of 5-tuples (page, top, left, bottom, right)
"""
imgs = []
colors = {
"section_header": Color("blue"),
"figure": Color("green"),
"figure_caption": Color("green"),
"table_caption": Color("red"),
"list": Color("yellow"),
"paragraph": Color("gray"),
"table": Color("red"),
"header": Color("brown"),
}
for i, page_num in enumerate(tree.keys()):
img = self.pdf_to_img(page_num)
draw = Drawing()
draw.fill_color = Color("rgba(0, 0, 0, 0.0)")
for clust in tree[page_num]:
for (pnum, pwidth, pheight, top, left, bottom, right) in tree[page_num][
clust
]:
draw.stroke_color = colors[clust]
draw.rectangle(left=left, top=top, right=right, bottom=bottom)
draw.push()
draw.font_size = 20
draw.font_weight = 10
draw.fill_color = colors[clust]
if int(left) > 0 and int(top) > 0:
draw.text(x=int(left), y=int(top), body=clust)
draw.pop()
draw(img)
img.save(filename=html_path + filename_prefix + "_page_" + str(i) + ".png")
imgs.append(img)
return imgs
def display_candidates(self, tree, html_path, filename_prefix):
"""
Displays the bounding boxes corresponding to candidates on an image of the pdf
boxes is a list of 5-tuples (page, top, left, bottom, right)
"""
imgs = self.display_boxes(
tree, html_path, filename_prefix, alternate_colors=True
)
return display(*imgs)
def pdf_to_img(self, page_num, pdf_dim=None):
"""
Converts pdf file into image
:param pdf_file: path to the pdf file
:param page_num: page number to convert (index starting at 1)
:return: wand image object
"""
if not pdf_dim:
pdf_dim = get_pdf_dim(self.pdf_file)
page_width, page_height = pdf_dim
img = Image(filename="{}[{}]".format(self.pdf_file, page_num - 1))
img.resize(page_width, page_height)
return img
def get_pdf_dim(pdf_file) -> Tuple[int, int]:
with open(pdf_file, "rb") as f:
parser = PDFParser(f)
doc = PDFDocument(parser)
# Look at the 1st page only.
page = next(PDFPage.create_pages(doc))
_, _, page_width, page_height = page.mediabox
return page_width, page_height
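# Illustrative usage (hypothetical paths; the tree layout is inferred from
# display_boxes above): tree maps page numbers to {cluster_name: [(pnum, pwidth,
# pheight, top, left, bottom, right), ...]}.
#
#   viz = TreeVisualizer("paper.pdf")
#   viz.display_boxes(tree, "out/", "paper")  # writes out/paper_page_<i>.png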
|
section_10_(dictionaries)/dict_values.py | alisonjo2786/pythonlessons_materials | 425 | 12768099 | # If you're new to dictionaries, you might want to start with dict_access.py
# We create a dictionary.
contacts = {
'Shannon': '202-555-1234',
'Amy': '410-515-3000',
'Jen': '301-600-5555',
'Julie': '202-333-9876'
}
# We can use the dictionary method .values() to give us a list of all of the values in contacts.
print contacts.values()
for phone in contacts.values():
print "{0}".format(phone)
# .values() is used less frequently than .keys() since you can't get the key from the value (but you can get the value if you know the key)
# Use .values() when you don't care what the key is, you just want a list of all of the values. It's less common, but still good to know. |
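# And since values are reachable from their keys, a single lookup stays simple:
print contacts['Amy']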
cantools/database/can/bus.py | VonSquiggles/cantools | 1,063 | 12768100 | <filename>cantools/database/can/bus.py
# A CAN bus.
class Bus(object):
"""A CAN bus.
"""
def __init__(self,
name,
comment=None,
baudrate=None,
fd_baudrate=None,
autosar_specifics=None):
self._name = name
# If the 'comment' argument is a string, we assume that is an
# English comment. This is slightly hacky, because the
# function's behavior depends on the type of the passed
# argument, but it is quite convenient...
if isinstance(comment, str):
# use the first comment in the dictionary as "The" comment
self._comments = { None: comment }
else:
# assume that we have either no comment at all or a
# multi-lingual dictionary
self._comments = comment
self._baudrate = baudrate
self._fd_baudrate = fd_baudrate
self._autosar = autosar_specifics
@property
def name(self):
"""The bus name as a string.
"""
return self._name
@property
def comment(self):
"""The bus' comment, or ``None`` if unavailable.
Note that we implicitly try to return the English comment if
multiple languages were specified.
"""
if self._comments is None:
return None
elif self._comments.get(None) is not None:
return self._comments.get(None)
elif self._comments.get("FOR-ALL") is not None:
return self._comments.get("FOR-ALL")
return self._comments.get('EN')
@property
def comments(self):
"""The dictionary with the descriptions of the bus in multiple
languages. ``None`` if unavailable.
"""
return self._comments
@property
def baudrate(self):
"""The bus baudrate, or ``None`` if unavailable.
"""
return self._baudrate
@property
def fd_baudrate(self):
"""The baudrate used for the payload of CAN-FD frames, or ``None`` if
unavailable.
"""
return self._fd_baudrate
@property
def autosar(self):
"""An object containing AUTOSAR specific properties of the bus.
"""
return self._autosar
@autosar.setter
def autosar(self, value):
self._autosar = value
def __repr__(self):
return "bus('{}', {})".format(
self._name,
"'" + self.comment + "'" if self.comment is not None else None)
|
src/hammer-vlsi/test_tool_utils.py | XiaoSanchez/hammer | 138 | 12768119 | <reponame>XiaoSanchez/hammer<filename>src/hammer-vlsi/test_tool_utils.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Helper and utility classes for testing HammerTool.
#
# See LICENSE for licence details.
import json
import os
import tempfile
from abc import ABCMeta, abstractmethod
from numbers import Number
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import hammer_tech
from hammer_tech import LibraryFilter
from hammer_config import HammerJSONEncoder
import hammer_vlsi
class SingleStepTool(hammer_vlsi.DummyHammerTool, metaclass=ABCMeta):
"""
Helper class to define a single-step tool in tests.
"""
@property
def steps(self) -> List[hammer_vlsi.HammerToolStep]:
return self.make_steps_from_methods([
self.step
])
@abstractmethod
def step(self) -> bool:
"""
Implement this method for the single step.
:return: True if the step passed
"""
pass
class DummyTool(SingleStepTool):
"""
A dummy tool that does nothing and always passes.
"""
def step(self) -> bool:
return True
class HammerToolTestHelpers:
"""
Helper functions to aid in the testing of IP library filtering/processing.
"""
@staticmethod
def create_tech_dir(tech_name: str) -> Tuple[str, str]:
"""
Create a temporary folder for a test technology.
Note: the caller is responsible for removing the tech_dir_base folder
after use!
:param tech_name: Technology name (e.g. "asap7")
:return: Tuple of create tech_dir and tech_dir_base (which the caller
must delete)
"""
tech_dir_base = tempfile.mkdtemp()
tech_dir = os.path.join(tech_dir_base, tech_name)
os.mkdir(tech_dir)
tech_init_py = os.path.join(tech_dir, "__init__.py")
with open(tech_init_py, "w") as f: # pylint: disable=invalid-name
f.write("from hammer_tech import HammerTechnology\nclass {t}Technology(HammerTechnology):\n pass\ntech = {t}Technology()".format(
t=tech_name))
return tech_dir, tech_dir_base
@staticmethod
def write_tech_json(
tech_json_filename: str,
postprocessing_func: Optional[Callable[[Dict[str, Any]], Dict[str, Any]]] = None) -> None:
"""
Write a dummy tech JSON to the given filename with the given
postprocessing.
"""
tech_json = {
"name": "dummy28",
"grid_unit": "0.001",
"time_unit": "1 ns",
"installs": [
{
"path": "test",
"base var": "" # means relative to tech dir
}
],
"libraries": [
{"milkyway techfile": "test/soy"},
{"openaccess techfile": "test/juice"},
{"milkyway techfile": "test/coconut"},
{
"openaccess techfile": "test/orange",
"provides": [
{"lib_type": "stdcell"}
]
},
{
"openaccess techfile": "test/grapefruit",
"provides": [
{"lib_type": "stdcell"}
]
},
{
"openaccess techfile": "test/tea",
"provides": [
{"lib_type": "technology"}
]
},
]
} # type: Dict[str, Any]
if postprocessing_func is not None:
tech_json = postprocessing_func(tech_json)
with open(tech_json_filename, "w") as f: # pylint: disable=invalid-name
f.write(json.dumps(tech_json, cls=HammerJSONEncoder, indent=4))
@staticmethod
def make_test_filter() -> LibraryFilter:
"""
Make a test filter that returns libraries with openaccess techfiles with libraries that provide 'technology'
in lib_type first, with the rest sorted by the openaccess techfile.
"""
def filter_func(lib: hammer_tech.Library) -> bool:
return lib.openaccess_techfile is not None
def paths_func(lib: hammer_tech.Library) -> List[str]:
assert lib.openaccess_techfile is not None
return [lib.openaccess_techfile]
def sort_func(lib: hammer_tech.Library) -> Union[Number, str, tuple]:
assert lib.openaccess_techfile is not None
if lib.provides is not None and len(
list(filter(lambda x: x is not None and x.lib_type == "technology", lib.provides))) > 0:
# Put technology first
return (0, "")
else:
return (1, str(lib.openaccess_techfile))
return LibraryFilter.new(
filter_func=filter_func,
paths_func=paths_func,
tag="test", description="Test filter",
is_file=True,
sort_func=sort_func
)
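# Note (derived from the dummy tech JSON above): the filter returned by
# make_test_filter() sorts test/tea first (provides 'technology', sort key (0, ""))
# and the remaining openaccess techfiles by path: test/grapefruit, test/juice,
# test/orange.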
|
cozy/polynomials.py | mernst/cozy | 188 | 12768158 | <reponame>mernst/cozy
"""Class for representing polynomials of one variable."""
import functools
@functools.total_ordering
class Polynomial(object):
__slots__ = ("terms",)
def __init__(self, terms=()):
terms = list(terms)
while terms and (terms[-1] == 0):
terms.pop()
self.terms = tuple(terms)
def __hash__(self):
return hash(self.terms)
def __eq__(self, other):
return self.terms == other.terms
def __lt__(self, other):
if len(self.terms) != len(other.terms):
return len(self.terms) < len(other.terms)
for i in reversed(range(len(self.terms))):
self_term = self.terms[i]
other_term = other.terms[i]
if self_term < other_term:
return True
if other_term < self_term:
return False
return False
def __str__(self):
if not self.terms:
return "0"
s = str(self.terms[0])
for i in range(1, len(self.terms)):
if self.terms[i]:
term = str(self.terms[i])
exponent = "n^{}".format(i) if i > 1 else "n"
s = term + exponent + " + " + s
return s
def __repr__(self):
return "Polynomial({!r})".format(self.terms)
def get_coefficient(self, i):
if i >= len(self.terms):
return 0
return self.terms[i]
def largest_term(self):
if not self.terms:
return DominantTerm.ZERO
exponent = len(self.terms) - 1
return DominantTerm(
multiplier=self.get_coefficient(exponent),
exponent=exponent)
def __add__(self, other):
terms = [0] * max(len(self.terms), len(other.terms))
for i in range(len(terms)):
terms[i] = self.get_coefficient(i) + other.get_coefficient(i)
return Polynomial(terms)
def __mul__(self, other):
if isinstance(other, Polynomial):
res = Polynomial.ZERO
for i in range(len(self.terms)):
                # scale `other` by the i-th coefficient, then shift it up by i places
                res += Polynomial([0] * i + list((other * self.terms[i]).terms))
return res
else:
return Polynomial((t * other) for t in self.terms)
Polynomial.ZERO = Polynomial()
Polynomial.ONE = Polynomial([1])
Polynomial.N = Polynomial([0, 1])
@functools.total_ordering
class DominantTerm(object):
"""A term of the form c*n^e for some unknown n.
Instances of this class can be added, multiplied, and compared. A term
with a higher exponent is always greater than one with a lower exponent.
"""
__slots__ = ("multiplier", "exponent")
def __init__(self, multiplier, exponent):
self.multiplier = multiplier
self.exponent = exponent
def __eq__(self, other):
return self.multiplier == other.multiplier and self.exponent == other.exponent
def __lt__(self, other):
return (self.exponent, self.multiplier) < (other.exponent, other.multiplier)
def __str__(self):
return "{}n^{}".format(self.multiplier, self.exponent)
def __repr__(self):
return "DominantTerm({}, {})".format(self.multiplier, self.exponent)
def __add__(self, other):
if other.exponent == self.exponent:
return DominantTerm(self.multiplier + other.multiplier, self.exponent)
if other.exponent > self.exponent:
return other
return self
def __mul__(self, other):
return DominantTerm(self.multiplier * other.multiplier, self.exponent + other.exponent)
DominantTerm.ZERO = DominantTerm(0, 0)
DominantTerm.ONE = DominantTerm(1, 0)
DominantTerm.N = DominantTerm(1, 1)
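# Illustrative example (made-up values): Polynomial([3, 2]) represents 3 + 2n.
#
#   p = Polynomial([3, 2]) * Polynomial.N   # -> 3n + 2n^2
#   p.largest_term()                        # -> DominantTerm(2, 2), i.e. 2n^2
#   DominantTerm.N < DominantTerm(1, 2)     # higher exponent dominates -> True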
|
Algorithms/string_generator/string_generator.py | TeacherManoj0131/HacktoberFest2020-Contributions | 256 | 12768163 | <reponame>TeacherManoj0131/HacktoberFest2020-Contributions
import string,random
def string_generator(size, chars):
return ''.join(random.choice(chars) for _ in range(size))
def get_option(option):
if option == 'alphabet':
characters = string.ascii_uppercase + string.ascii_lowercase + string.digits
elif option == 'numeric':
characters = string.digits
else:
        # raise instead of printing: otherwise `return characters` below would hit an
        # unassigned variable for unknown options
        raise ValueError('option out of context!')
return characters
# choose want alphabet generic or numeric generic
option = 'alphabet'
# choose length of size string
size = 10
characters = get_option(option)
new_number = string_generator(size,characters)
print(new_number) |
gym_trading/envs/simulator.py | AdrianP-/gym_trading | 109 | 12768164 | <reponame>AdrianP-/gym_trading<filename>gym_trading/envs/simulator.py<gh_stars>100-1000
import numpy as np
import pandas as pd
from .feature_engineering import FeatureEngineering
class Simulator(object):
def __init__(self, csv_name, train_split, dummy_period=None, train=True, multiple_trades=False):
if "EUR" in csv_name:
df = pd.read_csv(csv_name, parse_dates=[[0, 1]], header=None,
names=['Date', 'Time', 'Open', 'High', 'Low', 'Close', 'Volume'])
df = df[~np.isnan(df['Open'])].set_index('Date_Time')
else:
df = pd.read_csv(csv_name, usecols=['Date', 'High', 'Low', 'Open', 'Close', 'Volume'])
df = df[~np.isnan(df['Open'])].set_index('Date')
df = FeatureEngineering(df).get_df_processed()
##Attributes
self.data = df
self.date_time = df.index
self.count = df.shape[0]
self.train_end_index = int(train_split * self.count)
# Attributes related to the observation state: Return
# print(self.data.head(1))
data_dropped = self.data.drop(['Volume', 'Open', 'Close', 'High', 'Low'], axis=1)
print(data_dropped.head(1))
self.states = data_dropped.values
self.min_values = data_dropped.min(axis=0).values
self.max_values = data_dropped.max(axis=0).values
# Generate previous Close
if dummy_period is not None:
close_prices = pd.DataFrame()
close_prices['Close'] = self.data["Close"]
for i in range(1, dummy_period + 1):
close_prices['Close (n - %s)' % i] = self.data['Close'].shift(i)
self.close = close_prices.values
self._reset()
def _reset(self, train=True):
if train:
obs = self.states[0]
self.current_index = 1
self._end = self.train_end_index
else:
self.current_index = self.train_end_index + 1
obs = self.states[self.current_index]
self._end = self.count - 1
self._data = self.data.iloc[self.current_index:self._end + 1]
return obs
def _step(self, open_trade, duration_trade):
if open_trade:
obs = self.states[self.current_index] + [open_trade] + [duration_trade]
else:
obs = self.states[self.current_index]
self.current_index += 1
done = self.current_index > self._end
return obs, done
|
tests/integ/test_record_set.py | LastRemote/sagemaker-python-sdk | 1,690 | 12768173 | <filename>tests/integ/test_record_set.py
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import absolute_import
from six.moves.urllib.parse import urlparse
from sagemaker import KMeans
from tests.integ import datasets
def test_record_set(sagemaker_session, cpu_instance_type):
"""Test the method ``AmazonAlgorithmEstimatorBase.record_set``.
In particular, test that the objects uploaded to the S3 bucket are encrypted.
"""
kmeans = KMeans(
role="SageMakerRole",
instance_count=1,
instance_type=cpu_instance_type,
k=10,
sagemaker_session=sagemaker_session,
)
record_set = kmeans.record_set(datasets.one_p_mnist()[0][:100], encrypt=True)
parsed_url = urlparse(record_set.s3_data)
s3_client = sagemaker_session.boto_session.client("s3")
head = s3_client.head_object(Bucket=parsed_url.netloc, Key=parsed_url.path.lstrip("/"))
assert head["ServerSideEncryption"] == "AES256"
|
bin/rstrip.py | cwickham/merely-useful.github.io | 190 | 12768175 | #!/usr/bin/env python
'''
Strip trailing whitespaces from lines.
Usage: rstrip.py file file...
'''
import sys
def main(filenames):
for f in filenames:
with open(f, 'r') as reader:
lines = reader.readlines()
lines = [x.rstrip() + '\n' for x in lines]
with open(f, 'w') as writer:
writer.writelines(lines)
if __name__ == '__main__':
main(sys.argv[1:])
|
integrations/tensorflow_v1/classification.py | clementpoiret/sparseml | 922 | 12768179 | <filename>integrations/tensorflow_v1/classification.py<gh_stars>100-1000
# Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Perform optimization tasks on image classification tensorflow_v1 including:
* Model training
* Model pruning
* Sparse transfer learning
* pruning sensitivity analysis
* ONNX export
##########
Command help:
usage: classification.py [-h] {train,export,pruning_sensitivity} ...
Run tasks on classification models and datasets using the sparseml API
positional arguments:
{train,export,pruning_sensitivity}
optional arguments:
-h, --help show this help message and exit
##########
train command help:
usage: classification.py train [-h] --arch-key ARCH_KEY
[--pretrained PRETRAINED]
[--pretrained-dataset PRETRAINED_DATASET]
[--checkpoint-path CHECKPOINT_PATH]
[--model-kwargs MODEL_KWARGS] --dataset DATASET
--dataset-path DATASET_PATH
[--dataset-kwargs DATASET_KWARGS]
[--model-tag MODEL_TAG] [--save-dir SAVE_DIR]
[--dataset-parallel-calls DATASET_PARALLEL_CALLS]
[--shuffle-buffer-size SHUFFLE_BUFFER_SIZE]
[--recipe-path RECIPE_PATH]
[--sparse-transfer-learn] [--eval-mode]
--train-batch-size TRAIN_BATCH_SIZE
--test-batch-size TEST_BATCH_SIZE
[--logs-dir LOGS_DIR]
[--save-best-after SAVE_BEST_AFTER]
[--save-epochs SAVE_EPOCHS [SAVE_EPOCHS ...]]
[--init-lr INIT_LR] [--optim-args OPTIM_ARGS]
Train and/or prune an image classification model
optional arguments:
-h, --help show this help message and exit
--arch-key ARCH_KEY The type of model to use, ex: resnet50, vgg16,
mobilenet put as help to see the full list (will raise
an exception with the list)
--pretrained PRETRAINED
The type of pretrained weights to use, default is true
to load the default pretrained weights for the model.
Otherwise should be set to the desired weights type:
[base, optim, optim-perf]. To not load any weights set
to one of [none, false]
--pretrained-dataset PRETRAINED_DATASET
The dataset to load pretrained weights for if
pretrained is set. Default is None which will load the
default dataset for the architecture. Ex can be set to
imagenet, cifar10, etc
--checkpoint-path CHECKPOINT_PATH
A path to a previous checkpoint to load the state from
and resume the state for. If provided, pretrained will
be ignored
--model-kwargs MODEL_KWARGS
                        keyword arguments to be passed to model constructor,
should be given as a json object
--dataset DATASET The dataset to use for training, ex: imagenet,
imagenette, cifar10, etc. Set to imagefolder for a
generic dataset setup with an image folder structure
setup like imagenet or loadable by a dataset in
sparseml.tensorflow_v1.datasets
--dataset-path DATASET_PATH
The root path to where the dataset is stored
--dataset-kwargs DATASET_KWARGS
kew word arguments to be passed to dataset
constructor, should be given as a json object
--model-tag MODEL_TAG
A tag to use for the model for saving results under
save-dir, defaults to the model arch and dataset used
--save-dir SAVE_DIR The path to the directory for saving results
--dataset-parallel-calls DATASET_PARALLEL_CALLS
the number of parallel workers for dataset loading
--shuffle-buffer-size SHUFFLE_BUFFER_SIZE
Shuffle buffer size for dataset loading
--recipe-path RECIPE_PATH
The path to the yaml file containing the modifiers and
schedule to apply them with. If set to
'transfer_learning', then will create a schedule to
enable sparse transfer learning
--sparse-transfer-learn
Enable sparse transfer learning modifiers to enforce
the sparsity for already sparse layers. The modifiers
are added to the ones to be loaded from the recipe-
path
--eval-mode Puts into evaluation mode so that the model can be
evaluated on the desired dataset
--train-batch-size TRAIN_BATCH_SIZE
The batch size to use while training
--test-batch-size TEST_BATCH_SIZE
The batch size to use while testing
--logs-dir LOGS_DIR The path to the directory for saving logs
--save-best-after SAVE_BEST_AFTER
start saving the best validation result after the
given epoch completes until the end of training
--save-epochs SAVE_EPOCHS [SAVE_EPOCHS ...]
epochs to save checkpoints at
--init-lr INIT_LR The initial learning rate to use while training, the
actual initial value used should be set by the
sparseml recipe
--optim-args OPTIM_ARGS
Additional args to be passed to the optimizer passed
in as a json object
##########
export command help:
usage: classification.py export [-h] --arch-key ARCH_KEY
[--pretrained PRETRAINED]
[--pretrained-dataset PRETRAINED_DATASET]
[--checkpoint-path CHECKPOINT_PATH]
[--model-kwargs MODEL_KWARGS] --dataset
DATASET --dataset-path DATASET_PATH
[--dataset-kwargs DATASET_KWARGS]
[--model-tag MODEL_TAG] [--save-dir SAVE_DIR]
[--num-samples NUM_SAMPLES]
[--onnx-opset ONNX_OPSET]
Export a model to onnx as well as store sample inputs, outputs, and labels
optional arguments:
-h, --help show this help message and exit
--arch-key ARCH_KEY The type of model to use, ex: resnet50, vgg16,
mobilenet put as help to see the full list (will raise
an exception with the list)
--pretrained PRETRAINED
The type of pretrained weights to use, default is true
to load the default pretrained weights for the model.
Otherwise should be set to the desired weights type:
[base, optim, optim-perf]. To not load any weights set
to one of [none, false]
--pretrained-dataset PRETRAINED_DATASET
The dataset to load pretrained weights for if
pretrained is set. Default is None which will load the
default dataset for the architecture. Ex can be set to
imagenet, cifar10, etc
--checkpoint-path CHECKPOINT_PATH
A path to a previous checkpoint to load the state from
and resume the state for. If provided, pretrained will
be ignored
--model-kwargs MODEL_KWARGS
                        keyword arguments to be passed to model constructor,
should be given as a json object
--dataset DATASET The dataset to use for training, ex: imagenet,
imagenette, cifar10, etc. Set to imagefolder for a
generic dataset setup with an image folder structure
setup like imagenet or loadable by a dataset in
sparseml.tensorflow_v1.datasets
--dataset-path DATASET_PATH
The root path to where the dataset is stored
--dataset-kwargs DATASET_KWARGS
                        keyword arguments to be passed to dataset
constructor, should be given as a json object
--model-tag MODEL_TAG
A tag to use for the model for saving results under
save-dir, defaults to the model arch and dataset used
--save-dir SAVE_DIR The path to the directory for saving results
--num-samples NUM_SAMPLES
The number of samples to export along with the model
onnx and pth files (sample inputs and labels as well
as the outputs from model execution)
--onnx-opset ONNX_OPSET
The onnx opset to use for export. Default is 11
##########
pruning_sensitivity command help:
usage: classification.py pruning_sensitivity [-h] --arch-key ARCH_KEY
[--pretrained PRETRAINED]
[--pretrained-dataset PRETRAINED_DATASET]
[--checkpoint-path CHECKPOINT_PATH]
[--model-kwargs MODEL_KWARGS]
--dataset DATASET --dataset-path
DATASET_PATH
[--dataset-kwargs DATASET_KWARGS]
[--model-tag MODEL_TAG]
[--save-dir SAVE_DIR]
[--dataset-parallel-calls
DATASET_PARALLEL_CALLS]
[--shuffle-buffer-size SHUFFLE_BUFFER_SIZE]
[--approximate]
[--steps-per-measurement
STEPS_PER_MEASUREMENT]
[--batch-size BATCH_SIZE]
Run a kernel sparsity (pruning) analysis for a given model
optional arguments:
-h, --help show this help message and exit
--arch-key ARCH_KEY The type of model to use, ex: resnet50, vgg16,
mobilenet put as help to see the full list (will raise
an exception with the list)
--pretrained PRETRAINED
The type of pretrained weights to use, default is true
to load the default pretrained weights for the model.
Otherwise should be set to the desired weights type:
[base, optim, optim-perf]. To not load any weights set
to one of [none, false]
--pretrained-dataset PRETRAINED_DATASET
The dataset to load pretrained weights for if
pretrained is set. Default is None which will load the
default dataset for the architecture. Ex can be set to
imagenet, cifar10, etc
--checkpoint-path CHECKPOINT_PATH
A path to a previous checkpoint to load the state from
and resume the state for. If provided, pretrained will
be ignored
--model-kwargs MODEL_KWARGS
                        keyword arguments to be passed to model constructor,
should be given as a json object
--dataset DATASET The dataset to use for training, ex: imagenet,
imagenette, cifar10, etc. Set to imagefolder for a
generic dataset setup with an image folder structure
setup like imagenet or loadable by a dataset in
sparseml.tensorflow_v1.datasets
--dataset-path DATASET_PATH
The root path to where the dataset is stored
--dataset-kwargs DATASET_KWARGS
                        keyword arguments to be passed to dataset
constructor, should be given as a json object
--model-tag MODEL_TAG
A tag to use for the model for saving results under
save-dir, defaults to the model arch and dataset used
--save-dir SAVE_DIR The path to the directory for saving results
--dataset-parallel-calls DATASET_PARALLEL_CALLS
the number of parallel workers for dataset loading
--shuffle-buffer-size SHUFFLE_BUFFER_SIZE
Shuffle buffer size for dataset loading
--approximate True to approximate without running data through the
model, otherwise will run a one shot analysis
--steps-per-measurement STEPS_PER_MEASUREMENT
The number of steps (batches) to run for each
measurement
--batch-size BATCH_SIZE
The batch size to use while performing analysis
#########
EXAMPLES
#########
##########
Example command for pruning resnet50 on imagenet dataset:
python scripts/tensorflow_v1/classification.py train \
--recipe-path ~/sparseml_recipes/pruning_resnet50.yaml \
--arch-key resnet50 --dataset imagenet --dataset-path ~/datasets/ILSVRC2012 \
--train-batch-size 256 --test-batch-size 1024
##########
Example command for transfer learning sparse mobilenet_v1 on an image folder dataset:
python scripts/tensorflow_v1/classification.py train \
--sparse-transfer-learn \
--recipe-path ~/sparseml_recipes/pruning_mobilenet.yaml \
--arch-key mobilenet_v1 --pretrained optim \
--dataset imagefolder --dataset-path ~/datasets/my_imagefolder_dataset \
--train-batch-size 256 --test-batch-size 1024
##########
Example command for exporting ResNet50:
python scripts/tensorflow_v1/classification.py export \
--arch-key resnet50 --dataset imagenet --dataset-path ~/datasets/ILSVRC2012
##########
Example command for running approximated KS sensitivity analysis on mobilenet:
python scripts/tensorflow_v1/classification.py pruning_sensitivity \
--approximate \
--arch-key mobilenet --dataset imagenet \
--dataset-path ~/datasets/ILSVRC2012
##########
Example command for running one shot KS sensitivity analysis on resnet50 for coco:
python scripts/tensorflow_v1/classification.py pruning_sensitivity \
--arch-key resnet50 --dataset imagenet \
--dataset-path ~/datasets/ILSVRC2012
"""
import argparse
import json
import math
import os
from typing import Dict, Optional, Tuple
import numpy
from sparseml import get_main_logger
from sparseml.tensorflow_v1.datasets import (
Dataset,
DatasetRegistry,
create_split_iterators_handle,
)
from sparseml.tensorflow_v1.models import ModelRegistry
from sparseml.tensorflow_v1.optim import (
ConstantPruningModifier,
ScheduledModifierManager,
pruning_loss_sens_magnitude,
pruning_loss_sens_one_shot,
pruning_loss_sens_op_vars,
)
from sparseml.tensorflow_v1.utils import (
GraphExporter,
accuracy,
batch_cross_entropy_loss,
tf_compat,
write_simple_summary,
)
from sparseml.utils import create_dirs
LOGGER = get_main_logger()
TRAIN_COMMAND = "train"
EXPORT_COMMAND = "export"
PRUNING_SENSITVITY_COMMAND = "pruning_sensitivity"
def parse_args():
parser = argparse.ArgumentParser(
description="Run tasks on classification models and datasets "
"using the sparseml API"
)
subparsers = parser.add_subparsers(dest="command")
train_parser = subparsers.add_parser(
TRAIN_COMMAND,
description="Train and/or prune an image classification model",
)
export_parser = subparsers.add_parser(
EXPORT_COMMAND,
description="Export a model to onnx as well as "
"store sample inputs, outputs, and labels",
)
pruning_sensitivity_parser = subparsers.add_parser(
PRUNING_SENSITVITY_COMMAND,
description="Run a kernel sparsity (pruning) analysis for a given model",
)
parsers = [
train_parser,
export_parser,
pruning_sensitivity_parser,
]
for par in parsers:
# general arguments
# model args
par.add_argument(
"--arch-key",
type=str,
required=True,
help="The type of model to use, ex: resnet50, vgg16, mobilenet "
"put as help to see the full list (will raise an exception with the list)",
)
par.add_argument(
"--pretrained",
type=str,
default=True,
help="The type of pretrained weights to use, "
"default is true to load the default pretrained weights for the model. "
"Otherwise should be set to the desired weights type: "
"[base, optim, optim-perf]. "
"To not load any weights set to one of [none, false]",
)
par.add_argument(
"--pretrained-dataset",
type=str,
default=None,
help="The dataset to load pretrained weights for if pretrained is set. "
"Default is None which will load the default dataset for the architecture."
" Ex can be set to imagenet, cifar10, etc",
)
par.add_argument(
"--checkpoint-path",
type=str,
default=None,
help="A path to a previous checkpoint to load the state from and "
"resume the state for. If provided, pretrained will be ignored",
)
par.add_argument(
"--model-kwargs",
type=json.loads,
default={},
help="kew word arguments to be passed to model constructor, should be "
" given as a json object",
)
# dataset args
par.add_argument(
"--dataset",
type=str,
required=True,
help="The dataset to use for training, "
"ex: imagenet, imagenette, cifar10, etc. "
"Set to imagefolder for a generic dataset setup "
"with an image folder structure setup like imagenet or loadable by a "
"dataset in sparseml.tensorflow_v1.datasets",
)
par.add_argument(
"--dataset-path",
type=str,
required=True,
help="The root path to where the dataset is stored",
)
par.add_argument(
"--dataset-kwargs",
type=json.loads,
default={},
help="kew word arguments to be passed to dataset constructor, should be "
" given as a json object",
)
# logging and saving
par.add_argument(
"--model-tag",
type=str,
default=None,
help="A tag to use for the model for saving results under save-dir, "
"defaults to the model arch and dataset used",
)
par.add_argument(
"--save-dir",
type=str,
default="tensorflow_v1_classification",
help="The path to the directory for saving results",
)
# task specific arguments
if par in [train_parser, pruning_sensitivity_parser]:
par.add_argument(
"--dataset-parallel-calls",
type=int,
default=4,
help="the number of parallel workers for dataset loading",
)
par.add_argument(
"--shuffle-buffer-size",
type=int,
default=1000,
help="Shuffle buffer size for dataset loading",
)
if par == train_parser:
par.add_argument(
"--recipe-path",
type=str,
default=None,
help="The path to the yaml file containing the modifiers and "
"schedule to apply them with. If set to 'transfer_learning', "
"then will create a schedule to enable sparse transfer learning",
)
par.add_argument(
"--sparse-transfer-learn",
action="store_true",
help=(
"Enable sparse transfer learning modifiers to enforce the sparsity "
"for already sparse layers. The modifiers are added to the "
"ones to be loaded from the recipe-path"
),
)
par.add_argument(
"--eval-mode",
action="store_true",
help="Puts into evaluation mode so that the model can be "
"evaluated on the desired dataset",
)
par.add_argument(
"--train-batch-size",
type=int,
required=True,
help="The batch size to use while training",
)
par.add_argument(
"--test-batch-size",
type=int,
required=True,
help="The batch size to use while testing",
)
par.add_argument(
"--logs-dir",
type=str,
default=os.path.join(
"tensorflow_v1_classification_train", "tensorboard-logs"
),
help="The path to the directory for saving logs",
)
par.add_argument(
"--save-best-after",
type=int,
default=-1,
help="start saving the best validation result after the given "
"epoch completes until the end of training",
)
par.add_argument(
"--save-epochs",
type=int,
default=[],
nargs="+",
help="epochs to save checkpoints at",
)
par.add_argument(
"--init-lr",
type=float,
default=1e-9,
help="The initial learning rate to use while training, "
"the actual initial value used should be set by the sparseml recipe",
)
par.add_argument(
"--optim-args",
type=json.loads,
default={},
help="Additional args to be passed to the optimizer passed in"
" as a json object",
)
if par == export_parser:
par.add_argument(
"--num-samples",
type=int,
default=100,
help="The number of samples to export along with the model onnx "
"and pth files (sample inputs and labels as well as the outputs "
"from model execution)",
)
par.add_argument(
"--onnx-opset",
type=int,
default=11,
help="The onnx opset to use for export. Default is 11",
)
if par == pruning_sensitivity_parser:
par.add_argument(
"--approximate",
action="store_true",
help="True to approximate without running data through the model, "
"otherwise will run a one shot analysis",
)
par.add_argument(
"--steps-per-measurement",
type=int,
default=15,
help="The number of steps (batches) to run for each measurement",
)
par.add_argument(
"--batch-size",
type=int,
default=64,
help="The batch size to use while performing analysis",
)
return parser.parse_args()
def _setup_save_dirs(args) -> Tuple[str, Optional[str]]:
# logging and saving setup
save_dir = os.path.abspath(os.path.expanduser(args.save_dir))
logs_dir = (
        os.path.abspath(os.path.expanduser(args.logs_dir))
if args.command == TRAIN_COMMAND
else None
)
if not args.model_tag:
model_tag = "{}_{}".format(args.arch_key.replace("/", "."), args.dataset)
model_id = model_tag
model_inc = 0
# set location to check for models with same name
model_main_dir = logs_dir or save_dir
while os.path.exists(os.path.join(model_main_dir, model_id)):
model_inc += 1
model_id = "{}__{:02d}".format(model_tag, model_inc)
else:
model_id = args.model_tag
save_dir = os.path.join(save_dir, model_id)
create_dirs(save_dir)
# logs dir setup
if args.command == TRAIN_COMMAND:
logs_dir = os.path.join(logs_dir, model_id)
create_dirs(logs_dir)
else:
logs_dir = None
LOGGER.info("Model id is set to {}".format(model_id))
return save_dir, logs_dir
def _create_dataset(args, train=True, image_size=None) -> Tuple[Dataset, int]:
kwargs = args.dataset_kwargs
if "image_size" in kwargs:
image_size = kwargs["image_size"]
del kwargs["image_size"]
dataset = DatasetRegistry.create(
args.dataset,
root=args.dataset_path,
train=train,
image_size=image_size,
**kwargs,
)
LOGGER.info("created {} dataset: {}".format("train" if train else "val", dataset))
# get num_classes
if args.dataset == "imagefolder":
num_classes = dataset.num_classes
else:
dataset_attributes = DatasetRegistry.attributes(args.dataset)
num_classes = dataset_attributes["num_classes"]
return dataset, num_classes
def _build_dataset(args, dataset: Dataset, batch_size: int) -> Dataset:
return dataset.build(
batch_size,
shuffle_buffer_size=args.shuffle_buffer_size,
prefetch_buffer_size=batch_size,
num_parallel_calls=args.dataset_parallel_calls,
)
def _create_model(args, num_classes, inputs, training=False):
outputs = ModelRegistry.create(
args.arch_key,
inputs,
training=training,
num_classes=num_classes,
**args.model_kwargs,
)
LOGGER.info("created model {}".format(args.arch_key))
return outputs
def _load_model(args, sess, checkpoint_path=None):
sess.run(
[
tf_compat.global_variables_initializer(),
tf_compat.local_variables_initializer(),
]
)
checkpoint_path = checkpoint_path or args.checkpoint_path
ModelRegistry.load_pretrained(
args.arch_key,
pretrained=args.pretrained,
pretrained_dataset=args.pretrained_dataset,
pretrained_path=checkpoint_path,
sess=sess,
)
if checkpoint_path:
LOGGER.info("Loaded model weights from checkpoint: {}".format(checkpoint_path))
def _save_checkpoint(args, sess, save_dir, checkpoint_name) -> str:
    checkpoint_path = os.path.join(save_dir, checkpoint_name, "model")
create_dirs(checkpoint_path)
saver = ModelRegistry.saver(args.arch_key)
saved_name = saver.save(sess, checkpoint_path)
checkpoint_path = os.path.join(checkpoint_path, saved_name)
LOGGER.info("Checkpoint saved to {}".format(checkpoint_path))
return checkpoint_path
def _save_recipe(
recipe_manager: ScheduledModifierManager,
save_dir: str,
):
recipe_save_path = os.path.join(save_dir, "recipe.yaml")
recipe_manager.save(recipe_save_path)
LOGGER.info(f"Saved recipe to {recipe_save_path}")
def train(args, save_dir, logs_dir):
# setup dataset
with tf_compat.device("/cpu:0"):
train_dataset, _ = _create_dataset(args, train=True)
val_dataset, num_classes = _create_dataset(args, train=False)
# calc steps
train_steps = math.ceil(len(train_dataset) / args.train_batch_size)
val_steps = math.ceil(len(val_dataset) / args.test_batch_size)
# build datasets
train_dataset = _build_dataset(args, train_dataset, args.train_batch_size)
val_dataset = _build_dataset(args, val_dataset, args.test_batch_size)
handle, iterator, (train_iter, val_iter) = create_split_iterators_handle(
[train_dataset, val_dataset]
)
# set up model graph
images, labels = iterator.get_next()
training = tf_compat.placeholder(dtype=tf_compat.bool, shape=[])
outputs = _create_model(args, num_classes, images, training)
# set up training objects
loss = batch_cross_entropy_loss(outputs, labels)
acc = accuracy(outputs, labels)
global_step = tf_compat.train.get_or_create_global_step()
train_op = tf_compat.train.AdamOptimizer(
learning_rate=args.init_lr, **args.optim_args
).minimize(loss, global_step=global_step)
update_ops = tf_compat.get_collection(tf_compat.GraphKeys.UPDATE_OPS)
LOGGER.info("Created update ops for training")
# set up sparseml modifier ops
add_mods = (
ConstantPruningModifier(params="__ALL__")
if args.sparse_transfer_learn
else None
)
manager = ScheduledModifierManager.from_yaml(
file_path=args.recipe_path, add_modifiers=add_mods
)
mod_ops, mod_extras = manager.create_ops(train_steps, global_step)
_save_recipe(recipe_manager=manager, save_dir=save_dir)
with tf_compat.Session() as sess:
# set up tensorboard logging
summary_writer = tf_compat.summary.FileWriter(logs_dir, sess.graph)
summaries = tf_compat.summary.merge_all()
LOGGER.info("Logging to tensorboard at {}".format(logs_dir))
# initialize variables, load pretrained weights, initialize modifiers
train_iter_handle, val_iter_handle = sess.run(
[train_iter.string_handle(), val_iter.string_handle()]
)
LOGGER.info("Initialized graph variables")
_load_model(args, sess)
manager.initialize_session()
LOGGER.info("Initialized SparseML modifiers")
best_loss = None
for epoch in range(manager.max_epochs):
# train
LOGGER.info("Training for epoch {}...".format(epoch))
sess.run(train_iter.initializer)
train_acc, train_loss = [], []
for step in range(train_steps):
_, __, meas_step, meas_loss, meas_acc, meas_summ = sess.run(
[train_op, update_ops, global_step, loss, acc, summaries],
feed_dict={handle: train_iter_handle, training: True},
)
if step >= train_steps - 1:
# log the general summaries on the last training step
summary_writer.add_summary(meas_summ, meas_step)
# run modifier ops
sess.run(mod_ops)
# summarize
write_simple_summary(summary_writer, "Train/Loss", meas_loss, meas_step)
write_simple_summary(
summary_writer, "Train/Acc", meas_acc * 100.0, meas_step
)
train_acc.append(meas_acc)
train_loss.append(meas_loss)
LOGGER.info(
"Epoch {} - Train Loss: {}, Train Acc: {}".format(
epoch, numpy.mean(train_loss).item(), numpy.mean(train_acc).item()
)
)
# val
LOGGER.info("Validating for epoch {}...".format(epoch))
sess.run(val_iter.initializer)
val_acc, val_loss = [], []
for step in range(val_steps):
meas_loss, meas_acc = sess.run(
[loss, acc],
feed_dict={handle: val_iter_handle, training: False},
)
val_acc.append(meas_acc)
val_loss.append(meas_loss)
write_simple_summary(
summary_writer, "Val/Loss", numpy.mean(val_loss).item(), epoch
)
write_simple_summary(
summary_writer, "Val/Acc", numpy.mean(val_acc).item(), epoch
)
val_loss = numpy.mean(val_loss).item()
LOGGER.info(
"Epoch {} - Val Loss: {}, Val Acc: {}".format(
                    epoch, val_loss, numpy.mean(val_acc).item()
)
)
if epoch >= args.save_best_after and (
best_loss is None or val_loss <= best_loss
):
_save_checkpoint(args, sess, save_dir, "checkpoint-best")
best_loss = val_loss
if args.save_epochs and epoch in args.save_epochs:
_save_checkpoint(
args, sess, save_dir, "checkpoint-epoch-{}".format(epoch)
)
# cleanup graph and save final checkpoint
manager.complete_graph()
checkpoint_path = _save_checkpoint(args, sess, save_dir, "final-checkpoint")
LOGGER.info("Running ONNX export flow")
export(
args,
save_dir,
checkpoint_path=checkpoint_path,
skip_samples=True,
num_classes=num_classes,
opset=11,
)
def export(
args,
save_dir,
checkpoint_path=None,
skip_samples=False,
num_classes=None,
opset=None,
):
assert not skip_samples or num_classes
# dataset creation
if not skip_samples:
val_dataset, num_classes = _create_dataset(args, train=False)
with tf_compat.Graph().as_default():
input_shape = ModelRegistry.input_shape(args.arch_key)
inputs = tf_compat.placeholder(
tf_compat.float32, [None] + list(input_shape), name="inputs"
)
outputs = _create_model(args, num_classes, inputs)
with tf_compat.Session() as sess:
_load_model(
args, sess, checkpoint_path=checkpoint_path or args.checkpoint_path
)
exporter = GraphExporter(save_dir)
if not skip_samples:
# Export a batch of samples and expected outputs
tf_dataset = val_dataset.build(
args.num_samples, repeat_count=1, num_parallel_calls=1
)
tf_iter = tf_compat.data.make_one_shot_iterator(tf_dataset)
features, _ = tf_iter.get_next()
inputs_val = sess.run(features)
exporter.export_samples([inputs], [inputs_val], [outputs], sess)
# Export model to tensorflow checkpoint format
LOGGER.info("exporting tensorflow in {}".format(save_dir))
exporter.export_checkpoint(sess=sess)
# Export model to pb format
LOGGER.info("exporting pb in {}".format(exporter.pb_path))
exporter.export_pb(outputs=[outputs])
# Export model to onnx format
LOGGER.info("exporting onnx in {}".format(exporter.onnx_path))
exporter.export_onnx([inputs], [outputs], opset=opset or args.onnx_opset)
def pruning_loss_sensitivity(args, save_dir):
input_shape = ModelRegistry.input_shape(args.arch_key)
train_dataset, num_classes = _create_dataset(
args, train=True, image_size=input_shape[1]
)
with tf_compat.Graph().as_default() as graph:
# create model graph
inputs = tf_compat.placeholder(
tf_compat.float32, [None] + list(input_shape), name="inputs"
)
outputs = _create_model(args, num_classes, inputs)
with tf_compat.Session() as sess:
_load_model(args, sess, checkpoint_path=args.checkpoint_path)
if args.approximate:
LOGGER.info("Running weight magnitude loss sensitivity analysis...")
analysis = pruning_loss_sens_magnitude(graph, sess)
else:
op_vars = pruning_loss_sens_op_vars(graph)
train_steps = math.ceil(len(train_dataset) / args.batch_size)
train_dataset = _build_dataset(args, train_dataset, args.batch_size)
handle, iterator, dataset_iter = create_split_iterators_handle(
[train_dataset]
)
dataset_iter = dataset_iter[0]
images, labels = iterator.get_next()
loss = batch_cross_entropy_loss(outputs, labels)
tensor_names = ["inputs:0", labels.name]
sess.run(dataset_iter.initializer)
def feed_dict_creator(step: int) -> Dict[str, tf_compat.Tensor]:
assert step < train_steps
batch_data = [
tens.eval(session=sess) for tens in dataset_iter.get_next()
]
return dict(zip(tensor_names, batch_data))
LOGGER.info("Running one shot loss sensitivity analysis...")
analysis = pruning_loss_sens_one_shot(
op_vars=op_vars,
loss_tensor=loss,
steps_per_measurement=args.steps_per_measurement,
feed_dict_creator=feed_dict_creator,
sess=sess,
)
# saving and printing results
LOGGER.info("completed...")
LOGGER.info("Saving results in {}".format(save_dir))
analysis.save_json(
os.path.join(
save_dir,
"ks_approx_sensitivity.json"
if args.approximate
else "ks_one_shot_sensitivity.json",
)
)
analysis.plot(
        os.path.join(
            save_dir,
            "ks_approx_sensitivity.png"
            if args.approximate
            else "ks_one_shot_sensitivity.png",
        ),
plot_integral=True,
)
analysis.print_res()
def main(args):
# set up saving and logging dirs
save_dir, logs_dir = _setup_save_dirs(args)
    # RUN COMMAND SPECIFIC TASKS
if args.command == TRAIN_COMMAND:
train(args, save_dir, logs_dir)
if args.command == EXPORT_COMMAND:
export(args, save_dir)
if args.command == PRUNING_SENSITVITY_COMMAND:
pruning_loss_sensitivity(args, save_dir)
if __name__ == "__main__":
args_ = parse_args()
main(args_)
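# Example invocation (editorial sketch, not part of the original script). The
# sub-command and the --arch-key flag are defined earlier in this file; the
# names used below ("train", "--arch-key") and the script filename are
# assumptions made for illustration only.
#
#   python classification.py train \
#       --arch-key resnet50 \
#       --dataset imagenette \
#       --dataset-path ~/datasets/imagenette \
#       --train-batch-size 64 \
#       --test-batch-size 64 \
#       --recipe-path ./recipe.yaml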
|
deepchem/splits/__init__.py | cjgalvin/deepchem | 3,782 | 12768190 | <gh_stars>1000+
"""
Gathers all splitters in one place for convenient imports
"""
# flake8: noqa
# basic splitter
from deepchem.splits.splitters import Splitter
from deepchem.splits.splitters import RandomSplitter
from deepchem.splits.splitters import RandomStratifiedSplitter
from deepchem.splits.splitters import RandomGroupSplitter
from deepchem.splits.splitters import SingletaskStratifiedSplitter
from deepchem.splits.splitters import IndexSplitter
from deepchem.splits.splitters import SpecifiedSplitter
# molecule splitter
from deepchem.splits.splitters import ScaffoldSplitter
from deepchem.splits.splitters import MolecularWeightSplitter
from deepchem.splits.splitters import MaxMinSplitter
from deepchem.splits.splitters import FingerprintSplitter
from deepchem.splits.splitters import ButinaSplitter
# other splitter
from deepchem.splits.task_splitter import merge_fold_datasets
from deepchem.splits.task_splitter import TaskSplitter
#################################################################
# Removed API
#################################################################
import logging
logger = logging.getLogger(__name__)
class IndiceSplitter:
def __init__(self, valid_indices=None, test_indices=None):
raise ImportError("IndiceSplitter was renamed to SpecifiedSplitter.\n"
"Please use SpecifiedSplitter instead of IndiceSplitter.")
|
neuralcompression/layers/_generalized_divisive_normalization.py | tallamjr/NeuralCompression | 233 | 12768194 | <reponame>tallamjr/NeuralCompression
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import functools
from typing import Callable, Optional
import torch
import torch.nn.functional
from torch import Tensor
from torch.nn import Module, Parameter
from ._non_negative_parameterization import NonNegativeParameterization
class GeneralizedDivisiveNormalization(Module):
"""Applies generalized divisive normalization for each channel across a
batch of data.
Implements an activation function that is a multivariate generalization of
the following sigmoid-like function:
.. math::
y_{i}=\\frac{x_{i}}{(\\beta_{i}+\\sum_{j}\\gamma_{ij}|x_{j}|^{\\alpha_{ij}})^{\\epsilon_{i}}}
where :math:`i` and :math:`j` map over channels.
This implementation never sums across spatial dimensions. It is similar to
local response normalization, but much more flexible, as :math:`\\alpha`,
:math:`\\beta`, :math:`\\gamma`, and :math:`\\epsilon` are trainable
parameters.
The method was originally described in:
| “Density Modeling of Images using a Generalized Normalization
Transformation”
| <NAME>, <NAME>, <NAME>
| https://arxiv.org/abs/1511.06281
and expanded in:
| “End-to-end Optimized Image Compression”
| <NAME>, <NAME>, <NAME>
| https://arxiv.org/abs/1611.01704
Args:
channels: number of channels in the input.
inverse: compute the generalized divisive normalization response. If
``True``, compute the inverse generalized divisive normalization
response (one step of fixed point iteration to invert the
generalized divisive normalization; the division is replaced by
multiplication).
alpha_parameter: A ``Tensor`` means that the value of ``alpha`` is
fixed. ``None`` means that when the layer is initialized, a
``NonNegativeParameterization`` layer is created to train ``alpha``
(with a minimum value of ``1``). The default is a fixed value of
``1``.
beta_parameter: A ``Tensor`` means that the value of ``beta`` is fixed.
``None`` means that when the layer is initialized, a
``NonNegativeParameterization`` layer is created to train ``beta``
(with a minimum value of ``1e-6``).
epsilon_parameter: A ``Tensor`` means that the value of ``epsilon`` is
fixed. ``None`` means that when the layer is initialized, a
``NonNegativeParameterization`` layer is created to train
``epsilon`` (with a minimum value of 1e-6). The default is a fixed
value of ``1``.
gamma_parameter: A ``Tensor`` means that the value of ``gamma`` is
fixed. ``None`` means that when the layer is initialized, a
``NonNegativeParameterization`` layer is created to train
``gamma``.
alpha_initializer: initializes the ``alpha`` parameter. Only used if
``alpha`` is trained. Defaults to ``1``.
beta_initializer: initializes the ``beta`` parameter. Only used if
``beta`` is created when initializing the layer. Defaults to ``1``.
epsilon_initializer: initializes the ``epsilon`` parameter. Only used
if ``epsilon`` is trained. Defaults to ``1``.
gamma_initializer: initializes the ``gamma`` parameter. Only used if
``gamma`` is created when initializing the layer. Defaults to the
identity multiplied by ``0.1``. A good default value for the
diagonal is somewhere between ``0`` and ``0.5``. If set to ``0``
and ``beta`` is initialized as ``1``, the layer is effectively
initialized to the identity operation.
"""
alpha: Parameter
beta: Parameter
epsilon: Parameter
gamma: Parameter
def __init__(
self,
channels: int,
inverse: bool = False,
alpha_parameter: Optional[Tensor] = None,
beta_parameter: Optional[Tensor] = None,
epsilon_parameter: Optional[Tensor] = None,
gamma_parameter: Optional[Tensor] = None,
alpha_initializer: Optional[Callable[[Tensor], Tensor]] = None,
beta_initializer: Optional[Callable[[Tensor], Tensor]] = None,
epsilon_initializer: Optional[Callable[[Tensor], Tensor]] = None,
gamma_initializer: Optional[Callable[[Tensor], Tensor]] = None,
):
super(GeneralizedDivisiveNormalization, self).__init__()
self._channels = torch.tensor(channels, dtype=torch.int32)
self._inverse = inverse
if alpha_parameter is None:
if alpha_initializer is None:
alpha_initializer = functools.partial(
lambda x: torch.ones(x),
)
self._reparameterized_alpha = NonNegativeParameterization(
alpha_initializer(self._channels),
minimum=1,
)
if self._reparameterized_alpha.initial_value is not None:
self.alpha = Parameter(
self._reparameterized_alpha.initial_value,
)
else:
if isinstance(alpha_parameter, Parameter):
self.alpha = alpha_parameter
else:
alpha_parameter = torch.tensor(alpha_parameter)
self.alpha = Parameter(alpha_parameter)
if beta_parameter is None:
if beta_initializer is None:
beta_initializer = functools.partial(
lambda x: torch.ones(x),
)
self._reparameterized_beta = NonNegativeParameterization(
beta_initializer(self._channels),
minimum=1e-6,
)
if self._reparameterized_beta.initial_value is not None:
self.beta = Parameter(
self._reparameterized_beta.initial_value,
)
else:
if isinstance(beta_parameter, Parameter):
self.beta = beta_parameter
else:
beta_parameter = torch.tensor(beta_parameter)
self.beta = Parameter(beta_parameter)
if epsilon_parameter is None:
if epsilon_initializer is None:
epsilon_initializer = functools.partial(
lambda x: torch.ones(x),
)
self._reparameterized_epsilon = NonNegativeParameterization(
epsilon_initializer(self._channels),
minimum=1e-6,
)
if self._reparameterized_epsilon.initial_value is not None:
self.epsilon = Parameter(
self._reparameterized_epsilon.initial_value,
)
else:
if isinstance(epsilon_parameter, Parameter):
self.epsilon = epsilon_parameter
else:
epsilon_parameter = torch.tensor(epsilon_parameter)
self.epsilon = Parameter(epsilon_parameter)
if gamma_parameter is None:
if gamma_initializer is None:
gamma_initializer = functools.partial(
lambda x: 0.1 * torch.eye(x),
)
self._reparameterized_gamma = NonNegativeParameterization(
gamma_initializer(self._channels),
minimum=0,
)
if self._reparameterized_gamma.initial_value is not None:
self.gamma = Parameter(
self._reparameterized_gamma.initial_value,
)
else:
if isinstance(gamma_parameter, Parameter):
self.gamma = gamma_parameter
else:
gamma_parameter = torch.tensor(gamma_parameter)
self.gamma = Parameter(gamma_parameter)
def forward(self, x: Tensor) -> Tensor:
_, channels, _, _ = x.size()
y = torch.nn.functional.conv2d(
x ** 2,
torch.reshape(
self._reparameterized_gamma(self.gamma),
(channels, channels, 1, 1),
),
self._reparameterized_beta(self.beta),
)
if self._inverse:
return x * torch.sqrt(y)
return x * torch.rsqrt(y)
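# Minimal usage sketch (editorial addition, not part of the original module).
# The channel count and input shape are arbitrary example values.
#
#   gdn = GeneralizedDivisiveNormalization(channels=64)
#   igdn = GeneralizedDivisiveNormalization(channels=64, inverse=True)
#   x = torch.rand(8, 64, 32, 32)
#   y = gdn(x)        # normalized response, same NCHW shape as x
#   x_hat = igdn(y)   # one fixed-point step of the inverse transform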
|
tests-deprecating/milvus_benchmark/milvus_benchmark/env/local.py | CyberFlameGO/milvus | 10,504 | 12768232 | <reponame>CyberFlameGO/milvus<filename>tests-deprecating/milvus_benchmark/milvus_benchmark/env/local.py
import logging
from milvus_benchmark.env.base import BaseEnv
logger = logging.getLogger("milvus_benchmark.env.local")
class LocalEnv(BaseEnv):
"""docker env class wrapper"""
env_mode = "local"
def __init__(self, deploy_mode=None):
super(LocalEnv, self).__init__(deploy_mode)
def start_up(self, hostname, port):
res = True
try:
self.set_hostname(hostname)
except Exception as e:
logger.error(str(e))
res = False
return res
|
lib/matplotlib/tests/test_gridspec.py | jbbrokaw/matplotlib | 113 | 12768253 | import matplotlib.gridspec as gridspec
from nose.tools import assert_equal
def test_equal():
gs = gridspec.GridSpec(2, 1)
assert_equal(gs[0, 0], gs[0, 0])
assert_equal(gs[:, 0], gs[:, 0])
|
examples/password_git.py | qualichat/questionary | 851 | 12768255 | <gh_stars>100-1000
# -*- coding: utf-8 -*-
"""Example for a password question type.
Run example by typing `python -m examples.password_git` in your console."""
from pprint import pprint
import questionary
from examples import custom_style_dope
from questionary import Separator, Choice, prompt
def ask_pystyle(**kwargs):
# create the question object
question = questionary.password(
"Enter your git password", style=custom_style_dope, **kwargs
)
# prompt the user for an answer
return question.ask()
def ask_dictstyle(**kwargs):
questions = [
{"type": "password", "message": "Enter your git password", "name": "password"}
]
return prompt(questions, style=custom_style_dope, **kwargs)
if __name__ == "__main__":
pprint(ask_pystyle())
|
setup.py | john-veillette/pymc-learn | 187 | 12768267 | <gh_stars>100-1000
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
# from builtins import *
from codecs import open
from os.path import realpath, dirname, join
from setuptools import setup, find_packages
import sys
import re
DISTNAME = 'pymc-learn'
DESCRIPTION = "Practical Probabilistic Machine Learning in Python"
AUTHOR = 'Pymc-Learn Team'
AUTHOR_EMAIL = '<EMAIL>'
URL = "https://github.com/pymc-learn/pymc-learn"
classifiers = [
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Operating System :: OS Independent'
]
PROJECT_ROOT = dirname(realpath(__file__))
with open(join(PROJECT_ROOT, 'README.rst'), encoding='utf-8') as r:
readme = r.read()
REQUIREMENTS_FILE = join(PROJECT_ROOT, 'requirements.txt')
with open(REQUIREMENTS_FILE) as f:
install_reqs = f.read().splitlines()
if sys.version_info < (3, 4):
install_reqs.append('enum34')
def get_version():
VERSIONFILE = join('pmlearn', '__init__.py')
lines = open(VERSIONFILE, 'rt').readlines()
version_regex = r"^__version__ = ['\"]([^'\"]*)['\"]"
for line in lines:
mo = re.search(version_regex, line, re.M)
if mo:
return mo.group(1)
raise RuntimeError('Unable to find version in %s.' % (VERSIONFILE,))
with open('AUTHORS.txt') as a:
# reSt-ify the authors list
authors = ''
for author in a.read().split('\n'):
authors += '| '+author+'\n'
with open('LICENSE') as l:
license = l.read()
if __name__ == "__main__":
setup(
name=DISTNAME,
version=get_version(),
description=DESCRIPTION,
long_description=readme,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
url=URL,
license=license,
packages=find_packages(),
package_data={'docs': ['*']},
include_package_data=True,
zip_safe=False,
install_requires=install_reqs,
classifiers=classifiers
) |
fairness/algorithms/zafar/fair-classification-master/preferential_fairness/synthetic_data_demo/plot_synthetic_boundaries.py | yashwarlord/fairness-comparison | 146 | 12768308 | <gh_stars>100-1000
import matplotlib
import matplotlib.pyplot as plt # for plotting stuff
import os
import numpy as np
matplotlib.rcParams['text.usetex'] = True # for type-1 fonts
def get_line_coordinates(w, x1, x2):
y1 = (-w[0] - (w[1] * x1)) / w[2]
y2 = (-w[0] - (w[1] * x2)) / w[2]
return y1,y2
def plot_data(X, y, x_sensitive, w_arr, label_arr, lt_arr, fname, title, group=None):
# print fp_fn_arr
plt.figure()
num_to_draw = 200 # we will only draw a small number of points to avoid clutter
fs = 20 # font size for labels and legends
x_draw = X[:num_to_draw]
y_draw = y[:num_to_draw]
x_sensitive_draw = x_sensitive[:num_to_draw]
x_lim = [min(x_draw[:,-2]) - np.absolute(0.3*min(x_draw[:,-2])), max(x_draw[:,-2]) + np.absolute(0.5 * max(x_draw[:,-2]))]
y_lim = [min(x_draw[:,-1]) - np.absolute(0.3*min(x_draw[:,-1])), max(x_draw[:,-1]) + np.absolute(0.7 * max(x_draw[:,-1]))]
X_s_0 = x_draw[x_sensitive_draw == 0.0]
X_s_1 = x_draw[x_sensitive_draw == 1.0]
y_s_0 = y_draw[x_sensitive_draw == 0.0]
y_s_1 = y_draw[x_sensitive_draw == 1.0]
if w_arr is not None: # we are plotting the boundaries of a trained classifier
plt.scatter(X_s_0[y_s_0==1.0][:, -2], X_s_0[y_s_0==1.0][:, -1], color='green', marker='x', s=70, linewidth=2)
plt.scatter(X_s_0[y_s_0==-1.0][:, -2], X_s_0[y_s_0==-1.0][:, -1], color='red', marker='x', s=70, linewidth=2)
plt.scatter(X_s_1[y_s_1==1.0][:, -2], X_s_1[y_s_1==1.0][:, -1], color='green', marker='o', facecolors='none', s=70, linewidth=2)
plt.scatter(X_s_1[y_s_1==-1.0][:, -2], X_s_1[y_s_1==-1.0][:, -1], color='red', marker='o', facecolors='none', s=70, linewidth=2)
for i in range(0, len(w_arr)):
w = w_arr[i]
l = label_arr[i]
lt = lt_arr[i]
x1,x2 = min(x_draw[:,1]), max(x_draw[:,1])
y1,y2 = get_line_coordinates(w, x1, x2)
plt.plot([x1,x2], [y1,y2], lt, linewidth=3, label = l)
plt.title(title, fontsize=fs)
else: # just plotting the data
plt.scatter(X_s_0[y_s_0==1.0][:, -2], X_s_0[y_s_0==1.0][:, -1], color='green', marker='x', s=70, linewidth=2, label= "group-0 +ve")
plt.scatter(X_s_0[y_s_0==-1.0][:, -2], X_s_0[y_s_0==-1.0][:, -1], color='red', marker='x', s=70, linewidth=2, label= "group-0 -ve")
plt.scatter(X_s_1[y_s_1==1.0][:, -2], X_s_1[y_s_1==1.0][:, -1], color='green', marker='o', facecolors='none', s=70, linewidth=2, label= "group-1 +ve")
plt.scatter(X_s_1[y_s_1==-1.0][:, -2], X_s_1[y_s_1==-1.0][:, -1], color='red', marker='o', facecolors='none', s=70, linewidth=2, label= "group-1 -ve")
if True: # turn the ticks on or off
        plt.tick_params(axis='x', which='both', bottom='off', top='off', labelbottom='off') # don't need the ticks to see the data distribution
plt.tick_params(axis='y', which='both', left='off', right='off', labelleft='off')
plt.legend(loc=2, fontsize=fs)
plt.xlim(x_lim)
plt.ylim(y_lim)
plt.savefig(fname)
plt.show()
|
cactus/mime.py | danielchasehooper/Cactus | 1,048 | 12768346 | <filename>cactus/mime.py
import os
import mimetypes
MIMETYPE_MAP = {
'.js': 'text/javascript',
'.mov': 'video/quicktime',
'.mp4': 'video/mp4',
'.m4v': 'video/x-m4v',
'.3gp': 'video/3gpp',
'.woff': 'application/font-woff',
'.eot': 'application/vnd.ms-fontobject',
'.ttf': 'application/x-font-truetype',
'.otf': 'application/x-font-opentype',
'.svg': 'image/svg+xml',
}
MIMETYPE_DEFAULT = 'application/octet-stream'
def guess(path):
if not path:
return MIMETYPE_DEFAULT
base, ext = os.path.splitext(path)
if ext.lower() in MIMETYPE_MAP:
return MIMETYPE_MAP[ext.lower()]
mime_type, encoding = mimetypes.guess_type(path)
if mime_type:
return mime_type
return MIMETYPE_DEFAULT
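# Usage sketch (editorial addition): expected lookups given the table above.
#
#   guess("static/app.js")  # -> "text/javascript" (explicit MIMETYPE_MAP entry)
#   guess("index.html")     # -> "text/html" (falls through to mimetypes.guess_type)
#   guess("blob.xyz123")    # -> "application/octet-stream" (MIMETYPE_DEFAULT)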
|
Python/Tests/TestData/AstAnalysis/Functions.py | techkey/PTVS | 695 | 12768359 | def f():
'''f'''
pass
def f1(): pass
f2 = f
if True:
def g(): pass
else:
def h(): pass
class C:
def i(self): pass
def j(self):
def j2(self):
pass
class C2:
def k(self): pass
|
furnace/datasets/voc/voc.py | Yongjin-colin-choi/TorchSemiSeg | 1,439 | 12768374 | <filename>furnace/datasets/voc/voc.py
#!/usr/bin/env python3
# encoding: utf-8
# @Time : 2017/12/16 下午8:41
# @Author : yuchangqian
# @Contact : <EMAIL>
# @File : mclane.py
from datasets.BaseDataset import BaseDataset
class VOC(BaseDataset):
@classmethod
def get_class_colors(*args):
return [[0, 0, 0], [0, 0, 128], [0, 128, 0], [0, 128, 128],
[128, 0, 0], [128, 0, 128], [128, 128, 0],
[128, 128, 128],
[0, 0, 64], [0, 0, 192], [0, 128, 64],
[0, 128, 192],
[128, 0, 64], [128, 0, 192], [128, 128, 64],
[128, 128, 192], [0, 64, 0], [0, 64, 128],
[0, 192, 0],
[0, 192, 128], [128, 64, 0], ]
@classmethod
def get_class_names(*args):
return ['background', 'aeroplane', 'bicycle', 'bird',
'boat',
'bottle', 'bus', 'car', 'cat', 'chair', 'cow',
'diningtable',
'dog', 'horse', 'motorbike', 'person',
'pottedplant',
'sheep', 'sofa', 'train', 'tv/monitor']
if __name__ == "__main__":
data_setting = {'img_root': '/unsullied/sharefs/g:research_detection/GeneralDetection/VOC/VOC/VOC2012_AUG/',
'gt_root': '/unsullied/sharefs/g:research_detection/GeneralDetection/VOC/VOC/VOC2012_AUG',
'train_source': '/unsullied/sharefs/g:research_detection/GeneralDetection/VOC/VOC/VOC2012_AUG/config/train.txt',
'eval_source': '/unsullied/sharefs/g:research_detection/GeneralDetection/VOC/VOC/VOC2012_AUG/config/val.txt'}
voc = VOC(data_setting, 'train', None)
print(voc.get_class_names())
print(voc.get_length())
print(next(iter(voc)))
|
koku/api/common/permissions/test/test_ocp_all_access.py | rubik-ai/koku | 157 | 12768386 | #
# Copyright 2021 Red Hat Inc.
# SPDX-License-Identifier: Apache-2.0
#
from itertools import chain
from itertools import combinations
from unittest.mock import Mock
from django.test import TestCase
from api.common.permissions.openshift_all_access import OpenshiftAllAccessPermission
from api.iam.models import User
from api.provider.models import Provider
ACCESS_KEYS = {
Provider.PROVIDER_AWS: {"aws.account": {"read": ["*"]}},
Provider.PROVIDER_AZURE: {"azure.subscription_guid": {"read": ["*"]}},
Provider.PROVIDER_OCP: {"openshift.cluster": {"read": ["*"]}},
}
class OCPAllAccessPermissionTest(TestCase):
"""Test the OCP-on-All access permissions."""
def test_has_perm_with_access_on_get(self):
"""Test that a user with at least 1 access can execute."""
accessPerm = OpenshiftAllAccessPermission()
s = ACCESS_KEYS.keys()
for key in chain.from_iterable(combinations(s, r) for r in range(1, len(s) + 1)):
with self.subTest(permission=key):
access = {}
for k in key:
access.update(ACCESS_KEYS[k])
user = Mock(spec=User, access=access, admin=False)
req = Mock(user=user, method="GET")
result = accessPerm.has_permission(request=req, view=None)
self.assertTrue(result)
|
src/PlugIns/PE/ResourceEntriesPlug.py | codexgigassys/codex-backend | 161 | 12768391 | <gh_stars>100-1000
# Copyright (C) 2016 <NAME>.
# This file is part of CodexGigas - https://github.com/codexgigassys/
# See the file 'LICENSE' for copying permission.
from PlugIns.PlugIn import PlugIn
from Modules.PEFileModule import PEFileModule
import pefile
from Utils.InfoExtractor import *
class ResourceEntriesPlug(PlugIn):
def __init__(self, sample=None):
PlugIn.__init__(self, sample)
def getPath(self):
return "particular_header.res_entries"
def getName(self):
return "res_entries"
def getVersion(self):
return 6
def process(self):
pelib = self._getLibrary(PEFileModule().getName())
if(pelib is None):
return ""
ret = []
if hasattr(pelib, 'DIRECTORY_ENTRY_RESOURCE'):
i = 0
for resource_type in pelib.DIRECTORY_ENTRY_RESOURCE.entries:
if resource_type.name is not None:
name = "%s" % resource_type.name
else:
name = "%s" % pefile.RESOURCE_TYPE.get(
resource_type.struct.Id)
if name is None:
name = "%d" % resource_type.struct.Id
if hasattr(resource_type, 'directory'):
for resource_id in resource_type.directory.entries:
if hasattr(resource_id, 'directory'):
for resource_lang in resource_id.directory.entries:
try:
data = pelib.get_data(
resource_lang.data.struct.OffsetToData, resource_lang.data.struct.Size)
# fd=open(name,'wb')
# fd.write(data)
# (data)
except pefile.PEFormatError:
return "corrupt"
filetype = MIME_TYPE(data, False)
lang = pefile.LANG.get(
resource_lang.data.lang, 'unknown')
sublang = pefile.get_sublang_name_for_lang(
resource_lang.data.lang, resource_lang.data.sublang)
entry = {}
entry["name"] = self._normalize(name)
entry["rva"] = self._normalize(
hex(resource_lang.data.struct.OffsetToData))
entry["size"] = self._normalize(
hex(resource_lang.data.struct.Size))
entry["type"] = self._normalize(filetype)
entry["lang"] = self._normalize(lang)
entry["sublang"] = self._normalize(sublang)
entry["sha1"] = SHA1(data)
ret.append(entry)
return ret
|
data_analysis/subsidy_distribution.py | Bermuhz/DataMiningCompetitionFirstPrize | 128 | 12768447 | root_loc = "/Users/mac/Documents/contest/data/original_data/"
file_name = "subsidy_train.txt"
count_0 = 0
count_1000 = 0
count_1500 = 0
count_2000 = 0
lines = open(root_loc + file_name).readlines()
for line in lines:
temps = line.strip("\n").split(",")
subsidy = int(temps[1])
if subsidy == 0:
count_0 += 1
if subsidy == 1000:
count_1000 += 1
if subsidy == 1500:
count_1500 += 1
if subsidy == 2000:
count_2000 += 1
print (str(count_0)+"\n"+str(count_1000)+"\n"+str(count_1500)+"\n"+str(count_2000))
print(count_0 + count_1000 + count_1500 + count_2000) |
attic/iterables/almost_aritprog_v0.py | matteoshen/example-code | 5,651 | 12768485 | """
Arithmetic progression class
>>> ap = ArithmeticProgression(1, .5, 3)
>>> list(ap)
[1.0, 1.5, 2.0, 2.5]
"""
from collections import abc
class ArithmeticProgression:
def __init__(self, begin, step, end):
self.begin = begin
self.step = step
self.end = end
def __iter__(self):
return ArithmeticProgressionIterator(self)
class ArithmeticProgressionIterator(abc.Iterator):
def __init__(self, arithmetic_progression):
self._ap = arithmetic_progression
self._index = 0
def __next__(self):
first = type(self._ap.begin + self._ap.step)(self._ap.begin)
result = first + self._ap.step * self._index
if result < self._ap.end:
self._index += 1
return result
else:
raise StopIteration
|
libcity/executor/map_matching_executor.py | moghadas76/test_bigcity | 221 | 12768487 | from logging import getLogger
from libcity.executor.abstract_tradition_executor import AbstractTraditionExecutor
from libcity.utils import get_evaluator
class MapMatchingExecutor(AbstractTraditionExecutor):
def __init__(self, config, model):
self.model = model
self.config = config
self.evaluator = get_evaluator(config)
self.exp_id = self.config.get('exp_id', None)
self.cache_dir = './libcity/cache/{}/model_cache'.format(self.exp_id)
self.evaluate_res_dir = './libcity/cache/{}/evaluate_cache'.format(self.exp_id)
self._logger = getLogger()
def evaluate(self, test_data):
"""
use model to test data
Args:
test_data
"""
result = self.model.run(test_data)
batch = {'route': test_data['route'], 'result': result, 'rd_nwk': test_data['rd_nwk']}
self.evaluator.collect(batch)
self.evaluator.save_result(self.evaluate_res_dir)
def train(self, train_dataloader, eval_dataloader):
"""
对于传统模型,不需要训练
Args:
train_dataloader(torch.Dataloader): Dataloader
eval_dataloader(torch.Dataloader): Dataloader
"""
pass # do nothing
|
applications/pytorch/conformer/tests/convolution.py | payoto/graphcore_examples | 260 | 12768522 | <filename>applications/pytorch/conformer/tests/convolution.py
# Copyright (c) 2021 Graphcore Ltd. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from torch import nn
from espnet.nets.pytorch_backend.conformer.convolution import ConvolutionModule
from src.layers.layer_norm import LayerNorm
class ConvolutionModule_cpu(ConvolutionModule):
"""ConvolutionModule in Conformer model.
Args:
channels_ (int): The number of channels_ of conv layers.
kernel_size_ (int): Kernerl size of conv layers.
"""
def __init__(self, channels_, kernel_size_, activation_=nn.ReLU(), bias=True):
"""Construct an ConvolutionModule object."""
super(ConvolutionModule_cpu, self).__init__(channels=channels_, kernel_size=kernel_size_, activation=activation_)
# kernerl_size should be a odd number for 'SAME' padding
assert (kernel_size_ - 1) % 2 == 0
self.pointwise_conv1 = nn.Conv1d(
channels_,
2 * channels_,
kernel_size=1,
stride=1,
padding=0,
bias=bias,
)
self.depthwise_conv = nn.Conv1d(
1 * channels_,
1 * channels_,
kernel_size_,
stride=1,
padding=(kernel_size_ - 1) // 2,
groups=channels_,
bias=bias,
)
# Replace the original batch_norm with layer_norm
self.norm = LayerNorm(1 * channels_, -2)
self.pointwise_conv2 = nn.Conv1d(
1 * channels_,
channels_,
kernel_size=1,
stride=1,
padding=0,
bias=bias,
)
self.activation = activation_
|
tests/test_select.py | timgates42/goless | 266 | 12768527 | import goless
from goless.backends import current as be
from . import BaseTests
class RecvCaseTests(BaseTests):
chansize = 1
def setUp(self):
BaseTests.setUp(self)
self.ch = goless.chan(self.chansize)
self.ca = goless.rcase(self.ch)
def test_ready(self):
self.assertFalse(self.ca.ready())
be.run(self.ch.send, 1)
self.assertTrue(self.ca.ready())
be.run(self.ch.recv)
self.assertFalse(self.ca.ready())
def test_executes(self):
be.run(self.ch.send, 'a')
x = self.ca.exec_()
self.assertEqual(x, 'a')
def test_exec_with_no_body(self):
be.run(self.ch.send, 'a')
ca = goless.rcase(self.ch)
self.assertEqual(ca.exec_(), 'a')
class RecvCaseUnbufferedTests(RecvCaseTests):
chansize = 0
class SendCaseTests(BaseTests):
chansize = 1
def setUp(self):
BaseTests.setUp(self)
self.ch = goless.chan(self.chansize)
self.sendval = 1
self.ca = goless.scase(self.ch, self.sendval)
def test_ready(self):
def assert_default_readiness():
self.assertEquals(self.ca.ready(), self.chansize > 0)
assert_default_readiness()
be.run(self.ch.send)
self.assertFalse(self.ca.ready())
be.run(self.ch.recv)
assert_default_readiness()
be.run(self.ch.send)
self.assertFalse(self.ca.ready())
be.run(self.ch.recv)
assert_default_readiness()
def test_executes(self):
def recv():
a.append(self.ch.recv())
a = []
be.run(recv)
self.ca.exec_()
self.assertEqual(a, [self.sendval])
def test_exec_no_onselected(self):
be.run(self.ch.recv)
self.ca.exec_()
class SendCaseUnbufferedTests(SendCaseTests):
chansize = 0
class SelectTests(BaseTests):
def setUp(self):
BaseTests.setUp(self)
self.chan1 = goless.chan()
def test_select_uses_default(self):
cases = [goless.rcase(self.chan1), goless.dcase()]
result, val = goless.select(cases)
self.assertIs(result, cases[1])
self.assertIsNone(val)
def test_select_chooses_ready_selection(self):
readychan = goless.chan(1)
notreadychan = goless.chan(1)
readychan.send(3)
cases = [goless.rcase(notreadychan), goless.rcase(readychan), goless.dcase()]
result, val = goless.select(cases)
self.assertIs(result, cases[1])
self.assertEqual(val, 3)
def test_select_no_default_no_ready_blocks(self):
chan1 = goless.chan()
chan2 = goless.chan()
a = []
cases = [goless.rcase(chan2), goless.rcase(chan1)]
def sel():
a.append(goless.select(cases))
be.run(sel)
self.assertEqual(a, [])
chan1.send(5)
be.yield_()
self.assertEqual(len(a), 1)
chosen, val = a[0]
self.assertEqual(chosen, cases[1])
self.assertEqual(val, 5)
def test_main_tasklet_can_select(self):
chan1 = goless.chan(1)
cases = [goless.scase(chan1, 3)]
chosen, val = goless.select(cases)
self.assertIs(chosen, cases[0])
self.assertIsNone(val)
def test_raises_if_multiple_default_cases(self):
with self.assertRaises(AssertionError):
goless.select([goless.dcase(), goless.dcase()])
def test_select_accepts_args(self):
chan1 = goless.chan(1)
scase = goless.scase(chan1, 1)
chosen, val = goless.select(scase)
self.assertIs(chosen, scase)
self.assertIsNone(val)
def test_select_raises_for_list_and_args(self):
chan1 = goless.chan(1)
chan2 = goless.chan(1)
chan3 = goless.chan(1)
cases = [goless.scase(chan1, 1), goless.scase(chan2, 2)]
with self.assertRaises(TypeError):
goless.select(cases, chan3)
def test_select_with_no_args_should_do_nothing(self):
goless.select()
goless.select([])
def test_raises_deadlock_if_no_goroutines(self):
with self.assertRaises(goless.Deadlock):
goless.select(goless.rcase(goless.chan()))
|
cupy/cuda/cutensor.py | prkhrsrvstv1/cupy | 6,180 | 12768529 | """
cuTENSOR Wrapper
Use `cupy_backends.cuda.libs.cutensor` directly in CuPy codebase.
"""
available = True
try:
from cupy_backends.cuda.libs.cutensor import * # NOQA
except ImportError as e:
available = False
from cupy._environment import _preload_warning
_preload_warning('cutensor', e)
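# Usage sketch (editorial addition): callers are expected to guard on the
# `available` flag before touching any cuTENSOR-backed routine, e.g.:
#
#   from cupy.cuda import cutensor
#   if cutensor.available:
#       ...  # use the cuTENSOR wrappers re-exported above
#   else:
#       ...  # fall back to plain CuPy implementations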
|
SymbolExtractorAndRenamer/lldb/packages/Python/lldbsuite/test/lang/objc/forward-decl/TestForwardDecl.py | Polidea/SiriusObfuscator | 427 | 12768540 | <gh_stars>100-1000
"""Test that a forward-declared class works when its complete definition is in a library"""
from __future__ import print_function
import os
import time
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class ForwardDeclTestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
# Find the line number to break inside main().
self.source = 'main.m'
self.line = line_number(self.source, '// Set breakpoint 0 here.')
self.shlib_names = ["Container"]
@skipUnlessDarwin
def test_expr(self):
self.build()
# Create a target by the debugger.
target = self.dbg.CreateTarget("a.out")
self.assertTrue(target, VALID_TARGET)
# Create the breakpoint inside function 'main'.
breakpoint = target.BreakpointCreateByLocation(self.source, self.line)
self.assertTrue(breakpoint, VALID_BREAKPOINT)
# Register our shared libraries for remote targets so they get
# automatically uploaded
environment = self.registerSharedLibrariesWithTarget(
target, self.shlib_names)
# Now launch the process, and do not stop at entry point.
process = target.LaunchSimple(
None, environment, self.get_process_working_directory())
self.assertTrue(process, PROCESS_IS_VALID)
# The stop reason of the thread should be breakpoint.
self.expect("thread list", STOPPED_DUE_TO_BREAKPOINT,
substrs=['stopped',
'stop reason = breakpoint'])
# The breakpoint should have a hit count of 1.
self.expect("breakpoint list -f", BREAKPOINT_HIT_ONCE,
substrs=[' resolved, hit count = 1'])
# This should display correctly.
self.expect("expression [j getMember]", VARIABLES_DISPLAYED_CORRECTLY,
substrs=["= 0x"])
|
src/pyinfrabox/testresult/__init__.py | agu3rra/InfraBox | 265 | 12768544 | from builtins import int, range
from pyinfrabox import ValidationError
from pyinfrabox.utils import *
def check_version(v, path):
if not isinstance(v, int):
raise ValidationError(path, "must be an int")
if v != 1:
raise ValidationError(path, "unsupported version")
def parse_measurement(d, path):
check_allowed_properties(d, path, ("name", "unit", "value"))
check_required_properties(d, path, ("name", "unit", "value"))
check_text(d['unit'], path + ".unit")
check_text(d['name'], path + ".name")
check_text(d['value'], path + ".value")
def parse_measurements(e, path):
if not isinstance(e, list):
raise ValidationError(path, "must be an array")
for i in range(0, len(e)):
elem = e[i]
path = "%s[%s]" % (path, i)
parse_measurement(elem, path)
def parse_t(d, path):
check_allowed_properties(d, path,
("suite", "name", "status", "duration", "message",
"stack", "measurements"))
check_required_properties(d, path, ("suite", "name", "status", "duration"))
check_text(d['suite'], path + ".suite")
check_text(d['name'], path + ".name")
check_text(d['status'], path + ".status")
check_number(d['duration'], path + ".duration")
if 'message' in d:
check_text(d['message'], path + ".message")
if 'stack' in d:
check_text(d['stack'], path + ".stack")
if 'measurements' in d:
parse_measurements(d['measurements'], path + ".measurements")
def parse_ts(e, path):
if not isinstance(e, list):
raise ValidationError(path, "must be an array")
if not e:
raise ValidationError(path, "must not be empty")
for i in range(0, len(e)):
elem = e[i]
p = "%s[%s]" % (path, i)
parse_t(elem, p)
def parse_document(d):
check_allowed_properties(d, "#", ("version", "tests"))
check_required_properties(d, "#", ("version", "tests"))
check_version(d['version'], "#version")
parse_ts(d['tests'], "#tests")
def validate_result(d):
parse_document(d)
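# Example (editorial sketch) of a document accepted by validate_result; the
# field values are illustrative only:
#
#   validate_result({
#       "version": 1,
#       "tests": [
#           {
#               "suite": "unit",
#               "name": "test_login",
#               "status": "ok",
#               "duration": 0.42,
#               "measurements": [
#                   {"name": "rss", "unit": "MiB", "value": "128"},
#               ],
#           },
#       ],
#   })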
|
notebook/opencv_threshold.py | vhn0912/python-snippets | 174 | 12768565 | import cv2
im = cv2.imread('data/src/lena_square_half.png')
th, im_th = cv2.threshold(im, 128, 255, cv2.THRESH_BINARY)
print(th)
# 128.0
cv2.imwrite('data/dst/opencv_th.jpg', im_th)
# True
th, im_th_tz = cv2.threshold(im, 128, 255, cv2.THRESH_TOZERO)
print(th)
# 128.0
cv2.imwrite('data/dst/opencv_th_tz.jpg', im_th_tz)
# True
# th, im_th_otsu = cv2.threshold(im, 128, 192, cv2.THRESH_OTSU)
# error: OpenCV(4.2.0) /tmp/opencv-20200105-17262-cwpzm4/opencv-4.2.0/modules/imgproc/src/thresh.cpp:1529: error: (-215:Assertion failed) src.type() == CV_8UC1 in function 'threshold'
im_gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
th, im_gray_th_otsu = cv2.threshold(im_gray, 128, 192, cv2.THRESH_OTSU)
print(th)
# 117.0
cv2.imwrite('data/dst/opencv_th_otsu.jpg', im_gray_th_otsu)
# True
|
cgls.py | iggyuga/raisr2 | 275 | 12768568 | import numpy as np
def cgls(A, b):
    # Solve A x = b. If A carries too little accumulated data (sum of its
    # entries below the threshold), return the zero vector. If A is nearly
    # singular, add a small identity-scaled regularization term (proportional
    # to the sum of A's entries) and retry; otherwise invert A directly.
    height, width = A.shape
    x = np.zeros(height)
    while True:
        sumA = A.sum()
        if sumA < 100:
            break
        if np.linalg.det(A) < 1:
            A = A + np.eye(height, width) * sumA * 0.000000005
        else:
            x = np.linalg.inv(A).dot(b)
            break
return x |
tests/test_handwrite.py | ReveRoyl/Handright-generator | 706 | 12768586 | <reponame>ReveRoyl/Handright-generator<filename>tests/test_handwrite.py
# coding: utf-8
import copy
import PIL.Image
import PIL.ImageDraw
from handright import *
from tests.util import *
BACKGROUND_COLOR = "white"
WIDTH = 32
HEIGHT = 32
SIZE = (WIDTH, HEIGHT)
SEED = "Handright"
def get_default_template():
template = Template(
background=PIL.Image.new(
mode="RGB",
size=SIZE,
color=BACKGROUND_COLOR
),
left_margin=3,
top_margin=6,
right_margin=3,
bottom_margin=6,
line_spacing=2,
font=get_default_font(2),
font_size_sigma=0,
)
return template
def test_side_effect():
text = get_short_text()
template = get_default_template()
template_clone = copy.copy(template)
handwrite(text, template)
assert text == get_short_text()
assert template == template_clone
def test_null_text():
assert list(handwrite("", get_default_template())) == []
def test_blank_text():
temp = get_default_template()
images = handwrite(" ", temp)
assert temp.get_background() == next(images)
def test_seed():
text = get_long_text()
template = get_default_template()
for seed in (0, "Handright"):
ims1 = handwrite(text, template, seed=seed)
ims2 = handwrite(text, template, seed=seed)
assert list(ims1) == list(ims2)
def test_line_and_page_breaks():
text = "哈" * 4
template = Template(
background=PIL.Image.new(mode="L", size=(30, 30), color="white"),
font=get_default_font(12),
left_margin=3,
right_margin=3,
top_margin=3,
bottom_margin=3,
word_spacing_sigma=0,
font_size_sigma=0,
)
images = handwrite(text, template)
assert len(list(images)) == 1
def test_line_separators():
text1 = "a\nb\nc\n"
text2 = "a\rb\rc\r"
text3 = "a\r\nb\r\nc\r\n"
text4 = "a\rb\nc\r\n"
text5 = "a\rb\nc\r"
text6 = "a\r\nb\rc\r"
text7 = "a\r\nb\nc\n"
template = get_default_template()
assert (list(handwrite(text1, template, seed=SEED))
== list(handwrite(text2, template, seed=SEED))
== list(handwrite(text3, template, seed=SEED))
== list(handwrite(text4, template, seed=SEED))
== list(handwrite(text5, template, seed=SEED))
== list(handwrite(text6, template, seed=SEED))
== list(handwrite(text7, template, seed=SEED)))
|
Lib/test/test_compiler/test_static/final.py | isabella232/cinder-1 | 1,886 | 12768611 | <gh_stars>1000+
from compiler.errors import TypedSyntaxError
from typing import ClassVar
from .common import StaticTestBase
class FinalTests(StaticTestBase):
def test_final_multiple_typeargs(self):
codestr = """
from typing import Final
from something import hello
x: Final[int, str] = hello()
"""
with self.assertRaisesRegex(
TypedSyntaxError,
r"incorrect number of generic arguments for Final\[T\], expected 1, got 2",
):
self.compile(codestr, modname="foo")
def test_final_annotation_nesting(self):
with self.assertRaisesRegex(
TypedSyntaxError, "Final annotation is only valid in initial declaration"
):
self.compile(
"""
from typing import Final, List
x: List[Final[str]] = []
""",
modname="foo",
)
with self.assertRaisesRegex(
TypedSyntaxError, "Final annotation is only valid in initial declaration"
):
self.compile(
"""
from typing import Final, List
x: List[int | Final] = []
""",
modname="foo",
)
def test_final(self):
codestr = """
from typing import Final
x: Final[int] = 0xdeadbeef
"""
self.compile(codestr, modname="foo")
def test_final_generic(self):
codestr = """
from typing import Final
x: Final[int] = 0xdeadbeef
"""
self.compile(codestr, modname="foo")
def test_final_generic_types(self):
codestr = """
from typing import Final
def g(i: int) -> int:
return i
def f() -> int:
x: Final[int] = 0xdeadbeef
return g(x)
"""
self.compile(codestr, modname="foo")
def test_final_uninitialized(self):
codestr = """
from typing import Final
x: Final[int]
"""
with self.assertRaisesRegex(
TypedSyntaxError, "Must assign a value when declaring a Final"
):
self.compile(codestr, modname="foo")
def test_final_reassign(self):
codestr = """
from typing import Any, Final
x: Final[Any] = 0xdeadbeef
x = "something"
"""
with self.assertRaisesRegex(
TypedSyntaxError, "Cannot assign to a Final variable"
):
self.compile(codestr, modname="foo")
def test_final_reassign_explicit_global(self):
codestr = """
from typing import Final
a: Final[int] = 1337
def fn():
def fn2():
global a
a = 0
"""
with self.assertRaisesRegex(
TypedSyntaxError, "Cannot assign to a Final variable"
):
self.compile(codestr, modname="foo")
def test_final_reassign_explicit_global_shadowed(self):
codestr = """
from typing import Final
a: Final[int] = 1337
def fn():
a = 2
def fn2():
global a
a = 0
"""
with self.assertRaisesRegex(
TypedSyntaxError, "Cannot assign to a Final variable"
):
self.compile(codestr, modname="foo")
def test_final_reassign_nonlocal(self):
codestr = """
from typing import Final
a: Final[int] = 1337
def fn():
def fn2():
nonlocal a
a = 0
"""
with self.assertRaisesRegex(
TypedSyntaxError, "Cannot assign to a Final variable"
):
self.compile(codestr, modname="foo")
def test_final_reassign_nonlocal_shadowed(self):
codestr = """
from typing import Final
a: Final[int] = 1337
def fn():
a = 3
def fn2():
nonlocal a
# should be allowed, we're assigning to the shadowed
# value
a = 0
"""
self.compile(codestr, modname="foo")
def test_final_reassigned_in_tuple(self):
codestr = """
from typing import Final
x: Final[int] = 0xdeadbeef
y = 3
x, y = 4, 5
"""
with self.assertRaisesRegex(
TypedSyntaxError, "Cannot assign to a Final variable"
):
self.compile(codestr, modname="foo")
def test_final_reassigned_in_loop(self):
codestr = """
from typing import Final
x: Final[int] = 0xdeadbeef
for x in [1, 3, 5]:
pass
"""
with self.assertRaisesRegex(
TypedSyntaxError, "Cannot assign to a Final variable"
):
self.compile(codestr, modname="foo")
def test_final_reassigned_in_except(self):
codestr = """
from typing import Final
def f():
e: Final[int] = 3
try:
x = 1 + "2"
except Exception as e:
pass
"""
with self.assertRaisesRegex(
TypedSyntaxError, "Cannot assign to a Final variable"
):
self.compile(codestr, modname="foo")
def test_final_reassigned_in_loop_target_tuple(self):
codestr = """
from typing import Final
x: Final[int] = 0xdeadbeef
for x, y in [(1, 2)]:
pass
"""
with self.assertRaisesRegex(
TypedSyntaxError, "Cannot assign to a Final variable"
):
self.compile(codestr, modname="foo")
def test_final_reassigned_in_ctxmgr(self):
codestr = """
from typing import Final
x: Final[int] = 0xdeadbeef
with open("lol") as x:
pass
"""
with self.assertRaisesRegex(
TypedSyntaxError, "Cannot assign to a Final variable"
):
self.compile(codestr, modname="foo")
def test_final_generic_reassign(self):
codestr = """
from typing import Final
x: Final[int] = 0xdeadbeef
x = 0x5ca1ab1e
"""
with self.assertRaisesRegex(
TypedSyntaxError, "Cannot assign to a Final variable"
):
self.compile(codestr, modname="foo")
def test_final_callable_protocol_retains_inferred_type(self):
codestr = """
from typing import Final, Protocol
def foo(x: int) -> str:
return "A"
class CallableProtocol(Protocol):
def __call__(self, x: int) -> str:
pass
f: Final[CallableProtocol] = foo
def bar(x: int) -> str:
return f(x)
"""
with self.in_module(codestr) as mod:
f = mod.bar
self.assertInBytecode(f, "INVOKE_FUNCTION")
def test_final_in_args(self):
codestr = """
from typing import Final
def f(a: Final) -> None:
pass
"""
with self.assertRaisesRegex(
TypedSyntaxError,
"Final annotation is only valid in initial declaration",
):
self.compile(codestr, modname="foo")
def test_final_returns(self):
codestr = """
from typing import Final
def f() -> Final[int]:
return 1
"""
with self.assertRaisesRegex(
TypedSyntaxError,
"Final annotation is only valid in initial declaration",
):
self.compile(codestr, modname="foo")
def test_final_decorator(self):
codestr = """
from typing import final
class C:
@final
def f():
pass
"""
self.compile(codestr, modname="foo")
def test_final_decorator_override(self):
codestr = """
from typing import final
class C:
@final
def f():
pass
class D(C):
def f():
pass
"""
with self.assertRaisesRegex(
TypedSyntaxError, "Cannot assign to a Final attribute of foo.D:f"
):
self.compile(codestr, modname="foo")
def test_final_decorator_override_with_assignment(self):
codestr = """
from typing import final
class C:
@final
def f():
pass
class D(C):
f = print
"""
with self.assertRaisesRegex(
TypedSyntaxError, "Cannot assign to a Final attribute of foo.D:f"
):
self.compile(codestr, modname="foo")
def test_final_decorator_override_transitivity(self):
codestr = """
from typing import final
class C:
@final
def f():
pass
class D(C):
pass
class E(D):
def f():
pass
"""
with self.assertRaisesRegex(
TypedSyntaxError, "Cannot assign to a Final attribute of foo.E:f"
):
self.compile(codestr, modname="foo")
def test_final_decorator_class(self):
codestr = """
from typing import final
@final
class C:
def f(self):
pass
def f():
return C().f()
"""
c = self.compile(codestr, modname="foo")
f = self.find_code(c, "f")
self.assertInBytecode(f, "INVOKE_FUNCTION")
def test_final_decorator_class_inheritance(self):
codestr = """
from typing import final
@final
class C:
pass
class D(C):
pass
"""
with self.assertRaisesRegex(
TypedSyntaxError, "Class `foo.D` cannot subclass a Final class: `foo.C`"
):
self.compile(codestr, modname="foo")
def test_final_decorator_class_nonstatic_subclass(self):
codestr = """
from typing import final
@final
class C:
pass
"""
with self.in_module(codestr) as mod:
with self.assertRaisesRegex(
TypeError, "type 'C' is not an acceptable base type"
):
class D(mod.C):
pass
def test_final_decorator_class_dynamic(self):
"""We should never mark DYNAMIC_TYPE as final."""
codestr = """
from typing import final, Generic, NamedTuple
@final
class NT(NamedTuple):
x: int
class C(Generic):
pass
"""
# No TypedSyntaxError "cannot inherit from Final class 'dynamic'"
self.compile(codestr)
def test_final_constant_folding_int(self):
codestr = """
from typing import Final
X: Final[int] = 1337
def plus_1337(i: int) -> int:
return i + X
"""
with self.in_module(codestr) as mod:
plus_1337 = mod.plus_1337
self.assertInBytecode(plus_1337, "LOAD_CONST", 1337)
self.assertNotInBytecode(plus_1337, "LOAD_GLOBAL")
self.assertEqual(plus_1337(3), 1340)
def test_final_constant_folding_bool(self):
codestr = """
from typing import Final
X: Final[bool] = True
def f() -> bool:
return not X
"""
with self.in_module(codestr) as mod:
f = mod.f
self.assertInBytecode(f, "LOAD_CONST", True)
self.assertNotInBytecode(f, "LOAD_GLOBAL")
self.assertFalse(f())
def test_final_constant_folding_str(self):
codestr = """
from typing import Final
X: Final[str] = "omg"
def f() -> str:
return X[1]
"""
with self.in_module(codestr) as mod:
f = mod.f
self.assertInBytecode(f, "LOAD_CONST", "omg")
self.assertNotInBytecode(f, "LOAD_GLOBAL")
self.assertEqual(f(), "m")
def test_final_constant_folding_disabled_on_nonfinals(self):
codestr = """
from typing import Final
X: str = "omg"
def f() -> str:
return X[1]
"""
with self.in_module(codestr) as mod:
f = mod.f
self.assertNotInBytecode(f, "LOAD_CONST", "omg")
self.assertInBytecode(f, "LOAD_GLOBAL", "X")
self.assertEqual(f(), "m")
def test_final_constant_folding_disabled_on_nonconstant_finals(self):
codestr = """
from typing import Final
def p() -> str:
return "omg"
X: Final[str] = p()
def f() -> str:
return X[1]
"""
with self.in_module(codestr) as mod:
f = mod.f
self.assertNotInBytecode(f, "LOAD_CONST", "omg")
self.assertInBytecode(f, "LOAD_GLOBAL", "X")
self.assertEqual(f(), "m")
def test_final_constant_folding_shadowing(self):
codestr = """
from typing import Final
X: Final[str] = "omg"
def f() -> str:
X = "lol"
return X[1]
"""
with self.in_module(codestr) as mod:
f = mod.f
self.assertInBytecode(f, "LOAD_CONST", "lol")
self.assertNotInBytecode(f, "LOAD_GLOBAL", "omg")
self.assertEqual(f(), "o")
def test_final_constant_folding_in_module_scope(self):
codestr = """
from typing import Final
X: Final[int] = 21
y = X + 3
"""
c = self.compile(codestr, modname="foo.py")
self.assertNotInBytecode(c, "LOAD_NAME", "X")
with self.in_module(codestr) as mod:
self.assertEqual(mod.y, 24)
def test_final_constant_in_module_scope(self):
codestr = """
from typing import Final
X: Final[int] = 21
"""
with self.in_module(codestr) as mod:
self.assertEqual(mod.__final_constants__, ("X",))
def test_final_nonconstant_in_module_scope(self):
codestr = """
from typing import Final
def p() -> str:
return "omg"
X: Final[str] = p()
"""
with self.in_module(codestr) as mod:
self.assertEqual(mod.__final_constants__, ())
def test_final_method_in_class_slots(self):
codestr = """
from typing import final
class C:
@final
def foo(self):
return self
def bar(self):
return self
"""
with self.in_module(codestr) as mod:
self.assertEqual(mod.C.__final_method_names__, ("foo",))
def test_final_method_in_class_slots_with_inheritance(self):
codestr = """
from typing import final
class C:
@final
def foo(self):
return self
def bar(self):
return self
class D(C):
@final
def bar(self):
return self
def baz(self):
return self
class E(D):
@final
def baz(self):
return self
class F(D):
def baz(self):
return self
"""
with self.in_module(codestr) as mod:
self.assertEqual(mod.C.__final_method_names__, ("foo",))
self.assertEqual(mod.D.__final_method_names__, ("bar", "foo"))
self.assertEqual(mod.E.__final_method_names__, ("bar", "baz", "foo"))
self.assertEqual(mod.F.__final_method_names__, ("bar", "foo"))
def test_final_method_in_class_nonstatic_subclass_slots(self):
codestr = """
from typing import final
class C:
@final
def foo(self):
return self
def bar(self):
return self
"""
with self.in_module(codestr) as mod:
class D(mod.C):
pass
self.assertEqual(D.__final_method_names__, ("foo",))
def test_final_method_nonstatic_override_throws_runtime_type_error(self):
codestr = """
from typing import final
class C:
@final
def foo(self):
return self
def bar(self):
return self
"""
with self.in_module(codestr) as mod:
with self.assertRaisesRegex(
TypeError, r"'foo' overrides a final method in the static base class"
):
class D(mod.C):
def foo(self):
return self
def test_final_method_nonstatic_override_of_static_subclass_throws_runtime_type_error(
self,
):
codestr = """
from typing import final
class C:
@final
def foo(self):
return self
def bar(self):
return self
class D(C):
pass
"""
with self.in_module(codestr) as mod:
with self.assertRaisesRegex(
TypeError, r"'foo' overrides a final method in the static base class"
):
class E(mod.D):
def foo(self):
return self
def test_final_method_nonstatic_subclass_of_static_class_throws_runtime_type_error(
self,
):
codestr = """
from typing import final
class C:
@final
def foo(self):
return self
def bar(self):
return self
"""
with self.in_module(codestr) as mod:
with self.assertRaisesRegex(
TypeError, r"'foo' overrides a final method in the static base class"
):
class D(mod.C):
pass
class E(D):
def foo(self):
return self
def test_final_method_with_other_decorator_throws_type_error(
self,
):
codestr = """
from typing import final
class C:
@final
@staticmethod
def foo():
return self
@staticmethod
@final
def bar():
return self
"""
with self.in_module(codestr) as mod:
with self.assertRaisesRegex(
TypeError, r"'foo' overrides a final method in the static base class"
):
class D(mod.C):
@staticmethod
def foo():
return self
with self.assertRaisesRegex(
TypeError, r"'bar' overrides a final method in the static base class"
):
class D(mod.C):
@staticmethod
def bar():
return self
def test_updating_slot_of_final_method_in_subclass_throws_type_error(
self,
):
codestr = """
from typing import final
class C:
@final
def foo(self) -> int:
return 0
"""
with self.in_module(codestr) as mod:
with self.assertRaisesRegex(
TypeError, r"'foo' overrides a final method in the static base class"
):
class D(mod.C):
pass
D.foo = lambda self: 0
def test_updating_slot_of_final_method_in_base_class_succeeds(
self,
):
codestr = """
from typing import final
class C:
@final
def foo(self) -> int:
return 0
"""
with self.in_module(codestr) as mod:
class D(mod.C):
pass
mod.C.foo = lambda self: 1
self.assertEqual(mod.C().foo(), 1)
def test_final_method_in_non_final_class_emits_invoke_function(
self,
):
codestr = """
from typing import final
class C:
def __init__(self, x: int) -> None:
self.x = x
@final
def foo(self) -> int:
return self.x
def foo(c: C) -> int:
return c.foo()
"""
with self.in_module(codestr) as mod:
class D(mod.C):
def __init__(self):
super().__init__(5)
self.assertInBytecode(mod.foo, "INVOKE_FUNCTION")
self.assertEqual(mod.foo(mod.C(4)), 4)
self.assertEqual(mod.foo(D()), 5)
def test_final_method_in_subclass_of_non_final_class_emits_invoke_function(
self,
):
codestr = """
from typing import final
class C:
def __init__(self, x: int) -> None:
self.x = x
@final
def foo(self) -> int:
return self.x
class D(C):
def __init__(self) -> None:
self.x = 4
def foo(d: D) -> int:
return d.foo()
"""
with self.in_module(codestr) as mod:
self.assertInBytecode(
mod.foo, "INVOKE_FUNCTION", ((mod.__name__, "C", "foo"), 1)
)
self.assertEqual(mod.foo(mod.D()), 4)
def test_final_classmethod_in_non_final_nonstatic_class_emits_invoke_function(
self,
):
codestr = """
from typing import ClassVar, final
class C:
CV: ClassVar[int] = 42
@final
@classmethod
def foo(cls) -> int:
return cls.CV
def foo(c: C) -> int:
return c.foo()
"""
with self.in_module(codestr) as mod:
class D(mod.C):
CV: ClassVar[int] = 84
self.assertInBytecode(
mod.foo, "INVOKE_FUNCTION", ((mod.__name__, "C", "foo"), 1)
)
self.assertEqual(mod.foo(mod.C()), 42)
self.assertEqual(mod.foo(D()), 84)
def test_final_classmethod_in_non_final_static_class_emits_invoke_function(
self,
):
codestr = """
from typing import ClassVar, final
class C:
CV: ClassVar[int] = 42
@final
@classmethod
def foo(cls) -> int:
return cls.CV
class D(C):
CV: ClassVar[int] = 63
def foo(c: C) -> int:
return c.foo()
"""
with self.in_module(codestr) as mod:
self.assertInBytecode(
mod.foo, "INVOKE_FUNCTION", ((mod.__name__, "C", "foo"), 1)
)
self.assertEqual(mod.foo(mod.C()), 42)
self.assertEqual(mod.foo(mod.D()), 63)
|
src/examples/python/gen_py/datahub/ttypes.py | RogerTangos/datahub-stub | 192 | 12768620 | #
# Autogenerated by Thrift Compiler (0.9.2)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
from thrift.Thrift import TType, TMessageType, TException, TApplicationException
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol, TProtocol
try:
from thrift.protocol import fastbinary
except:
fastbinary = None
class ConnectionParams:
"""
Attributes:
- client_id
- seq_id
- user
- password
- app_id
- app_token
- repo_base
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'client_id', None, None, ), # 1
(2, TType.STRING, 'seq_id', None, None, ), # 2
(3, TType.STRING, 'user', None, None, ), # 3
(4, TType.STRING, 'password', None, None, ), # 4
(5, TType.STRING, 'app_id', None, None, ), # 5
(6, TType.STRING, 'app_token', None, None, ), # 6
(7, TType.STRING, 'repo_base', None, None, ), # 7
)
def __init__(self, client_id=None, seq_id=None, user=None, password=None, app_id=None, app_token=None, repo_base=None,):
self.client_id = client_id
self.seq_id = seq_id
self.user = user
self.password = password
self.app_id = app_id
self.app_token = app_token
self.repo_base = repo_base
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.client_id = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.seq_id = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.user = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRING:
self.password = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.STRING:
self.app_id = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.STRING:
self.app_token = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 7:
if ftype == TType.STRING:
self.repo_base = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('ConnectionParams')
if self.client_id is not None:
oprot.writeFieldBegin('client_id', TType.STRING, 1)
oprot.writeString(self.client_id)
oprot.writeFieldEnd()
if self.seq_id is not None:
oprot.writeFieldBegin('seq_id', TType.STRING, 2)
oprot.writeString(self.seq_id)
oprot.writeFieldEnd()
if self.user is not None:
oprot.writeFieldBegin('user', TType.STRING, 3)
oprot.writeString(self.user)
oprot.writeFieldEnd()
if self.password is not None:
oprot.writeFieldBegin('password', TType.STRING, 4)
oprot.writeString(self.password)
oprot.writeFieldEnd()
if self.app_id is not None:
oprot.writeFieldBegin('app_id', TType.STRING, 5)
oprot.writeString(self.app_id)
oprot.writeFieldEnd()
if self.app_token is not None:
oprot.writeFieldBegin('app_token', TType.STRING, 6)
oprot.writeString(self.app_token)
oprot.writeFieldEnd()
if self.repo_base is not None:
oprot.writeFieldBegin('repo_base', TType.STRING, 7)
oprot.writeString(self.repo_base)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.client_id)
value = (value * 31) ^ hash(self.seq_id)
value = (value * 31) ^ hash(self.user)
value = (value * 31) ^ hash(self.password)
value = (value * 31) ^ hash(self.app_id)
value = (value * 31) ^ hash(self.app_token)
value = (value * 31) ^ hash(self.repo_base)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class Connection:
"""
Attributes:
- client_id
- seq_id
- user
- is_app
- repo_base
- cursor
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'client_id', None, None, ), # 1
(2, TType.STRING, 'seq_id', None, None, ), # 2
(3, TType.STRING, 'user', None, None, ), # 3
(4, TType.BOOL, 'is_app', None, None, ), # 4
(5, TType.STRING, 'repo_base', None, None, ), # 5
(6, TType.I64, 'cursor', None, None, ), # 6
)
def __init__(self, client_id=None, seq_id=None, user=None, is_app=None, repo_base=None, cursor=None,):
self.client_id = client_id
self.seq_id = seq_id
self.user = user
self.is_app = is_app
self.repo_base = repo_base
self.cursor = cursor
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.client_id = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.seq_id = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.user = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.BOOL:
self.is_app = iprot.readBool();
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.STRING:
self.repo_base = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.I64:
self.cursor = iprot.readI64();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('Connection')
if self.client_id is not None:
oprot.writeFieldBegin('client_id', TType.STRING, 1)
oprot.writeString(self.client_id)
oprot.writeFieldEnd()
if self.seq_id is not None:
oprot.writeFieldBegin('seq_id', TType.STRING, 2)
oprot.writeString(self.seq_id)
oprot.writeFieldEnd()
if self.user is not None:
oprot.writeFieldBegin('user', TType.STRING, 3)
oprot.writeString(self.user)
oprot.writeFieldEnd()
if self.is_app is not None:
oprot.writeFieldBegin('is_app', TType.BOOL, 4)
oprot.writeBool(self.is_app)
oprot.writeFieldEnd()
if self.repo_base is not None:
oprot.writeFieldBegin('repo_base', TType.STRING, 5)
oprot.writeString(self.repo_base)
oprot.writeFieldEnd()
if self.cursor is not None:
oprot.writeFieldBegin('cursor', TType.I64, 6)
oprot.writeI64(self.cursor)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.client_id)
value = (value * 31) ^ hash(self.seq_id)
value = (value * 31) ^ hash(self.user)
value = (value * 31) ^ hash(self.is_app)
value = (value * 31) ^ hash(self.repo_base)
value = (value * 31) ^ hash(self.cursor)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class Tuple:
"""
Attributes:
- cells
"""
thrift_spec = (
None, # 0
(1, TType.LIST, 'cells', (TType.STRING,None), None, ), # 1
)
def __init__(self, cells=None,):
self.cells = cells
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.LIST:
self.cells = []
(_etype3, _size0) = iprot.readListBegin()
for _i4 in xrange(_size0):
_elem5 = iprot.readString();
self.cells.append(_elem5)
iprot.readListEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('Tuple')
if self.cells is not None:
oprot.writeFieldBegin('cells', TType.LIST, 1)
oprot.writeListBegin(TType.STRING, len(self.cells))
for iter6 in self.cells:
oprot.writeString(iter6)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.cells)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class ResultSet:
"""
Attributes:
- status
- con
- num_tuples
- num_more_tuples
- tuples
- field_names
- field_types
"""
thrift_spec = (
None, # 0
(1, TType.BOOL, 'status', None, None, ), # 1
(2, TType.STRUCT, 'con', (Connection, Connection.thrift_spec), None, ), # 2
(3, TType.I64, 'num_tuples', None, None, ), # 3
(4, TType.I64, 'num_more_tuples', None, None, ), # 4
(5, TType.LIST, 'tuples', (TType.STRUCT,(Tuple, Tuple.thrift_spec)), None, ), # 5
(6, TType.LIST, 'field_names', (TType.STRING,None), None, ), # 6
(7, TType.LIST, 'field_types', (TType.STRING,None), None, ), # 7
)
def __init__(self, status=None, con=None, num_tuples=None, num_more_tuples=None, tuples=None, field_names=None, field_types=None,):
self.status = status
self.con = con
self.num_tuples = num_tuples
self.num_more_tuples = num_more_tuples
self.tuples = tuples
self.field_names = field_names
self.field_types = field_types
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.BOOL:
self.status = iprot.readBool();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.con = Connection()
self.con.read(iprot)
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.I64:
self.num_tuples = iprot.readI64();
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.I64:
self.num_more_tuples = iprot.readI64();
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.LIST:
self.tuples = []
(_etype10, _size7) = iprot.readListBegin()
for _i11 in xrange(_size7):
_elem12 = Tuple()
_elem12.read(iprot)
self.tuples.append(_elem12)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.LIST:
self.field_names = []
(_etype16, _size13) = iprot.readListBegin()
for _i17 in xrange(_size13):
_elem18 = iprot.readString();
self.field_names.append(_elem18)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 7:
if ftype == TType.LIST:
self.field_types = []
(_etype22, _size19) = iprot.readListBegin()
for _i23 in xrange(_size19):
_elem24 = iprot.readString();
self.field_types.append(_elem24)
iprot.readListEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('ResultSet')
if self.status is not None:
oprot.writeFieldBegin('status', TType.BOOL, 1)
oprot.writeBool(self.status)
oprot.writeFieldEnd()
if self.con is not None:
oprot.writeFieldBegin('con', TType.STRUCT, 2)
self.con.write(oprot)
oprot.writeFieldEnd()
if self.num_tuples is not None:
oprot.writeFieldBegin('num_tuples', TType.I64, 3)
oprot.writeI64(self.num_tuples)
oprot.writeFieldEnd()
if self.num_more_tuples is not None:
oprot.writeFieldBegin('num_more_tuples', TType.I64, 4)
oprot.writeI64(self.num_more_tuples)
oprot.writeFieldEnd()
if self.tuples is not None:
oprot.writeFieldBegin('tuples', TType.LIST, 5)
oprot.writeListBegin(TType.STRUCT, len(self.tuples))
for iter25 in self.tuples:
iter25.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.field_names is not None:
oprot.writeFieldBegin('field_names', TType.LIST, 6)
oprot.writeListBegin(TType.STRING, len(self.field_names))
for iter26 in self.field_names:
oprot.writeString(iter26)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.field_types is not None:
oprot.writeFieldBegin('field_types', TType.LIST, 7)
oprot.writeListBegin(TType.STRING, len(self.field_types))
for iter27 in self.field_types:
oprot.writeString(iter27)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.status is None:
raise TProtocol.TProtocolException(message='Required field status is unset!')
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.status)
value = (value * 31) ^ hash(self.con)
value = (value * 31) ^ hash(self.num_tuples)
value = (value * 31) ^ hash(self.num_more_tuples)
value = (value * 31) ^ hash(self.tuples)
value = (value * 31) ^ hash(self.field_names)
value = (value * 31) ^ hash(self.field_types)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class DBException(TException):
"""
Attributes:
- error_code
- message
- details
"""
thrift_spec = (
None, # 0
(1, TType.I32, 'error_code', None, None, ), # 1
(2, TType.STRING, 'message', None, None, ), # 2
(3, TType.STRING, 'details', None, None, ), # 3
)
def __init__(self, error_code=None, message=None, details=None,):
self.error_code = error_code
self.message = message
self.details = details
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I32:
self.error_code = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.message = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.details = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('DBException')
if self.error_code is not None:
oprot.writeFieldBegin('error_code', TType.I32, 1)
oprot.writeI32(self.error_code)
oprot.writeFieldEnd()
if self.message is not None:
oprot.writeFieldBegin('message', TType.STRING, 2)
oprot.writeString(self.message)
oprot.writeFieldEnd()
if self.details is not None:
oprot.writeFieldBegin('details', TType.STRING, 3)
oprot.writeString(self.details)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __str__(self):
return repr(self)
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.error_code)
value = (value * 31) ^ hash(self.message)
value = (value * 31) ^ hash(self.details)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
|
statistics/stat_multiv_solutions.py | gautard/pystatsml | 123 | 12768646 | '''
Multivariate statistics exercises
=================================
'''
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#%matplotlib inline
np.random.seed(seed=42) # make the example reproducible
'''
### Dot product and Euclidean norm
'''
a = np.array([2,1])
b = np.array([1,1])
def euclidian(x):
return np.sqrt(np.dot(x, x))
euclidian(a)
euclidian(a - b)
np.dot(b, a / euclidian(a))
X = np.random.randn(100, 2)
np.dot(X, a / euclidian(a))
'''
### Covariance matrix and Mahalanobis norm
'''
N = 100
mu = np.array([1, 1])
Cov = np.array([[1, .8],
[.8, 1]])
X = np.random.multivariate_normal(mu, Cov, N)
xbar = np.mean(X, axis=0)
print(xbar)
Xc = (X - xbar)
np.mean(Xc, axis=0)
S = 1 / (N - 1) * np.dot(Xc.T, Xc)
print(S)
#import scipy
Sinv = np.linalg.inv(S)
def mahalanobis(x, xbar, Sinv):
xc = x - xbar
return np.sqrt(np.dot(np.dot(xc, Sinv), xc))
dists = pd.DataFrame(
[[mahalanobis(X[i, :], xbar, Sinv),
euclidian(X[i, :] - xbar)] for i in range(X.shape[0])],
columns = ['Mahalanobis', 'Euclidean'])
print(dists[:10])
x = X[0, :]
import scipy.spatial
assert(mahalanobis(X[0, :], xbar, Sinv) == scipy.spatial.distance.mahalanobis(xbar, X[0, :], Sinv))
assert(mahalanobis(X[1, :], xbar, Sinv) == scipy.spatial.distance.mahalanobis(xbar, X[1, :], Sinv))
|
examples/tickOptionBox.py | tgolsson/appJar | 666 | 12768650 | <gh_stars>100-1000
import sys
sys.path.append("../")
new_ticks = ["Dogs2", "Cats2", "-", " ", "Hamsters2", "Fish2", "Spiders2", "", " "]
orig_ticks = ["Dogs", "Cats", "Hamsters", "Fish", "Spiders"]
from appJar import gui
def get(btn):
print(app.getOptionBox("Favourite Pets"))
print(app.getOptionBox("The Action"))
def tickOption(opt):
print("tick box", opt)
app.setOptionBox("Favourite Pets", opt, app.getCheckBox(opt))
def tickOptionBox(opt):
print("menu tick box", opt)
optValue = app.getOptionBox("Favourite Pets")[opt]
app.setCheckBox(opt, optValue, callFunction=False)
def doAction(act):
app.setOptionBox("The Action", app.getOptionBox(act))
def findIndex(act):
app.setOptionBox("The Action", app.getScale(act))
def changeOptions(btn=None):
app.changeOptionBox("Favourite Pets", new_ticks)
app.setOptionBoxChangeFunction("Favourite Pets", tickOptionBox)
def changeOptionsBack(btn=None):
app.changeOptionBox("Favourite Pets", orig_ticks)
app.setOptionBoxChangeFunction("Favourite Pets", tickOptionBox)
app=gui()
app.setFont(20)
app.setBg("PapayaWhip")
app.addLabelTickOptionBox("Favourite Pets", [])
changeOptionsBack()
app.addLabelOptionBox("The Action", ["Pet", "Stroke", "Feed", "Bathe", "Walk"])
app.addLabelOptionBox("Set Action", ["Pet", "Stroke", "Feed", "Bathe", "Walk"])
app.setOptionBoxChangeFunction("Set Action", doAction)
app.addScale("index")
app.setScaleRange("index", 0,4)
app.showScaleValue("index")
app.setScaleChangeFunction("index", findIndex)
app.startLabelFrame("Tick Us")
app.addCheckBox("Dogs")
app.addCheckBox("Cats")
app.addCheckBox("Hamsters")
app.addCheckBox("Fish")
app.addCheckBox("People")
app.setCheckBoxChangeFunction("Dogs", tickOption)
app.setCheckBoxChangeFunction("Cats", tickOption)
app.setCheckBoxChangeFunction("Hamsters", tickOption)
app.setCheckBoxChangeFunction("Fish", tickOption)
app.setCheckBoxChangeFunction("People", tickOption)
app.stopLabelFrame()
app.addButtons(["GET", "CHANGE", "BACK"], [get,changeOptions, changeOptionsBack])
#app.setCheckBox("Dogs", True)
#app.setOptionBox("Favourite Pets", "Dogs")
app.go()
|
test/kernel/integration/LiveUpdate/test.py | jaeh/IncludeOS | 3,673 | 12768671 | <filename>test/kernel/integration/LiveUpdate/test.py<gh_stars>1000+
#!/usr/bin/env python3
from builtins import str
import sys
import os
import socket
from vmrunner import vmrunner
vm = vmrunner.vms[0]
def begin_test(line):
f = open('./kernel_LiveUpdate','rb')
s = socket.socket()
s.connect(("10.0.0.59", 666))
s.send(f.read())
s.close()
vm.on_output("Ready to receive binary blob", begin_test)
if len(sys.argv) > 1:
vm.boot(40,image_name=str(sys.argv[1]))
else:
vm.cmake().boot(40,image_name='kernel_LiveUpdate').clean()
|
underworld/libUnderworld/config/packages/StGermain.py | longgangfan/underworld2 | 116 | 12768682 | <filename>underworld/libUnderworld/config/packages/StGermain.py
import os
from config import Package
from .libXML2 import libXML2
from .MPI import MPI
from .pcu import pcu
class StGermain(Package):
def setup_dependencies(self):
self.mpi = self.add_dependency(MPI, required=True)
self.libxml2 = self.add_dependency(libXML2, required=True)
self.pcu = self.add_dependency(pcu, required=True)
def gen_locations(self):
yield ('/usr', [], [])
yield ('/usr/local', [], [])
def gen_envs(self, loc):
for env in Package.gen_envs(self, loc):
self.headers = [os.path.join('StGermain', 'StGermain.h')]
if self.find_libraries(loc[2], 'StGermain'):
env.PrependUnique(LIBS=['StGermain'])
yield env
|
Python3/664.py | rakhi2001/ecom7 | 854 | 12768722 | __________________________________________________________________________________________________
sample 140 ms submission
from functools import lru_cache
class Solution:
def strangePrinter(self, s: str) -> int:
@lru_cache(None)
def find_min(start, end):
if start >= end: return 1 if end == start else 0
out, k = 1 + find_min(start+1, end), nextIdx[start]
while k != None and k <= end:
temp = find_min(start,k-1) + find_min(k+1,end)
if temp < out: out = temp
k = nextIdx[k]
return out
if not s: return 0
new_s = []
for i, c in enumerate(s[:-1]):
if s[i+1] != c: new_s.append(c)
s = ''.join(new_s + [s[-1]])
nextIdx = [None] * len(s)
lastIdx = {}
for i in range(len(s)-1, -1, -1):
if s[i] in lastIdx: nextIdx[i] = lastIdx[s[i]]
lastIdx[s[i]] = i
return find_min(0, len(s)-1)
__________________________________________________________________________________________________
sample 13080 kb submission
class Solution:
def strangePrinter(self, s: str) -> int:
n = len(s)
if n == 0: return 0
dp = [[0] * n for i in range(n)]
for i in range(n): # length of range is 1
dp[i][i] = 1
for l in range(2, n+1): # length of range [i, j] from 2 to n
for i in range(n - l + 1):
j = i + l - 1
dp[i][j] = 1 + dp[i+1][j] # default choice, print the first letter s[i] and then the rest
for k in range(i+1, j+1):
if s[k] == s[i]: # better choice than default one
dp[i][j] = min(dp[i][j], dp[i][k-1] + (0 if k+1 > j else dp[k+1][j]))
return dp[0][n-1]
# a x x x x x x x a x x x x x
# dp[i][j] = min(dp[i][j], dp[i][k-1] + dp[k+1][j])
__________________________________________________________________________________________________
|
sample-apps/segmentation_spleen/lib/train.py | IntroAI-termproject/MONAILabel | 214 | 12768731 | # Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from monai.inferers import SlidingWindowInferer
from monai.losses import DiceCELoss
from monai.optimizers import Novograd
from monai.transforms import (
Activationsd,
AddChanneld,
AsDiscreted,
CropForegroundd,
EnsureTyped,
LoadImaged,
RandCropByPosNegLabeld,
RandShiftIntensityd,
ScaleIntensityRanged,
Spacingd,
ToDeviced,
ToTensord,
)
from monailabel.tasks.train.basic_train import BasicTrainTask, Context
logger = logging.getLogger(__name__)
class MyTrain(BasicTrainTask):
def __init__(
self,
model_dir,
network,
description="Train Segmentation model for spleen",
**kwargs,
):
self._network = network
super().__init__(model_dir, description, **kwargs)
def network(self, context: Context):
return self._network
def optimizer(self, context: Context):
return Novograd(self._network.parameters(), 0.0001)
def loss_function(self, context: Context):
return DiceCELoss(to_onehot_y=True, softmax=True, squared_pred=True, batch=True)
def train_pre_transforms(self, context: Context):
t = [
LoadImaged(keys=("image", "label")),
AddChanneld(keys=("image", "label")),
Spacingd(
keys=("image", "label"),
pixdim=(1.0, 1.0, 1.0),
mode=("bilinear", "nearest"),
),
ScaleIntensityRanged(keys="image", a_min=-57, a_max=164, b_min=0.0, b_max=1.0, clip=True),
CropForegroundd(keys=("image", "label"), source_key="image"),
]
if context.request.get("to_gpu", False):
t.extend([EnsureTyped(keys=("image", "label")), ToDeviced(keys=("image", "label"), device=context.device)])
t.extend(
[
RandCropByPosNegLabeld(
keys=("image", "label"),
label_key="label",
spatial_size=(96, 96, 96),
pos=1,
neg=1,
num_samples=4,
image_key="image",
image_threshold=0,
),
RandShiftIntensityd(keys="image", offsets=0.1, prob=0.5),
]
)
return t
def train_post_transforms(self, context: Context):
return [
ToTensord(keys=("pred", "label")),
Activationsd(keys="pred", softmax=True),
AsDiscreted(
keys=("pred", "label"),
argmax=(True, False),
to_onehot=True,
n_classes=2,
),
]
def val_pre_transforms(self, context: Context):
t = [
LoadImaged(keys=("image", "label")),
AddChanneld(keys=("image", "label")),
Spacingd(
keys=("image", "label"),
pixdim=(1.0, 1.0, 1.0),
mode=("bilinear", "nearest"),
),
ScaleIntensityRanged(keys="image", a_min=-57, a_max=164, b_min=0.0, b_max=1.0, clip=True),
CropForegroundd(keys=("image", "label"), source_key="image"),
]
if context.request.get("to_gpu", False):
t.extend([EnsureTyped(keys=("image", "label")), ToDeviced(keys=("image", "label"), device=context.device)])
return t
def val_inferer(self, context: Context):
return SlidingWindowInferer(roi_size=(160, 160, 160), sw_batch_size=1, overlap=0.25)
|
libs/configs/_base_/models/retinanet_r50_fpn.py | Artcs1/RotationDetection | 850 | 12768734 | # -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
import os
import tensorflow as tf
import math
ROOT_PATH = os.path.abspath('../../')
print(ROOT_PATH)
SUMMARY_PATH = os.path.join(ROOT_PATH, 'output/summary')
# backbone
NET_NAME = 'resnet50_v1d'
RESTORE_FROM_RPN = False
FIXED_BLOCKS = 1 # allow 0~3
FREEZE_BLOCKS = [True, False, False, False, False] # for gluoncv backbone
# neck
FPN_MODE = 'fpn'
SHARE_NET = True
USE_P5 = True
FPN_CHANNEL = 256
# bbox head
NUM_SUBNET_CONV = 4
LEVEL = ['P3', 'P4', 'P5', 'P6', 'P7']
BASE_ANCHOR_SIZE_LIST = [32, 64, 128, 256, 512]
ANCHOR_STRIDE = [8, 16, 32, 64, 128]
ANCHOR_SCALES = [2 ** 0, 2 ** (1.0 / 3.0), 2 ** (2.0 / 3.0)]
ANCHOR_RATIOS = [1, 1 / 2, 2., 1 / 3., 3., 5., 1 / 5.]
ANCHOR_ANGLES = [-90, -75, -60, -45, -30, -15]
ANCHOR_SCALE_FACTORS = None
USE_CENTER_OFFSET = True
METHOD = 'H'
ANGLE_RANGE = 90 # 90 or 180
USE_GN = False
SUBNETS_WEIGHTS_INITIALIZER = tf.random_normal_initializer(mean=0.0, stddev=0.01, seed=None)
SUBNETS_BIAS_INITIALIZER = tf.constant_initializer(value=0.0)
PROBABILITY = 0.01
FINAL_CONV_BIAS_INITIALIZER = tf.constant_initializer(value=-math.log((1.0 - PROBABILITY) / PROBABILITY))
# loss
CLS_WEIGHT = 1.0
REG_WEIGHT = 1.0
# sample
IOU_POSITIVE_THRESHOLD = 0.5
IOU_NEGATIVE_THRESHOLD = 0.4
# post-processing
NMS = True
NMS_IOU_THRESHOLD = 0.3
MAXIMUM_DETECTIONS = 100
FILTERED_SCORE = 0.05
VIS_SCORE = 0.4
# test and eval
TEST_SAVE_PATH = os.path.join(ROOT_PATH, 'tools/test_result')
EVALUATE_R_DIR = os.path.join(ROOT_PATH, 'output/evaluate_result_pickle/')
USE_07_METRIC = True
EVAL_THRESHOLD = 0.5
|
lib/python/abcutils/CMakeCache.py | ryu-sw/alembic | 921 | 12768742 | <gh_stars>100-1000
#!/usr/bin/env python2.6
#-*- mode: python -*-
##-*****************************************************************************
##
## Copyright (c) 2009-2011,
## <NAME> Imageworks Inc. and
## Industrial Light & Magic, a division of Lucasfilm Entertainment Company Ltd.
##
## All rights reserved.
##
## Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are
## met:
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above
## copyright notice, this list of conditions and the following disclaimer
## in the documentation and/or other materials provided with the
## distribution.
## * Neither the name of Industrial Light & Magic nor the names of
## its contributors may be used to endorse or promote products derived
## from this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
##
##-*****************************************************************************
from __future__ import with_statement
import os, sys, re
from Path import Path
##-*****************************************************************************
COMMENT = re.compile( r"//|#" )
WS = re.compile( r"\s" )
##-*****************************************************************************
class CacheEntry( object ):
def __init__( self, _line ):
line = WS.sub( "", str( _line ) )
if not line:
return None
elif COMMENT.match( line ):
return None
else:
# get rid of comments at the end of the line
line = COMMENT.split( line, 1 )[0].strip()
try:
name_type, value = line.split( '=' )
self._value = value.strip()
if self._value == '':
self._value = None
name, typ = name_type.split( ':' )
self._name = name.strip()
self._type = typ.strip()
except ValueError:
sys.stderr.write( "Could not parse line '%s'\n" % _line )
self._value = None
self._name = None
self._type = None
def __str__( self ):
val = ""
typ = ""
if self._value != None:
val = self._value
if self._type != None:
typ = self._type
if self._name == None:
return ""
else:
s = "%s:%s=%s" % ( self._name, typ, val )
return s.strip()
def __eq__( self, other ):
return str( self ) == str( other )
def __nonzero__( self ):
try:
return self._name != None and self._value != None
except AttributeError:
return False
def name( self ):
return self._name
def value( self, newval = None ):
if newval != None:
self._value = newval
else:
return self._value
def hint( self ):
"""Return the CMakeCache TYPE of the entry; used as a hint to CMake
GUIs."""
return self._type
##-*****************************************************************************
class CMakeCache( object ):
"""This class is used to read in and get programmatic access to the
variables in a CMakeCache.txt file, manipulate them, and then write the
cache back out."""
def __init__( self, path=None ):
self._cachefile = Path( path )
_cachefile = str( self._cachefile )
self._entries = {}
if self._cachefile.exists():
with open( _cachefile ) as c:
entries = filter( None, map( lambda x: CacheEntry( x ),
c.readlines() ) )
entries = filter( lambda x: x.value() != None, entries )
for i in entries:
self._entries[i.name()] = i
def __contains__( self, thingy ):
try:
return thingy in self.names()
except TypeError:
return thingy in self._entries.values()
def __iter__( self ):
return self._entries
def __nonzero__( self ):
return len( self._entries ) > 0
def __str__( self ):
return os.linesep.join( map( lambda x: str( x ), self.entries() ) )
def add( self, entry ):
e = CacheEntry( entry )
if e:
if not e in self:
self._entries[e.name()] = e
else:
sys.stderr.write( "Entry for '%s' is already in the cache.\n" % \
e.name() )
else:
sys.stderr.write( "Could not create cache entry for '%s'\n" % e )
def update( self, entry ):
e = CacheEntry( entry )
if e:
self._entries[e.name()] = e
else:
sys.stderr.write( "Could not create cache entry for '%s'\n" % e )
def names( self ):
return self._entries.keys()
def entries( self ):
return self._entries.values()
def get( self, name ):
return self._entries[name]
def cachefile( self ):
return self._cachefile
def refresh( self ):
self.__init__( self._cachefile )
def write( self, newfile = None ):
if newfile == None:
newfile = self._cachefile
with open( newfile, 'w' ) as f:
for e in self.entries():
f.write( str( e ) + os.linesep )
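

##-*****************************************************************************
if __name__ == '__main__':
    # Illustrative usage sketch added for clarity; it is not part of the
    # original module. It exercises only the API defined above (CMakeCache,
    # __contains__, get, add, write), and the default file name below is just
    # an example path.
    _path = sys.argv[1] if len( sys.argv ) > 1 else 'CMakeCache.txt'
    cache = CMakeCache( _path )
    if 'CMAKE_INSTALL_PREFIX' in cache:
        cache.get( 'CMAKE_INSTALL_PREFIX' ).value( '/usr/local' )
    else:
        cache.add( 'CMAKE_INSTALL_PREFIX:PATH=/usr/local' )
    cache.write( _path )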
|
Lambda/MatchStatus.py | nkwangjun/aws-gamelift-sample | 131 | 12768755 | import boto3
from botocore.exceptions import ClientError
import json
import time
dynamodb = boto3.resource('dynamodb', region_name='us-east-1')
ddb_table = dynamodb.Table('GomokuPlayerInfo')
def lambda_handler(event, context):
print(event)
# You can also use TicketId to track Matchmaking Event.
ticket_id = event['TicketId']
player_name = event['PlayerName']
response = { 'IpAddress': '', 'PlayerSessionId': '', 'Port': 0 }
try:
match_response = ddb_table.get_item(
TableName='GomokuPlayerInfo',
Key={
'PlayerName': player_name
})
if 'Item' in match_response:
print(match_response['Item'])
connection_info = json.loads(match_response['Item']['ConnectionInfo'])
if connection_info['status'] == 'matching':
response['IpAddress'] = connection_info['IpAddress']
response['Port'] = connection_info['Port']
response['PlayerSessionId'] = connection_info['PlayerSessionId']
connection_update = { 'IpAddress': connection_info['IpAddress'], 'Port': connection_info['Port'], 'PlayerSessionId': connection_info['PlayerSessionId'], 'timestamp': int(time.time()), 'status': 'complete' }
ddb_table.update_item(
TableName="GomokuPlayerInfo",
Key={ 'PlayerName' : player_name },
UpdateExpression="set ConnectionInfo = :connection_update",
ExpressionAttributeValues={
':connection_update': "" + json.dumps(connection_update),
},
ReturnValues="UPDATED_NEW"
)
except ClientError as e:
print(e.response['Error']['Message'])
print(response)
return response
|
pinterest_problems/problem_3.py | loftwah/Daily-Coding-Problem | 129 | 12768761 | """This problem was asked by Pinterest.
At a party, there is a single person who everyone knows, but who does not
know anyone in return (the "celebrity"). To help figure out who this is,
you have access to an O(1) method called knows(a, b), which returns True
if person a knows person b, else False.
Given a list of N people and the above operation,
find a way to identify the celebrity in O(N) time.
""" |
tools/android/modularization/convenience/touch_resources.py | Ron423c/chromium | 575 | 12768768 | #!/usr/bin/env python3
# Copyright 2021 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
r"""Creates Android resources directories and boilerplate files for a module.
This is a utility script for conveniently creating resources directories and
values .xml files in modules prefilled with boilerplate and example usages. It
prints out suggested changes to the BUILD.gn and will apply them if accepted.
Examples:
Touch colors.xml and styles.xml in module foo:
tools/android/modularization/convenience/touch_resources.py \
chrome/browser/foo \
-v colors styles
Touch dimens.xml in module foo's internal dir for hdpi, xhdpi and xxdpi:
tools/android/modularization/convenience/touch_resources.py \
chrome/browser/foo/internal \
-v dimens \
-q hdpi xhdpi xxhdpi
Touch drawable directories in module foo for hdpi, xhdpi and xxdpi:
tools/android/modularization/convenience/touch_resources.py \
chrome/browser/foo \
-d drawable \
-q hdpi xhdpi xxhdpi
"""
import argparse
import datetime
import pathlib
from typing import List, Optional, Tuple
import build_gn_editor
_IGNORED_FILES_IN_RES = {'DIR_METADATA', 'OWNERS'}
_VALUES_SUPPORTED = [
'arrays',
'colors',
'dimens',
'ids',
'strings',
'styles',
]
_DIRS_SUPPORTED = [
'animator',
'anim',
'color',
'drawable',
'font',
'mipmap',
'layout',
'menu',
'raw',
'values',
'xml',
]
def main():
arg_parser = argparse.ArgumentParser(
description='Creates Android resources directories and boilerplate files '
'for a module.')
arg_parser.add_argument('module',
help='Module directory to create resources for. e.g. '
'chrome/browser/foo')
arg_parser.add_argument('-v',
'--values',
nargs='+',
default=[],
choices=_VALUES_SUPPORTED,
help='Creates values .xml resources files that do '
'not exist yet.')
arg_parser.add_argument(
'-d',
'--directories',
nargs='+',
default=[],
choices=_DIRS_SUPPORTED,
help='Creates resources file directories that do not exist yet. '
'Use --values to create the values directory.')
arg_parser.add_argument(
'-q',
'--qualifiers',
nargs='+',
help='If specified, resources will be created under these Android '
'resources qualifiers. See '
'https://developer.android.com/guide/topics/resources/providing-resources#AlternativeResources'
)
arguments = arg_parser.parse_args()
# Recognize directory structure and determine the existing BUILD.gn location
# and where resources are or should be
build_gn_path, resources_path = _identify_module_structure(arguments.module)
# Create res/ directory if it does not exist
if not resources_path.is_dir():
resources_path.mkdir(parents=True)
print(f'Created resources directory: {resources_path}')
# Detect existing resources
all_resources = [
p for p in resources_path.rglob('*')
if p.is_file() and p.name not in _IGNORED_FILES_IN_RES
]
changes_requested = False
new_resources = []
# Process -q/--qualifiers
if not arguments.qualifiers:
qualifier_suffixes = ['']
else:
qualifier_suffixes = [f'-{qualifier}' for qualifier in arguments.qualifiers]
# Process -v/--values
for value_type in arguments.values:
changes_requested = True
if value_type == 'strings':
raise ValueError(
'strings.xml files are replaced by strings.grd files for '
'localization, and modules do not need to create separate '
'strings.grd files. Existing strings can be left in and new strings '
'can be added to '
'chrome/browser/ui/android/strings/android_chrome_strings.grd')
else:
created_resources = _touch_values_files(resources_path, value_type,
qualifier_suffixes)
new_resources.extend(created_resources)
all_resources.extend(created_resources)
# Process -d/--directories
for subdirectory in arguments.directories:
changes_requested = True
if subdirectory == 'values':
raise ValueError(
'Use -v/--values to create the values directory and values resources.'
)
else:
_touch_subdirectories(resources_path, subdirectory, qualifier_suffixes)
if not changes_requested:
print('No resource types specified to create, so just created the res/ '
'directory. Use -v/--values to create value resources and '
'-d/--directories to create resources subdirectories.')
# Print out build target suggestions
all_resources.sort(key=str)
if not all_resources:
return
build_file = build_gn_editor.BuildFile(build_gn_path)
build_gn_changes_ok = _update_build_file(build_file, all_resources)
if not build_gn_changes_ok:
_print_build_target_suggestions(build_gn_path, all_resources)
return
print('Final delta:')
print(build_file.get_diff())
apply_changes = _yes_or_no('Would you like to apply these changes?')
if not apply_changes:
return
build_file.write_content_to_file()
def _yes_or_no(question: str) -> bool:
val = input(question + ' [(y)es/(N)o] ')
try:
y_or_n = val.lower().strip()
return y_or_n[0] == 'y'
except Exception:
print('Invalid input. Assuming No.')
return False
def _determine_target_to_use(targets: List[str], target_type: str,
default_name: str) -> Optional[str]:
num_targets = len(targets)
if not num_targets:
print(f'Found no existing {target_type} will create ":{default_name}".')
return default_name
elif num_targets == 1:
print(f'Found existing target {target_type}("{targets[0]}"), using it.')
return targets[0]
else:
print(f'Found multiple existing {target_type} targets, pick one: ')
return _enumerate_targets_and_ask(targets)
def _enumerate_targets_and_ask(targets: List[str]) -> Optional[str]:
for i, target in enumerate(targets):
print(f'{i + 1}: {target}')
try:
val = int(
        input('Enter the number corresponding to the target you want to '
'use: ')) - 1
except ValueError:
return None
if 0 <= val < len(targets):
return targets[val]
return None
def _identify_module_structure(path_argument: str
) -> Tuple[pathlib.Path, pathlib.Path]:
module_path = pathlib.Path(path_argument)
assert module_path.is_dir()
# If present, prefer module/android/BUILD.gn
possible_android_path = module_path / 'android'
if possible_android_path.is_dir():
possible_build_gn_path = possible_android_path / 'BUILD.gn'
if possible_build_gn_path.is_file():
build_gn_path = possible_build_gn_path
resources_path = possible_android_path / 'java' / 'res'
return build_gn_path, resources_path
# The recommended structure is module/BUILD.gn
possible_build_gn_path = module_path / 'BUILD.gn'
if possible_build_gn_path.is_file():
build_gn_path = possible_build_gn_path
possible_existing_java_path = module_path / 'java'
# If module/java exists, use module/java/res, but the preferred structure is
# module/android/java/res
if possible_existing_java_path.is_dir():
resources_path = possible_existing_java_path / 'res'
else:
resources_path = possible_android_path / 'java' / 'res'
return build_gn_path, resources_path
raise Exception(
f'BUILD.gn found neither in {module_path} nor in {possible_android_path}')
def _touch_values_files(resources_path: pathlib.Path, value_resource_type: str,
qualifier_suffixes: List[str]) -> List[pathlib.Path]:
created_files = []
for qualifier_suffix in qualifier_suffixes:
values_path = resources_path / f'values{qualifier_suffix}'
values_path.mkdir(parents=True, exist_ok=True)
xml_path = values_path / f'{value_resource_type}.xml'
if xml_path.is_file():
print(f'{xml_path} already exists.')
continue
with xml_path.open('a') as f:
f.write(_create_filler(value_resource_type))
print(f'Created {xml_path}')
created_files.append(xml_path)
return created_files
_RESOURCES_BOILERPLATE_TEMPLATE = """<?xml version="1.0" encoding="utf-8"?>
<!-- Copyright {year} The Chromium Authors. All rights reserved.
Use of this source code is governed by a BSD-style license that can be
found in the LICENSE file. -->
<resources xmlns:tools="http://schemas.android.com/tools">
{contents}
</resources>
"""
_DIMENS_BOILERPLATE = """ <!-- Foo icon dimensions -->
<dimen name="foo_icon_height">24dp</dimen>
<dimen name="foo_icon_width">24dp</dimen>"""
_COLORS_BOILERPLATE = """ <!-- Foo UI colors -->
<color name="foo_background_color">@color/default_bg_color_light</color>"""
_STYLES_BOILERPLATE = """ <!-- Styling for a Foo menu button. -->
<style name="FooMenuButton">
<item name="android:layout_width">48dp</item>
<item name="android:layout_height">24dp</item>
<item name="tint">@color/default_icon_color_tint_list</item>
</style>"""
_IDS_BOILERPLATE = """ <!-- Dialog button ids -->
<item type="id" name="foo_ok_button" />
<item type="id" name="foo_cancel_button" />"""
_ARRAYS_BOILERPLATE = """ <!-- Prime numbers -->
<integer-array name="foo_primes">
<item>2</item>
<item>3</item>
<item>5</item>
<item>7</item>
</integer-array>
<!-- Geometrics shapes -->
<array name="foo_shapes">
<item>@drawable/triangle</item>
<item>@drawable/square</item>
<item>@drawable/circle</item>
</array>"""
_BOILERPLATE = {
'dimens': _DIMENS_BOILERPLATE,
'colors': _COLORS_BOILERPLATE,
'styles': _STYLES_BOILERPLATE,
'ids': _IDS_BOILERPLATE,
'arrays': _ARRAYS_BOILERPLATE
}
def _create_filler(value_resource_type: str) -> str:
boilerplate = _BOILERPLATE[value_resource_type]
return _RESOURCES_BOILERPLATE_TEMPLATE.format(year=_get_current_year(),
contents=boilerplate)
def _get_current_year() -> int:
return datetime.datetime.now().year
_COMMON_RESOURCE_DEPS = [
"//chrome/browser/ui/android/strings:ui_strings_grd",
"//components/browser_ui/strings/android:browser_ui_strings_grd",
"//components/browser_ui/styles/android:java_resources",
"//components/browser_ui/widget/android:java_resources",
"//third_party/android_deps:material_design_java",
"//ui/android:ui_java_resources",
]
def _touch_subdirectories(resources_path: pathlib.Path, subdirectory: str,
                          qualifier_suffixes: List[str]) -> None:
for qualifier_suffix in qualifier_suffixes:
subdir_name = f'{subdirectory}{qualifier_suffix}'
subdir_path = resources_path / subdir_name
if not subdir_path.is_dir():
subdir_path.mkdir(parents=True)
print(f'Created {subdir_path}')
else:
print(f'{subdir_path} already exists.')
def _generate_resources_sources(build_gn_dir_path: pathlib.Path,
new_resources: List[pathlib.Path]) -> List[str]:
return [f'"{str(r.relative_to(build_gn_dir_path))}"' for r in new_resources]
def _list_to_lines(lines, indent):
spaces = ' ' * indent
return '\n'.join([f'{spaces}{line},' for line in lines])
def _generate_suggested_resources_deps() -> List[str]:
return [f'# "{dep}"' for dep in _COMMON_RESOURCE_DEPS]
def _generate_resources_content(build_gn_path: pathlib.Path,
new_resources: List[pathlib.Path], *,
include_comment: bool) -> str:
build_gn_dir_path = build_gn_path.parent
new_resources_lines = _list_to_lines(
_generate_resources_sources(build_gn_dir_path, new_resources), 4)
suggested_deps_lines = _list_to_lines(_generate_suggested_resources_deps(), 4)
comment = ''
if include_comment:
comment = ('\n # Commonly required resources deps for convenience, ' +
'add other required deps and remove unnecessary ones.')
resources_content = f"""sources = [
{new_resources_lines}
]
deps = [{comment}
{suggested_deps_lines}
]"""
return resources_content
def _generate_suggested_resources(build_gn_path: pathlib.Path,
new_resources: List[pathlib.Path]) -> str:
resources_content = _generate_resources_content(build_gn_path,
new_resources,
include_comment=True)
resources_target_suggestion = f"""
android_resources("java_resources") {{
{resources_content}
}}"""
return resources_target_suggestion
def _generate_suggested_java_package(build_gn_path: pathlib.Path) -> str:
build_gn_dir_path = build_gn_path.parent
parts_for_package = build_gn_dir_path.parts
# internal, public or android subdirectories are not part of the Java package.
while parts_for_package[-1] in ('internal', 'public', 'android'):
parts_for_package = parts_for_package[:-1]
return f'org.chromium.{".".join(parts_for_package)}'
def _generate_library_content(build_gn_path: pathlib.Path,
resources_target_name: str) -> str:
suggested_java_package = _generate_suggested_java_package(build_gn_path)
library_content = f"""deps = [
":{resources_target_name}",
]
resources_package = "{suggested_java_package}" """
return library_content
def _generate_library_target(build_gn_path: pathlib.Path,
resources_target_name: str) -> str:
library_content = _generate_library_content(build_gn_path,
resources_target_name)
android_library_target_suggestion = f"""
android_library("java") {{
{library_content}
}}"""
return android_library_target_suggestion
def _create_or_update_variable_list(target: build_gn_editor.BuildTarget,
variable_name: str,
elements: List[str]) -> None:
variable = target.get_variable(variable_name)
if variable:
variable_list = variable.get_content_as_list()
if not variable_list:
raise build_gn_editor.BuildFileUpdateError(
f'{target.get_type()}("{target.get_name()}") '
f'{variable_name} is not a list.')
variable_list.add_elements(elements)
variable.set_content_from_list(variable_list)
target.replace_variable(variable)
return
variable = build_gn_editor.TargetVariable(variable_name, '')
variable_list = build_gn_editor.VariableContentList()
variable_list.add_elements(elements)
variable.set_content_from_list(variable_list)
target.add_variable(variable)
def _update_build_file(build_file: build_gn_editor.BuildFile,
all_resources: List[pathlib.Path]) -> bool:
libraries = build_file.get_target_names_of_type('android_library')
resources = build_file.get_target_names_of_type('android_resources')
library_target = _determine_target_to_use(libraries, 'android_library',
'java')
resources_target = _determine_target_to_use(resources, 'android_resources',
'java_resources')
if not library_target or not resources_target:
print('Invalid build target selections. Aborting BUILD.gn changes.')
return False
try:
_update_build_targets(build_file, all_resources, library_target,
resources_target)
except build_gn_editor.BuildFileUpdateError as e:
print(f'Changes to build targets failed: {e}. Aborting BUILD.gn changes.')
return False
try:
build_file.format_content()
except build_gn_editor.BuildFileUpdateError as e:
    print(f'Formatting BUILD.gn failed: {e}\n Aborting BUILD.gn changes')
return False
return True
def _update_build_targets(build_file: build_gn_editor.BuildFile,
all_resources: List[pathlib.Path],
library_target: str, resources_target: str) -> None:
resources = build_file.get_target('android_resources', resources_target)
if not resources:
resources = build_gn_editor.BuildTarget(
'android_resources', resources_target,
_generate_resources_content(build_file.get_path(),
all_resources,
include_comment=False))
build_file.add_target(resources)
else:
_create_or_update_variable_list(
resources, 'sources',
_generate_resources_sources(build_file.get_path().parent,
all_resources))
_create_or_update_variable_list(resources, 'deps',
_generate_suggested_resources_deps())
build_file.replace_target(resources)
library = build_file.get_target('android_library', library_target)
if not library:
library = build_gn_editor.BuildTarget(
'android_library', library_target,
_generate_library_content(build_file.get_path(), resources_target))
build_file.add_target(library)
else:
_create_or_update_variable_list(library, 'deps', [f'":{resources_target}"'])
resources_package = library.get_variable('resources_package')
if not resources_package:
resources_package_str = _generate_suggested_java_package(
build_file.get_path())
library.add_variable(
build_gn_editor.TargetVariable('resources_package',
f'"{resources_package_str}"'))
build_file.replace_target(library)
def _print_build_target_suggestions(build_gn_path: pathlib.Path,
new_resources: List[pathlib.Path]) -> None:
resources_target_suggestion = _generate_suggested_resources(
build_gn_path, new_resources)
android_library_target_suggestion = _generate_library_target(
build_gn_path, 'java_resources')
print(f'Suggestion for {build_gn_path}:')
print(resources_target_suggestion)
print(android_library_target_suggestion)
print()
if __name__ == '__main__':
main()
|
sot/eval_sot.py | take-cheeze/models | 112 | 12768769 | import argparse
import numpy as np
import chainer
from siam_rpn.general.eval_sot_vot import eval_sot_vot
from siam_rpn.siam_rpn import SiamRPN
from siam_rpn.siam_rpn_tracker import SiamRPNTracker
from siam_rpn.siam_mask_tracker import SiamMaskTracker
from siam_rpn.general.vot_tracking_dataset import VOTTrackingDataset
from chainercv.utils import apply_to_iterator
from chainercv.utils import ProgressHook
from chainer import iterators
from siam_rpn.general.predictor_with_gt import PredictorWithGT
def collate_images_from_same_video(data, used_ids=None):
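    # Groups per-frame images and ground-truth polygons by video id, returning
    # one image slice and one polygon array per video, with the frames kept in
    # their original order.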
imgs = data.slice[:, 'img']
polys = data.slice[:, 'poly']
video_ids = data.slice[:, 'video_id']
frame_ids = data.slice[:, 'frame_id']
if used_ids is None:
used_ids = np.unique(video_ids)
np.sort(used_ids)
videos = []
video_polys = []
for video_id in used_ids:
indices = np.where(video_ids == video_id)[0]
the_frame_ids = list(frame_ids.slice[indices])
assert all(list(the_frame_ids) == np.arange(len(the_frame_ids)))
videos.append(imgs.slice[indices])
video_polys.append(polys[indices])
return videos, video_polys
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--pretrained-model', type=str)
parser.add_argument('--gpu', type=int, default=-1)
parser.add_argument('--mask', action='store_true')
args = parser.parse_args()
data = VOTTrackingDataset('data')
if args.mask:
model = SiamRPN(multi_scale=False, mask=True)
chainer.serializers.load_npz(args.pretrained_model, model)
tracker = SiamMaskTracker(model)
else:
model = SiamRPN()
chainer.serializers.load_npz(args.pretrained_model, model)
tracker = SiamRPNTracker(model)
if args.gpu >= 0:
chainer.cuda.get_device_from_id(args.gpu).use()
tracker.to_gpu()
videos, video_polys = collate_images_from_same_video(
data, used_ids=None)
video_dataset = chainer.datasets.TupleDataset(videos, video_polys)
it = iterators.SerialIterator(video_dataset, 1, False, False)
in_values, out_values, rest_values = apply_to_iterator(
PredictorWithGT(tracker, mask=args.mask), it,
n_input=2, hook=ProgressHook(len(video_dataset)))
# delete unused iterators explicitly
imgs, video_polys = in_values
pred_bboxes, pred_statuses, sizes = out_values
del imgs
video_polys = list(video_polys)
pred_bboxes = list(pred_bboxes)
pred_statuses = list(pred_statuses)
sizes = list(sizes)
np.savez(
'eval_sot_out.npz',
pred_bboxes=pred_bboxes, pred_statuses=pred_statuses,
gt_polys=video_polys, sizes=sizes)
result = eval_sot_vot(pred_bboxes, pred_statuses, video_polys, sizes)
print(result['eao'], result['accuracy'], result['robustness'])
|
setup.py | dendisuhubdy/deep_complex_networks | 641 | 12768835 | <reponame>dendisuhubdy/deep_complex_networks
#!/usr/bin/env python
from setuptools import setup, find_packages
with open('README.md') as f:
DESCRIPTION = f.read()
setup(
name='DeepComplexNetworks',
version='1',
license='MIT',
long_description=DESCRIPTION,
packages=find_packages() + find_packages('musicnet/'),
package_dir={'musicnet': 'musicnet/musicnet'},
scripts=['scripts/run.py', 'scripts/training.py', 'musicnet/scripts/train.py',
'musicnet/scripts/resample.py'],
install_requires=[
"numpy", "scipy", "sklearn", "Theano", "keras", "intervaltree",
"resampy", "mimir", "kerosene"]
)
|
smoke/features/steps/delete_steps.py | nhojpatrick/openshift_jenkins | 267 | 12768852 | <reponame>nhojpatrick/openshift_jenkins<gh_stars>100-1000
from behave import given, then
from smoke.features.steps.openshift import Openshift
from kubernetes import client, config
oc = Openshift()
v1 = client.CoreV1Api()
@then(u'we delete deploymentconfig.apps.openshift.io "jenkins"')
def del_dc(context):
res = oc.delete("deploymentconfig","jenkins",context.current_project)
if res == None:
raise AssertionError
@then(u'we delete route.route.openshift.io "jenkins"')
def del_route(context):
res = oc.delete("route","jenkins",context.current_project)
if res == None:
raise AssertionError
@then(u'delete configmap "jenkins-trusted-ca-bundle"')
def del_cm(context):
res = oc.delete("configmap","jenkins-trusted-ca-bundle",context.current_project)
if res == None:
raise AssertionError
@then(u'delete serviceaccount "jenkins"')
def del_sa(context):
res = oc.delete("serviceaccount","jenkins",context.current_project)
if res == None:
raise AssertionError
@then(u'delete rolebinding.authorization.openshift.io "jenkins_edit"')
def del_rb(context):
res = oc.delete("rolebinding","jenkins_edit",context.current_project)
if res == None:
raise AssertionError
@then(u'delete service "jenkins"')
def del_svc(context):
res = oc.delete("service","jenkins",context.current_project)
if res == None:
raise AssertionError
@then(u'delete service "jenkins-jnlp"')
def del_svc_jnlp(context):
res = oc.delete("service","jenkins-jnlp",context.current_project)
if res == None:
raise AssertionError
@then(u'delete all buildconfigs')
def del_bc(context):
res = oc.delete("bc","--all",context.current_project)
if res == None:
raise AssertionError
@then(u'delete all builds')
def del_builds(context):
res = oc.delete("builds","--all",context.current_project)
if res == None:
raise AssertionError
@then(u'delete all deploymentconfig')
def del_alldc(context):
res = oc.delete("deploymentconfig","--all",context.current_project)
if res == None:
raise AssertionError
@then(u'delete all remaining test resources')
@given(u'cleared from all test resources')
def del_all_remaining_test_resources(context):
delete_command = "all,rolebindings.authorization.openshift.io,bc,cm,is,pvc,sa,secret"
oc.delete(delete_command,"-l app=jenkins-ephemeral",context.current_project)
oc.delete(delete_command,"-l app=jenkins-persistent",context.current_project)
oc.delete(delete_command,"-l app=openshift-jee-sample",context.current_project)
oc.delete(delete_command,"-l app=jenkins-pipeline-example",context.current_project)
|
uninstall.py | slmjy/oci-ansible-modules | 106 | 12768876 | #!/usr/bin/env python
# Copyright (c) 2018, 2019 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
"""
Oracle Cloud Infrastructure(OCI) Ansible Modules Uninstaller Script
===================================================================
This script deletes OCI Ansible modules, Oracle docs fragments and Oracle Ansible utility file from the ansible path.
To uninstall OCI Ansible modules, execute:
$ ./uninstall.py
To execute the script with debug messages, execute:
$ ./uninstall.py --debug
author: "<NAME> (@rohitChaware)"
"""
from __future__ import print_function
import argparse
import os.path
import shutil
import sys
try:
import ansible
ANSIBLE_IS_INSTALLED = True
except ImportError:
ANSIBLE_IS_INSTALLED = False
debug = False
def parse_cli_args():
parser = argparse.ArgumentParser(description="Script to uninstall oci-ansible-role")
parser.add_argument(
"--debug",
action="store_true",
default=False,
help="Send debug messages to STDERR",
)
return parser.parse_args()
def log(*args, **kwargs):
if debug:
print(*args, file=sys.stderr, **kwargs)
def main():
if not ANSIBLE_IS_INSTALLED:
print("Could not load ansible module.")
sys.exit(1)
global debug
args = parse_cli_args()
if args.debug:
debug = True
ansible_path = os.path.dirname(os.path.abspath(os.path.realpath(ansible.__file__)))
log("Ansible path: {}".format(ansible_path))
module_utils_path = os.path.join(ansible_path, "module_utils", "oracle")
log("Module utilities path: {}".format(module_utils_path))
document_fragments_path_old = os.path.join(
ansible_path, "utils", "module_docs_fragments"
)
document_fragments_path_new = os.path.join(ansible_path, "plugins", "doc_fragments")
if os.path.exists(document_fragments_path_new):
document_fragments_path = document_fragments_path_new
else:
document_fragments_path = document_fragments_path_old
log("Documentation fragments path: {}".format(document_fragments_path))
delete(module_utils_path)
oci_docs_fragments = []
for filename in os.listdir(document_fragments_path):
if filename.startswith("oracle"):
oci_docs_fragments.append(os.path.join(document_fragments_path, filename))
delete(oci_docs_fragments)
oracle_module_dir_path = os.path.join(ansible_path, "modules", "cloud", "oracle")
delete(oracle_module_dir_path)
print("Uninstalled OCI Ansible modules successfully.")
def delete(paths):
if type(paths) is not list:
paths = [paths]
for path in paths:
if os.path.isdir(path):
print("Deleting directory {}".format(path))
shutil.rmtree(path)
elif os.path.isfile(path):
print("Deleting {}".format(path))
os.remove(path)
if __name__ == "__main__":
main()
|
tests/test_cmd.py | KrishanBhasin/giraffez | 122 | 12768881 | # -*- coding: utf-8 -*-
import pytest
from giraffez._teradata import RequestEnded, StatementEnded, StatementInfoEnded
import giraffez
from giraffez.constants import *
from giraffez.errors import *
from giraffez.types import *
class ResultsHelper:
"""
Helps to emulate how exceptions are raised when working with the CLIv2 so
that the control flow will be adequately represented.
"""
def __init__(self, rows):
self.first = True
self.index = 0
self.rows = rows
def get(self):
if self.first:
self.first = False
raise StatementInfoEnded
if self.index >= len(self.rows):
raise RequestEnded
row = self.rows[self.index]
self.index += 1
return row
def __call__(self):
return self.get()
@pytest.mark.usefixtures('config', 'context')
class TestCmd(object):
def test_results(self, mocker):
connect_mock = mocker.patch('giraffez.cmd.TeradataCmd._connect')
mock_columns = mocker.patch("giraffez.cmd.Cursor._columns")
cmd = giraffez.Cmd()
query = "select * from db1.info"
columns = Columns([
("col1", VARCHAR_NN, 50, 0, 0),
("col2", VARCHAR_N, 50, 0, 0),
("col3", VARCHAR_N, 50, 0, 0),
])
mock_columns.return_value = columns
rows = [
["value1", "value2", "value3"],
["value1", "value2", "value3"],
["value1", "value2", "value3"],
]
expected_rows = [
{"col1": "value1", "col2": "value2", "col3": "value3"},
{"col1": "value1", "col2": "value2", "col3": "value3"},
{"col1": "value1", "col2": "value2", "col3": "value3"},
]
cmd.cmd = mocker.MagicMock()
cmd.cmd.fetchone.side_effect = ResultsHelper(rows)
result = list(cmd.execute(query))
assert [x.items() for x in result] == expected_rows
cmd._close()
        # This ensures that the config was properly mocked
connect_mock.assert_called_with('db1', 'user123', '<PASSWORD>', None, None)
def test_invalid_credentials(self, mocker):
connect_mock = mocker.patch('giraffez.cmd.TeradataCmd._connect')
connect_mock.side_effect = InvalidCredentialsError("test")
with pytest.raises(InvalidCredentialsError):
cmd = giraffez.Cmd(protect=True)
cmd._close()
@pytest.mark.usefixtures('config', 'context', 'tmpfiles')
class TestInsert(object):
def test_insert_from_file(self, mocker, tmpfiles):
mock_connect = mocker.patch("giraffez.cmd.TeradataCmd._connect")
mock_execute = mocker.patch("giraffez.cmd.TeradataCmd.execute")
columns = Columns([
("col1", VARCHAR_NN, 50, 0, 0),
("col2", VARCHAR_N, 50, 0, 0),
("col3", VARCHAR_N, 50, 0, 0),
])
mock_columns = mocker.patch("giraffez.cmd.TeradataCmd.fetch_columns")
mock_columns.return_value = columns
with open(tmpfiles.load_file, 'w') as f:
f.write("|".join(["col1", "col2", "col3"]))
f.write("\n")
rows = []
for i in range(100):
rows.append("|".join(["value1", "value2", "value3"]))
f.write("\n".join(rows))
with giraffez.Cmd() as cmd:
result = cmd.insert("db1.test", tmpfiles.load_file, delimiter="|")
assert result.get('count') == 100
def test_insert_from_file_quoted(self, mocker, tmpfiles):
mock_connect = mocker.patch("giraffez.cmd.TeradataCmd._connect")
mock_execute = mocker.patch("giraffez.cmd.TeradataCmd.execute")
columns = Columns([
("col1", VARCHAR_NN, 50, 0, 0),
("col2", VARCHAR_N, 50, 0, 0),
("col3", VARCHAR_N, 50, 0, 0),
])
mock_columns = mocker.patch("giraffez.cmd.TeradataCmd.fetch_columns")
mock_columns.return_value = columns
with open(tmpfiles.load_file, 'w') as f:
f.write("|".join(["col1", "col2", "col3"]))
f.write("\n")
rows = []
for i in range(99):
rows.append("|".join(["value1", "value2", "value3"]))
rows.append("|".join(["value1",'"value2|withpipe"', "value3"]))
f.write("\n".join(rows))
with giraffez.Cmd() as cmd:
result = cmd.insert("db1.test", tmpfiles.load_file, delimiter="|")
assert result.get('count') == 100
def test_insert_from_file_single_quoted(self, mocker, tmpfiles):
mock_connect = mocker.patch("giraffez.cmd.TeradataCmd._connect")
mock_execute = mocker.patch("giraffez.cmd.TeradataCmd.execute")
columns = Columns([
("col1", VARCHAR_NN, 50, 0, 0),
("col2", VARCHAR_N, 50, 0, 0),
("col3", VARCHAR_N, 50, 0, 0),
])
mock_columns = mocker.patch("giraffez.cmd.TeradataCmd.fetch_columns")
mock_columns.return_value = columns
with open(tmpfiles.load_file, 'w') as f:
f.write("|".join(["col1", "col2", "col3"]))
f.write("\n")
rows = []
for i in range(99):
rows.append("|".join(["value1", "value2", "value3"]))
rows.append("|".join(["value1","'value2|withpipe'", "value3"]))
f.write("\n".join(rows))
with giraffez.Cmd() as cmd:
result = cmd.insert("db1.test", tmpfiles.load_file, delimiter="|", quotechar="'")
assert result.get('count') == 100
def test_insert_from_file_nonstandard_quote(self, mocker, tmpfiles):
mock_connect = mocker.patch("giraffez.cmd.TeradataCmd._connect")
mock_execute = mocker.patch("giraffez.cmd.TeradataCmd.execute")
columns = Columns([
("col1", VARCHAR_NN, 50, 0, 0),
("col2", VARCHAR_N, 50, 0, 0),
("col3", VARCHAR_N, 50, 0, 0),
])
mock_columns = mocker.patch("giraffez.cmd.TeradataCmd.fetch_columns")
mock_columns.return_value = columns
with open(tmpfiles.load_file, 'w') as f:
f.write("|".join(["col1", "col2", "col3"]))
f.write("\n")
rows = []
for i in range(99):
rows.append("|".join(["value1", "value2", "value3"]))
rows.append("|".join(['va"lue1','$value2|withpipe"and"quote$', "value3"]))
f.write("\n".join(rows))
with giraffez.Cmd() as cmd:
result = cmd.insert("db1.test", tmpfiles.load_file, delimiter="|", quotechar="$")
assert result.get('count') == 100
def test_insert_from_file_error(self, mocker, tmpfiles):
mock_connect = mocker.patch("giraffez.cmd.TeradataCmd._connect")
mock_execute = mocker.patch("giraffez.cmd.TeradataCmd.execute")
columns = Columns([
("col1", VARCHAR_NN, 50, 0, 0),
("col2", VARCHAR_N, 50, 0, 0),
("col3", VARCHAR_N, 50, 0, 0),
])
mock_columns = mocker.patch("giraffez.cmd.TeradataCmd.fetch_columns")
mock_columns.return_value = columns
with open(tmpfiles.load_file, 'w') as f:
f.write("|".join(["col1", "col2", "col3"]))
f.write("\n")
f.write("|".join(["value1", "value2", "value3", "value4"]))
f.write("\n")
with giraffez.Cmd() as cmd:
cmd.panic = False
result = cmd.insert("db1.test", tmpfiles.load_file, delimiter="|")
def test_insert_from_file_error_panic(self, mocker, tmpfiles):
mock_connect = mocker.patch("giraffez.cmd.TeradataCmd._connect")
mock_execute = mocker.patch("giraffez.cmd.TeradataCmd.execute")
columns = Columns([
("col1", VARCHAR_NN, 50, 0, 0),
("col2", VARCHAR_N, 50, 0, 0),
("col3", VARCHAR_N, 50, 0, 0),
])
mock_columns = mocker.patch("giraffez.cmd.TeradataCmd.fetch_columns")
mock_columns.return_value = columns
with open(tmpfiles.load_file, 'w') as f:
f.write("|".join(["col1", "col2", "col3"]))
f.write("\n")
f.write("|".join(["value1", "value2", "value3", "value4"]))
f.write("\n")
with giraffez.Cmd() as cmd:
with pytest.raises(GiraffeEncodeError):
result = cmd.insert("db1.test", tmpfiles.load_file, delimiter="|")
print(result)
def test_insert_from_file_invalid_header(self, mocker, tmpfiles):
mock_connect = mocker.patch("giraffez.cmd.TeradataCmd._connect")
mock_execute = mocker.patch("giraffez.cmd.TeradataCmd.execute")
columns = Columns([
("col1", VARCHAR_NN, 50, 0, 0),
("col2", VARCHAR_N, 50, 0, 0),
("col3", VARCHAR_N, 50, 0, 0),
])
mock_columns = mocker.patch("giraffez.cmd.TeradataCmd.fetch_columns")
mock_columns.return_value = columns
# Invalid column (blank string)
with open(tmpfiles.load_file, 'w') as f:
f.write("|".join(["col1", "col2", "col3", "", ""]))
f.write("\n")
f.write("|".join(["value1", "value2", "value3"]))
f.write("\n")
with giraffez.Cmd() as cmd:
with pytest.raises(GiraffeError):
result = cmd.insert("db1.test", tmpfiles.load_file, delimiter="|")
print(result)
# Invalid column (wrong name)
with open(tmpfiles.load_file, 'w') as f:
f.write("|".join(["col1", "col2", "col4"]))
f.write("\n")
f.write("|".join(["value1", "value2", "value3"]))
f.write("\n")
with giraffez.Cmd() as cmd:
with pytest.raises(GiraffeError):
result = cmd.insert("db1.test", tmpfiles.load_file, delimiter="|")
print(result)
# Too many columns (duplicate name)
with open(tmpfiles.load_file, 'w') as f:
f.write("|".join(["col1", "col2", "col3", "col3"]))
f.write("\n")
f.write("|".join(["value1", "value2", "value3"]))
f.write("\n")
with giraffez.Cmd() as cmd:
with pytest.raises(GiraffeEncodeError):
result = cmd.insert("db1.test", tmpfiles.load_file, delimiter="|")
print(result)
def test_insert_insert_no_specify_fields(self, mocker):
mock_connect = mocker.patch("giraffez.cmd.TeradataCmd._connect")
mock_execute = mocker.patch("giraffez.cmd.TeradataCmd.execute")
columns = Columns([
("col1", VARCHAR_NN, 50, 0, 0),
("col2", VARCHAR_N, 50, 0, 0),
("col3", VARCHAR_N, 50, 0, 0),
])
mock_columns = mocker.patch("giraffez.cmd.TeradataCmd.fetch_columns")
mock_columns.return_value = columns
rows = [
("value1", "value3"),
("value1", "value3"),
("value1", "value3"),
]
with giraffez.Cmd() as cmd:
with pytest.raises(GiraffeEncodeError):
cmd.insert("db1.test", rows)
|
terrascript/oneandone/__init__.py | hugovk/python-terrascript | 507 | 12768930 | <filename>terrascript/oneandone/__init__.py
# terrascript/oneandone/__init__.py
import terrascript
class oneandone(terrascript.Provider):
pass
|
cli/endpoints/online/triton/ensemble/models/triton/bidaf-preprocess/1/model.py | denniseik/azureml-examples | 331 | 12768949 | import nltk
import json
import numpy as np
from nltk import word_tokenize
import triton_python_backend_utils as pb_utils
class TritonPythonModel:
"""Your Python model must use the same class name. Every Python model
that is created must have "TritonPythonModel" as the class name.
"""
def initialize(self, args):
"""`initialize` is called only once when the model is being loaded.
Implementing `initialize` function is optional. This function allows
the model to intialize any state associated with this model.
Parameters
----------
args : dict
Both keys and values are strings. The dictionary keys and values are:
* model_config: A JSON string containing the model configuration
* model_instance_kind: A string containing model instance kind
* model_instance_device_id: A string containing model instance device ID
* model_repository: Model repository path
* model_version: Model version
* model_name: Model name
"""
# You must parse model_config. JSON string is not parsed here
self.model_config = model_config = json.loads(args["model_config"])
# Get OUTPUT0 configuration
output0_config = pb_utils.get_output_config_by_name(model_config, "OUTPUT0")
# Get OUTPUT1 configuration
output1_config = pb_utils.get_output_config_by_name(model_config, "OUTPUT1")
# Get OUTPUT2 configuration
output2_config = pb_utils.get_output_config_by_name(model_config, "OUTPUT2")
# Get OUTPUT3 configuration
output3_config = pb_utils.get_output_config_by_name(model_config, "OUTPUT3")
# Convert Triton types to numpy types
self.output0_dtype = pb_utils.triton_string_to_numpy(
output0_config["data_type"]
)
self.output1_dtype = pb_utils.triton_string_to_numpy(
output1_config["data_type"]
)
self.output2_dtype = pb_utils.triton_string_to_numpy(
output2_config["data_type"]
)
self.output3_dtype = pb_utils.triton_string_to_numpy(
output3_config["data_type"]
)
# Get model repository path to read labels
self.model_repository = model_repository = args["model_repository"]
print(model_repository)
# Initialize tokenizer
nltk.download("punkt")
def tokenize(self, text):
tokens = word_tokenize(text)
# split into lower-case word tokens, in numpy array with shape of (seq, 1)
words = np.array([w.lower() for w in tokens], dtype=np.object_).reshape(-1, 1)
# split words into chars, in numpy array with shape of (seq, 1, 1, 16)
chars = [[c for c in t][:16] for t in tokens]
chars = [cs + [""] * (16 - len(cs)) for cs in chars]
chars = np.array(chars, dtype=np.object_).reshape(-1, 1, 1, 16)
return words, chars
def execute(self, requests):
"""
Parameters
----------
requests : list
A list of pb_utils.InferenceRequest
Returns
-------
list
A list of pb_utils.InferenceResponse. The length of this list must
be the same as `requests`
"""
output0_dtype = self.output0_dtype
output1_dtype = self.output1_dtype
output2_dtype = self.output2_dtype
output3_dtype = self.output3_dtype
responses = []
        # Every Python backend must iterate over every one of the requests
# and create a pb_utils.InferenceResponse for each of them.
for request in requests:
# Get INPUT0
in_0 = pb_utils.get_input_tensor_by_name(request, "INPUT0")
context = in_0.as_numpy().astype(str)
print(context)
# Get INPUT1
in_0 = pb_utils.get_input_tensor_by_name(request, "INPUT1")
query = in_0.as_numpy().astype(str)
print(query)
cw, cc = self.tokenize(context[0])
qw, qc = self.tokenize(query[0])
out_0 = np.array(qw, dtype=output0_dtype)
out_1 = np.array(cc, dtype=output1_dtype)
out_2 = np.array(qc, dtype=output2_dtype)
out_3 = np.array(cw, dtype=output3_dtype)
# Create output tensors. You need pb_utils.Tensor objects to create pb_utils.InferenceResponse.
out_tensor_0 = pb_utils.Tensor("OUTPUT0", out_0)
out_tensor_1 = pb_utils.Tensor("OUTPUT1", out_1)
out_tensor_2 = pb_utils.Tensor("OUTPUT2", out_2)
out_tensor_3 = pb_utils.Tensor("OUTPUT3", out_3)
inference_response = pb_utils.InferenceResponse(
output_tensors=[out_tensor_0, out_tensor_1, out_tensor_2, out_tensor_3]
)
responses.append(inference_response)
return responses
def finalize(self):
"""`finalize` is called only once when the model is being unloaded.
Implementing `finalize` function is OPTIONAL. This function allows
the model to perform any necessary clean ups before exit.
"""
print("Cleaning up...")
|
test/tests/test_environments/python_src/main.py | jithindevasia/fission | 6,891 | 12768952 | <gh_stars>1000+
def main():
return 'THIS_IS_MAIN_MAIN'
def func():
return 'THIS_IS_MAIN_FUNC'
|
test/test_server.py | keredson/tinyweb | 138 | 12768989 | <reponame>keredson/tinyweb
#!/usr/bin/env micropython
"""
Unittests for Tiny Web
MIT license
(C) <NAME> 2017-2018
"""
import unittest
import uos as os
import uerrno as errno
import uasyncio as asyncio
from tinyweb import webserver
from tinyweb.server import urldecode_plus, parse_query_string
from tinyweb.server import request, HTTPException
# Helper to delete file
def delete_file(fn):
# "unlink" gets renamed to "remove" in micropython,
# so support both
if hasattr(os, 'unlink'):
os.unlink(fn)
else:
os.remove(fn)
# HTTP headers helpers
def HDR(str):
return '{}\r\n'.format(str)
HDRE = '\r\n'
class mockReader():
"""Mock for coroutine reader class"""
def __init__(self, lines):
if type(lines) is not list:
lines = [lines]
self.lines = lines
self.idx = 0
async def readline(self):
self.idx += 1
# Convert and return str to bytes
return self.lines[self.idx - 1].encode()
def readexactly(self, n):
return self.readline()
class mockWriter():
"""Mock for coroutine writer class"""
def __init__(self, generate_expection=None):
"""
keyword arguments:
generate_expection - raise exception when calling send()
"""
self.s = 1
self.history = []
self.closed = False
self.generate_expection = generate_expection
async def awrite(self, buf, off=0, sz=-1):
if sz == -1:
sz = len(buf) - off
if self.generate_expection:
raise self.generate_expection
        # Save buffer into history - so we can assert on it later
self.history.append(buf[:sz])
async def aclose(self):
self.closed = True
async def mock_wait_for(coro, timeout):
await coro
def run_coro(coro):
    """Simple helper to run a coroutine to completion"""
    # Mock wait_for() with the simple dummy coroutine defined above
    asyncio.wait_for = mock_wait_for
for i in coro:
pass
# Tests
class Utils(unittest.TestCase):
def testUrldecode(self):
runs = [('abc%20def', 'abc def'),
('abc%%20def', 'abc% def'),
('%%%', '%%%'),
('%20%20', ' '),
('abc', 'abc'),
('a%25%25%25c', 'a%%%c'),
('a++b', 'a b'),
('+%25+', ' % '),
('+%2B+', ' + '),
('%20+%2B+%41', ' + A'),
]
for r in runs:
self.assertEqual(urldecode_plus(r[0]), r[1])
def testParseQueryString(self):
runs = [('k1=v2', {'k1': 'v2'}),
('k1=v2&k11=v11', {'k1': 'v2',
'k11': 'v11'}),
('k1=v2&k11=', {'k1': 'v2',
'k11': ''}),
('k1=+%20', {'k1': ' '}),
('%6b1=+%20', {'k1': ' '}),
('k1=%3d1', {'k1': '=1'}),
('11=22%26&%3d=%3d', {'11': '22&',
'=': '='}),
]
for r in runs:
self.assertEqual(parse_query_string(r[0]), r[1])
class ServerParts(unittest.TestCase):
def testRequestLine(self):
runs = [('GETT / HTTP/1.1', 'GETT', '/'),
('TTEG\t/blah\tHTTP/1.1', 'TTEG', '/blah'),
('POST /qq/?q=q HTTP', 'POST', '/qq/', 'q=q'),
('POST /?q=q BSHT', 'POST', '/', 'q=q'),
('POST /?q=q&a=a JUNK', 'POST', '/', 'q=q&a=a')]
for r in runs:
try:
req = request(mockReader(r[0]))
run_coro(req.read_request_line())
self.assertEqual(r[1].encode(), req.method)
self.assertEqual(r[2].encode(), req.path)
if len(r) > 3:
self.assertEqual(r[3].encode(), req.query_string)
except Exception:
self.fail('exception on payload --{}--'.format(r[0]))
def testRequestLineEmptyLinesBefore(self):
req = request(mockReader(['\n', '\r\n', 'GET /?a=a HTTP/1.1']))
run_coro(req.read_request_line())
self.assertEqual(b'GET', req.method)
self.assertEqual(b'/', req.path)
self.assertEqual(b'a=a', req.query_string)
def testRequestLineNegative(self):
runs = ['',
'\t\t',
' ',
' / HTTP/1.1',
'GET',
'GET /',
'GET / '
]
for r in runs:
with self.assertRaises(HTTPException):
req = request(mockReader(r))
run_coro(req.read_request_line())
def testHeadersSimple(self):
req = request(mockReader([HDR('Host: google.com'),
HDRE]))
run_coro(req.read_headers([b'Host']))
self.assertEqual(req.headers, {b'Host': b'google.com'})
def testHeadersSpaces(self):
req = request(mockReader([HDR('Host: \t google.com \t '),
HDRE]))
run_coro(req.read_headers([b'Host']))
self.assertEqual(req.headers, {b'Host': b'google.com'})
def testHeadersEmptyValue(self):
req = request(mockReader([HDR('Host:'),
HDRE]))
run_coro(req.read_headers([b'Host']))
self.assertEqual(req.headers, {b'Host': b''})
def testHeadersMultiple(self):
req = request(mockReader([HDR('Host: google.com'),
HDR('Junk: you blah'),
HDR('Content-type: file'),
HDRE]))
hdrs = {b'Host': b'google.com',
b'Junk': b'you blah',
b'Content-type': b'file'}
run_coro(req.read_headers([b'Host', b'Junk', b'Content-type']))
self.assertEqual(req.headers, hdrs)
def testUrlFinderExplicit(self):
urls = [('/', 1),
('/%20', 2),
('/a/b', 3),
('/aac', 5)]
junk = ['//', '', '/a', '/aa', '/a/fhhfhfhfhfhf']
# Create server, add routes
srv = webserver()
for u in urls:
srv.add_route(u[0], u[1])
# Search them all
for u in urls:
# Create mock request object with "pre-parsed" url path
rq = request(mockReader([]))
rq.path = u[0].encode()
f, args = srv._find_url_handler(rq)
self.assertEqual(u[1], f)
# Some simple negative cases
for j in junk:
rq = request(mockReader([]))
rq.path = j.encode()
f, args = srv._find_url_handler(rq)
self.assertIsNone(f)
self.assertIsNone(args)
def testUrlFinderParameterized(self):
srv = webserver()
# Add few routes
srv.add_route('/', 0)
srv.add_route('/<user_name>', 1)
srv.add_route('/a/<id>', 2)
# Check first url (non param)
rq = request(mockReader([]))
rq.path = b'/'
f, args = srv._find_url_handler(rq)
self.assertEqual(f, 0)
# Check second url
rq.path = b'/user1'
f, args = srv._find_url_handler(rq)
self.assertEqual(f, 1)
self.assertEqual(args['_param_name'], 'user_name')
self.assertEqual(rq._param, 'user1')
# Check third url
rq.path = b'/a/123456'
f, args = srv._find_url_handler(rq)
self.assertEqual(f, 2)
self.assertEqual(args['_param_name'], 'id')
self.assertEqual(rq._param, '123456')
# When param is empty and there is no non param endpoint
rq.path = b'/a/'
f, args = srv._find_url_handler(rq)
self.assertEqual(f, 2)
self.assertEqual(rq._param, '')
def testUrlFinderNegative(self):
srv = webserver()
# empty URL is not allowed
with self.assertRaises(ValueError):
srv.add_route('', 1)
# Query string is not allowed
with self.assertRaises(ValueError):
srv.add_route('/?a=a', 1)
# Duplicate urls
srv.add_route('/duppp', 1)
with self.assertRaises(ValueError):
srv.add_route('/duppp', 1)
# We want to test decorators as well
server_for_decorators = webserver()
@server_for_decorators.route('/uid/<user_id>')
@server_for_decorators.route('/uid2/<user_id>')
async def route_for_decorator(req, resp, user_id):
await resp.start_html()
await resp.send('YO, {}'.format(user_id))
@server_for_decorators.resource('/rest1/<user_id>')
def resource_for_decorator1(data, user_id):
return {'name': user_id}
@server_for_decorators.resource('/rest2/<user_id>')
async def resource_for_decorator2(data, user_id):
yield '{"name": user_id}'
class ServerFull(unittest.TestCase):
def setUp(self):
self.dummy_called = False
self.data = {}
# "Register" one connection into map for dedicated decor server
server_for_decorators.conns[id(1)] = None
self.hello_world_history = ['HTTP/1.0 200 MSG\r\n' +
'Content-Type: text/html\r\n\r\n',
'<html><h1>Hello world</h1></html>']
# Create one more server - to simplify bunch of tests
self.srv = webserver()
self.srv.conns[id(1)] = None
def testRouteDecorator1(self):
"""Test @.route() decorator"""
# First decorator
rdr = mockReader(['GET /uid/man1 HTTP/1.1\r\n',
HDRE])
wrt = mockWriter()
# "Send" request
run_coro(server_for_decorators._handler(rdr, wrt))
# Ensure that proper response "sent"
expected = ['HTTP/1.0 200 MSG\r\n' +
'Content-Type: text/html\r\n\r\n',
'YO, man1']
self.assertEqual(wrt.history, expected)
self.assertTrue(wrt.closed)
def testRouteDecorator2(self):
# Second decorator
rdr = mockReader(['GET /uid2/man2 HTTP/1.1\r\n',
HDRE])
wrt = mockWriter()
# Re-register connection
server_for_decorators.conns[id(1)] = None
# "Send" request
run_coro(server_for_decorators._handler(rdr, wrt))
# Ensure that proper response "sent"
expected = ['HTTP/1.0 200 MSG\r\n' +
'Content-Type: text/html\r\n\r\n',
'YO, man2']
self.assertEqual(wrt.history, expected)
self.assertTrue(wrt.closed)
def testResourceDecorator1(self):
"""Test @.resource() decorator"""
rdr = mockReader(['GET /rest1/man1 HTTP/1.1\r\n',
HDRE])
wrt = mockWriter()
run_coro(server_for_decorators._handler(rdr, wrt))
expected = ['HTTP/1.0 200 MSG\r\n'
'Access-Control-Allow-Origin: *\r\n' +
'Access-Control-Allow-Headers: *\r\n' +
'Content-Length: 16\r\n' +
'Access-Control-Allow-Methods: GET\r\n' +
'Content-Type: application/json\r\n\r\n',
'{"name": "man1"}']
self.assertEqual(wrt.history, expected)
self.assertTrue(wrt.closed)
def testResourceDecorator2(self):
rdr = mockReader(['GET /rest2/man2 HTTP/1.1\r\n',
HDRE])
wrt = mockWriter()
run_coro(server_for_decorators._handler(rdr, wrt))
expected = ['HTTP/1.1 200 MSG\r\n' +
'Access-Control-Allow-Methods: GET\r\n' +
'Connection: close\r\n' +
'Access-Control-Allow-Headers: *\r\n' +
'Content-Type: application/json\r\n' +
'Transfer-Encoding: chunked\r\n' +
'Access-Control-Allow-Origin: *\r\n\r\n',
'11\r\n',
'{"name": user_id}',
'\r\n',
'0\r\n\r\n'
]
self.assertEqual(wrt.history, expected)
self.assertTrue(wrt.closed)
def testCatchAllDecorator(self):
# A fresh server for the catchall handler
server_for_catchall_decorator = webserver()
# Catchall decorator and handler
@server_for_catchall_decorator.catchall()
async def route_for_catchall_decorator(req, resp):
await resp.start_html()
await resp.send('my404')
rdr = mockReader(['GET /this/is/an/invalid/url HTTP/1.1\r\n',
HDRE])
wrt = mockWriter()
server_for_catchall_decorator.conns[id(1)] = None
run_coro(server_for_catchall_decorator._handler(rdr, wrt))
expected = ['HTTP/1.0 200 MSG\r\n' +
'Content-Type: text/html\r\n\r\n',
'my404']
self.assertEqual(wrt.history, expected)
self.assertTrue(wrt.closed)
async def dummy_handler(self, req, resp):
"""Dummy URL handler. It just records the fact - it has been called"""
self.dummy_req = req
self.dummy_resp = resp
self.dummy_called = True
async def dummy_post_handler(self, req, resp):
self.data = await req.read_parse_form_data()
async def hello_world_handler(self, req, resp):
await resp.start_html()
await resp.send('<html><h1>Hello world</h1></html>')
async def redirect_handler(self, req, resp):
await resp.redirect('/blahblah', msg='msg:)')
def testStartHTML(self):
"""Verify that request.start_html() works well"""
self.srv.add_route('/', self.hello_world_handler)
rdr = mockReader(['GET / HTTP/1.1\r\n',
HDR('Host: blah.com'),
HDRE])
wrt = mockWriter()
# "Send" request
run_coro(self.srv._handler(rdr, wrt))
# Ensure that proper response "sent"
self.assertEqual(wrt.history, self.hello_world_history)
self.assertTrue(wrt.closed)
def testRedirect(self):
"""Verify that request.start_html() works well"""
self.srv.add_route('/', self.redirect_handler)
rdr = mockReader(['GET / HTTP/1.1\r\n',
HDR('Host: blah.com'),
HDRE])
wrt = mockWriter()
# "Send" request
run_coro(self.srv._handler(rdr, wrt))
# Ensure that proper response "sent"
exp = ['HTTP/1.0 302 MSG\r\n' +
'Location: /blahblah\r\nContent-Length: 5\r\n\r\n',
'msg:)']
self.assertEqual(wrt.history, exp)
def testRequestBodyUnknownType(self):
"""Unknow HTTP body test - empty dict expected"""
self.srv.add_route('/', self.dummy_post_handler, methods=['POST'])
rdr = mockReader(['POST / HTTP/1.1\r\n',
HDR('Host: blah.com'),
HDR('Content-Length: 5'),
HDRE,
'12345'])
wrt = mockWriter()
run_coro(self.srv._handler(rdr, wrt))
# Check extracted POST body
self.assertEqual(self.data, {})
def testRequestBodyJson(self):
"""JSON encoded POST body"""
self.srv.add_route('/',
self.dummy_post_handler,
methods=['POST'],
save_headers=['Content-Type', 'Content-Length'])
rdr = mockReader(['POST / HTTP/1.1\r\n',
HDR('Content-Type: application/json'),
HDR('Content-Length: 10'),
HDRE,
'{"a": "b"}'])
wrt = mockWriter()
run_coro(self.srv._handler(rdr, wrt))
# Check parsed POST body
self.assertEqual(self.data, {'a': 'b'})
def testRequestBodyUrlencoded(self):
"""Regular HTML form"""
self.srv.add_route('/',
self.dummy_post_handler,
methods=['POST'],
save_headers=['Content-Type', 'Content-Length'])
rdr = mockReader(['POST / HTTP/1.1\r\n',
HDR('Content-Type: application/x-www-form-urlencoded; charset=UTF-8'),
HDR('Content-Length: 10'),
HDRE,
'a=b&c=%20d'])
wrt = mockWriter()
run_coro(self.srv._handler(rdr, wrt))
# Check parsed POST body
self.assertEqual(self.data, {'a': 'b', 'c': ' d'})
def testRequestBodyNegative(self):
"""Regular HTML form"""
self.srv.add_route('/',
self.dummy_post_handler,
methods=['POST'],
save_headers=['Content-Type', 'Content-Length'])
rdr = mockReader(['POST / HTTP/1.1\r\n',
HDR('Content-Type: application/json'),
HDR('Content-Length: 9'),
HDRE,
'some junk'])
wrt = mockWriter()
run_coro(self.srv._handler(rdr, wrt))
# payload broken - HTTP 400 expected
self.assertEqual(wrt.history, ['HTTP/1.0 400 MSG\r\n\r\n'])
def testRequestLargeBody(self):
"""Max Body size check"""
self.srv.add_route('/',
self.dummy_post_handler,
methods=['POST'],
save_headers=['Content-Type', 'Content-Length'],
max_body_size=5)
rdr = mockReader(['POST / HTTP/1.1\r\n',
HDR('Content-Type: application/json'),
HDR('Content-Length: 9'),
HDRE,
'some junk'])
wrt = mockWriter()
run_coro(self.srv._handler(rdr, wrt))
# payload broken - HTTP 400 expected
self.assertEqual(wrt.history, ['HTTP/1.0 413 MSG\r\n\r\n'])
async def route_parameterized_handler(self, req, resp, user_name):
await resp.start_html()
await resp.send('<html>Hello, {}</html>'.format(user_name))
def testRouteParameterized(self):
"""Verify that route with params works fine"""
self.srv.add_route('/db/<user_name>', self.route_parameterized_handler)
rdr = mockReader(['GET /db/user1 HTTP/1.1\r\n',
HDR('Host: junk.com'),
HDRE])
wrt = mockWriter()
# "Send" request
run_coro(self.srv._handler(rdr, wrt))
# Ensure that proper response "sent"
expected = ['HTTP/1.0 200 MSG\r\n' +
'Content-Type: text/html\r\n\r\n',
'<html>Hello, user1</html>']
self.assertEqual(wrt.history, expected)
self.assertTrue(wrt.closed)
def testParseHeadersOnOff(self):
"""Verify parameter parse_headers works"""
self.srv.add_route('/', self.dummy_handler, save_headers=['H1', 'H2'])
rdr = mockReader(['GET / HTTP/1.1\r\n',
HDR('H1: blah.com'),
HDR('H2: lalalla'),
HDR('Junk: fsdfmsdjfgjsdfjunk.com'),
HDRE])
# "Send" request
wrt = mockWriter()
run_coro(self.srv._handler(rdr, wrt))
self.assertTrue(self.dummy_called)
# Check for headers - only 2 of 3 should be collected, others - ignore
hdrs = {b'H1': b'blah.com',
b'H2': b'lalalla'}
self.assertEqual(self.dummy_req.headers, hdrs)
self.assertTrue(wrt.closed)
def testDisallowedMethod(self):
"""Verify that server respects allowed methods"""
self.srv.add_route('/', self.hello_world_handler)
self.srv.add_route('/post_only', self.dummy_handler, methods=['POST'])
rdr = mockReader(['GET / HTTP/1.0\r\n',
HDRE])
# "Send" GET request, by default GET is enabled
wrt = mockWriter()
run_coro(self.srv._handler(rdr, wrt))
self.assertEqual(wrt.history, self.hello_world_history)
self.assertTrue(wrt.closed)
# "Send" GET request to POST only location
self.srv.conns[id(1)] = None
self.dummy_called = False
rdr = mockReader(['GET /post_only HTTP/1.1\r\n',
HDRE])
wrt = mockWriter()
run_coro(self.srv._handler(rdr, wrt))
        # Handler should not be called - method not allowed
self.assertFalse(self.dummy_called)
exp = ['HTTP/1.0 405 MSG\r\n\r\n']
self.assertEqual(wrt.history, exp)
# Connection must be closed
self.assertTrue(wrt.closed)
def testAutoOptionsMethod(self):
"""Test auto implementation of OPTIONS method"""
self.srv.add_route('/', self.hello_world_handler, methods=['POST', 'PUT', 'DELETE'])
self.srv.add_route('/disabled', self.hello_world_handler, auto_method_options=False)
rdr = mockReader(['OPTIONS / HTTP/1.0\r\n',
HDRE])
wrt = mockWriter()
run_coro(self.srv._handler(rdr, wrt))
exp = ['HTTP/1.0 200 MSG\r\n' +
'Access-Control-Allow-Headers: *\r\n'
'Content-Length: 0\r\n'
'Access-Control-Allow-Origin: *\r\n'
'Access-Control-Allow-Methods: POST, PUT, DELETE\r\n\r\n']
self.assertEqual(wrt.history, exp)
self.assertTrue(wrt.closed)
def testPageNotFound(self):
"""Verify that malformed request generates proper response"""
rdr = mockReader(['GET /not_existing HTTP/1.1\r\n',
HDR('Host: blah.com'),
HDRE])
wrt = mockWriter()
run_coro(self.srv._handler(rdr, wrt))
exp = ['HTTP/1.0 404 MSG\r\n\r\n']
self.assertEqual(wrt.history, exp)
# Connection must be closed
self.assertTrue(wrt.closed)
def testMalformedRequest(self):
"""Verify that malformed request generates proper response"""
rdr = mockReader(['GET /\r\n',
HDR('Host: blah.com'),
HDRE])
wrt = mockWriter()
run_coro(self.srv._handler(rdr, wrt))
exp = ['HTTP/1.0 400 MSG\r\n\r\n']
self.assertEqual(wrt.history, exp)
# Connection must be closed
self.assertTrue(wrt.closed)
class ResourceGetPost():
"""Simple REST API resource class with just two methods"""
def get(self, data):
return {'data1': 'junk'}
def post(self, data):
return data
class ResourceGetParam():
"""Parameterized REST API resource"""
def __init__(self):
self.user_id = 'user_id'
def get(self, data, user_id):
return {self.user_id: user_id}
class ResourceGetArgs():
"""REST API resource with additional arguments"""
def get(self, data, arg1, arg2):
return {'arg1': arg1, 'arg2': arg2}
class ResourceGenerator():
"""REST API with generator as result"""
async def get(self, data):
yield 'longlongchunkchunk1'
yield 'chunk2'
# unicode support
yield '\u265E'
class ResourceNegative():
"""To cover negative test cases"""
def delete(self, data):
# Broken pipe emulation
raise OSError(32, '', '')
def put(self, data):
# Simple unhandled expection
raise Exception('something')
class ServerResource(unittest.TestCase):
def setUp(self):
self.srv = webserver()
self.srv.conns[id(1)] = None
self.srv.add_resource(ResourceGetPost, '/')
self.srv.add_resource(ResourceGetParam, '/param/<user_id>')
self.srv.add_resource(ResourceGetArgs, '/args', arg1=1, arg2=2)
self.srv.add_resource(ResourceGenerator, '/gen')
self.srv.add_resource(ResourceNegative, '/negative')
def testOptions(self):
# Ensure that only GET/POST methods are allowed:
rdr = mockReader(['OPTIONS / HTTP/1.0\r\n',
HDRE])
wrt = mockWriter()
run_coro(self.srv._handler(rdr, wrt))
exp = ['HTTP/1.0 200 MSG\r\n' +
'Access-Control-Allow-Headers: *\r\n'
'Content-Length: 0\r\n'
'Access-Control-Allow-Origin: *\r\n'
'Access-Control-Allow-Methods: GET, POST\r\n\r\n']
self.assertEqual(wrt.history, exp)
def testGet(self):
rdr = mockReader(['GET / HTTP/1.0\r\n',
HDRE])
wrt = mockWriter()
run_coro(self.srv._handler(rdr, wrt))
exp = ['HTTP/1.0 200 MSG\r\n' +
'Access-Control-Allow-Origin: *\r\n'
'Access-Control-Allow-Headers: *\r\n'
'Content-Length: 17\r\n'
'Access-Control-Allow-Methods: GET, POST\r\n'
'Content-Type: application/json\r\n\r\n',
'{"data1": "junk"}']
self.assertEqual(wrt.history, exp)
def testGetWithParam(self):
rdr = mockReader(['GET /param/123 HTTP/1.0\r\n',
HDRE])
wrt = mockWriter()
run_coro(self.srv._handler(rdr, wrt))
exp = ['HTTP/1.0 200 MSG\r\n' +
'Access-Control-Allow-Origin: *\r\n'
'Access-Control-Allow-Headers: *\r\n'
'Content-Length: 18\r\n'
'Access-Control-Allow-Methods: GET\r\n'
'Content-Type: application/json\r\n\r\n',
'{"user_id": "123"}']
self.assertEqual(wrt.history, exp)
def testGetWithArgs(self):
rdr = mockReader(['GET /args HTTP/1.0\r\n',
HDRE])
wrt = mockWriter()
run_coro(self.srv._handler(rdr, wrt))
exp = ['HTTP/1.0 200 MSG\r\n' +
'Access-Control-Allow-Origin: *\r\n'
'Access-Control-Allow-Headers: *\r\n'
'Content-Length: 22\r\n'
'Access-Control-Allow-Methods: GET\r\n'
'Content-Type: application/json\r\n\r\n',
'{"arg1": 1, "arg2": 2}']
self.assertEqual(wrt.history, exp)
def testGenerator(self):
rdr = mockReader(['GET /gen HTTP/1.0\r\n',
HDRE])
wrt = mockWriter()
run_coro(self.srv._handler(rdr, wrt))
exp = ['HTTP/1.1 200 MSG\r\n' +
'Access-Control-Allow-Methods: GET\r\n' +
'Connection: close\r\n' +
'Access-Control-Allow-Headers: *\r\n' +
'Content-Type: application/json\r\n' +
'Transfer-Encoding: chunked\r\n' +
'Access-Control-Allow-Origin: *\r\n\r\n',
'13\r\n',
'longlongchunkchunk1',
'\r\n',
'6\r\n',
'chunk2',
'\r\n',
               # next chunk is a single character that is 3 bytes long in UTF-8
'3\r\n',
'\u265E',
'\r\n',
'0\r\n\r\n']
self.assertEqual(wrt.history, exp)
def testPost(self):
# Ensure that parameters from query string / body will be combined as well
rdr = mockReader(['POST /?qs=qs1 HTTP/1.0\r\n',
HDR('Content-Length: 17'),
HDR('Content-Type: application/json'),
HDRE,
'{"body": "body1"}'])
wrt = mockWriter()
run_coro(self.srv._handler(rdr, wrt))
exp = ['HTTP/1.0 200 MSG\r\n' +
'Access-Control-Allow-Origin: *\r\n'
'Access-Control-Allow-Headers: *\r\n'
'Content-Length: 30\r\n'
'Access-Control-Allow-Methods: GET, POST\r\n'
'Content-Type: application/json\r\n\r\n',
'{"qs": "qs1", "body": "body1"}']
self.assertEqual(wrt.history, exp)
def testInvalidMethod(self):
rdr = mockReader(['PUT / HTTP/1.0\r\n',
HDRE])
wrt = mockWriter()
run_coro(self.srv._handler(rdr, wrt))
exp = ['HTTP/1.0 405 MSG\r\n\r\n']
self.assertEqual(wrt.history, exp)
def testException(self):
rdr = mockReader(['PUT /negative HTTP/1.0\r\n',
HDRE])
wrt = mockWriter()
run_coro(self.srv._handler(rdr, wrt))
exp = ['HTTP/1.0 500 MSG\r\n\r\n']
self.assertEqual(wrt.history, exp)
def testBrokenPipe(self):
rdr = mockReader(['DELETE /negative HTTP/1.0\r\n',
HDRE])
wrt = mockWriter()
run_coro(self.srv._handler(rdr, wrt))
self.assertEqual(wrt.history, [])
class StaticContent(unittest.TestCase):
def setUp(self):
self.srv = webserver()
self.srv.conns[id(1)] = None
self.tempfn = '__tmp.html'
self.ctype = None
self.etype = None
self.max_age = 2592000
with open(self.tempfn, 'wb') as f:
            f.write(b'someContent blah blah')
def tearDown(self):
try:
delete_file(self.tempfn)
except OSError:
pass
async def send_file_handler(self, req, resp):
await resp.send_file(self.tempfn,
content_type=self.ctype,
content_encoding=self.etype,
max_age=self.max_age)
def testSendFileManual(self):
"""Verify send_file works great with manually defined parameters"""
self.ctype = 'text/plain'
self.etype = 'gzip'
self.max_age = 100
self.srv.add_route('/', self.send_file_handler)
rdr = mockReader(['GET / HTTP/1.0\r\n',
HDRE])
wrt = mockWriter()
run_coro(self.srv._handler(rdr, wrt))
exp = ['HTTP/1.0 200 MSG\r\n' +
'Cache-Control: max-age=100, public\r\n'
'Content-Type: text/plain\r\n'
'Content-Length: 21\r\n'
'Content-Encoding: gzip\r\n\r\n',
bytearray(b'someContent blah blah')]
self.assertEqual(wrt.history, exp)
self.assertTrue(wrt.closed)
def testSendFileNotFound(self):
"""Verify 404 error for non existing files"""
self.srv.add_route('/', self.send_file_handler)
rdr = mockReader(['GET / HTTP/1.0\r\n',
HDRE])
wrt = mockWriter()
# Intentionally delete file before request
delete_file(self.tempfn)
run_coro(self.srv._handler(rdr, wrt))
exp = ['HTTP/1.0 404 MSG\r\n\r\n']
self.assertEqual(wrt.history, exp)
self.assertTrue(wrt.closed)
def testSendFileConnectionReset(self):
self.srv.add_route('/', self.send_file_handler)
rdr = mockReader(['GET / HTTP/1.0\r\n',
HDRE])
        # tell mockWriter to raise an error during awrite()
wrt = mockWriter(generate_expection=OSError(errno.ECONNRESET))
run_coro(self.srv._handler(rdr, wrt))
        # there should be no payload due to connection reset
self.assertEqual(wrt.history, [])
self.assertTrue(wrt.closed)
if __name__ == '__main__':
unittest.main()
|
tests/test_dependency_duplicates.py | Aryabhata-Rootspring/fastapi | 53,007 | 12769000 | <reponame>Aryabhata-Rootspring/fastapi<gh_stars>1000+
from typing import List
from fastapi import Depends, FastAPI
from fastapi.testclient import TestClient
from pydantic import BaseModel
app = FastAPI()
client = TestClient(app)
class Item(BaseModel):
data: str
def duplicate_dependency(item: Item):
return item
def dependency(item2: Item):
return item2
def sub_duplicate_dependency(
item: Item, sub_item: Item = Depends(duplicate_dependency)
):
return [item, sub_item]
@app.post("/with-duplicates")
async def with_duplicates(item: Item, item2: Item = Depends(duplicate_dependency)):
return [item, item2]
@app.post("/no-duplicates")
async def no_duplicates(item: Item, item2: Item = Depends(dependency)):
return [item, item2]
@app.post("/with-duplicates-sub")
async def no_duplicates_sub(
item: Item, sub_items: List[Item] = Depends(sub_duplicate_dependency)
):
return [item, sub_items]
openapi_schema = {
"openapi": "3.0.2",
"info": {"title": "FastAPI", "version": "0.1.0"},
"paths": {
"/with-duplicates": {
"post": {
"summary": "With Duplicates",
"operationId": "with_duplicates_with_duplicates_post",
"requestBody": {
"content": {
"application/json": {
"schema": {"$ref": "#/components/schemas/Item"}
}
},
"required": True,
},
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
}
},
"/no-duplicates": {
"post": {
"summary": "No Duplicates",
"operationId": "no_duplicates_no_duplicates_post",
"requestBody": {
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/Body_no_duplicates_no_duplicates_post"
}
}
},
"required": True,
},
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
}
},
"/with-duplicates-sub": {
"post": {
"summary": "No Duplicates Sub",
"operationId": "no_duplicates_sub_with_duplicates_sub_post",
"requestBody": {
"content": {
"application/json": {
"schema": {"$ref": "#/components/schemas/Item"}
}
},
"required": True,
},
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
}
},
},
"components": {
"schemas": {
"Body_no_duplicates_no_duplicates_post": {
"title": "Body_no_duplicates_no_duplicates_post",
"required": ["item", "item2"],
"type": "object",
"properties": {
"item": {"$ref": "#/components/schemas/Item"},
"item2": {"$ref": "#/components/schemas/Item"},
},
},
"HTTPValidationError": {
"title": "HTTPValidationError",
"type": "object",
"properties": {
"detail": {
"title": "Detail",
"type": "array",
"items": {"$ref": "#/components/schemas/ValidationError"},
}
},
},
"Item": {
"title": "Item",
"required": ["data"],
"type": "object",
"properties": {"data": {"title": "Data", "type": "string"}},
},
"ValidationError": {
"title": "ValidationError",
"required": ["loc", "msg", "type"],
"type": "object",
"properties": {
"loc": {
"title": "Location",
"type": "array",
"items": {"type": "string"},
},
"msg": {"title": "Message", "type": "string"},
"type": {"title": "Error Type", "type": "string"},
},
},
}
},
}
def test_openapi_schema():
response = client.get("/openapi.json")
assert response.status_code == 200, response.text
assert response.json() == openapi_schema
def test_no_duplicates_invalid():
response = client.post("/no-duplicates", json={"item": {"data": "myitem"}})
assert response.status_code == 422, response.text
assert response.json() == {
"detail": [
{
"loc": ["body", "item2"],
"msg": "field required",
"type": "value_error.missing",
}
]
}
def test_no_duplicates():
response = client.post(
"/no-duplicates",
json={"item": {"data": "myitem"}, "item2": {"data": "myitem2"}},
)
assert response.status_code == 200, response.text
assert response.json() == [{"data": "myitem"}, {"data": "myitem2"}]
def test_duplicates():
response = client.post("/with-duplicates", json={"data": "myitem"})
assert response.status_code == 200, response.text
assert response.json() == [{"data": "myitem"}, {"data": "myitem"}]
def test_sub_duplicates():
response = client.post("/with-duplicates-sub", json={"data": "myitem"})
assert response.status_code == 200, response.text
assert response.json() == [
{"data": "myitem"},
[{"data": "myitem"}, {"data": "myitem"}],
]
|
account/forms.py | ShwethaRGowda/FADB | 149 | 12769006 | from django import forms
from .models import UserProfile
class ProfileForm(forms.ModelForm):
class Meta:
model = UserProfile
fields = ['name', 'photo']
widgets = {
'name': forms.TextInput(attrs={'class': 'form-control'}),
'photo': forms.FileInput(attrs={'class': 'form-control'}),
} |
h2o-py/tests/testdir_munging/pyunit_runif.py | ahmedengu/h2o-3 | 6,098 | 12769012 | from __future__ import print_function
import sys
sys.path.insert(1,"../../")
import h2o
from tests import pyunit_utils
def runif_check():
fr = h2o.H2OFrame([[r] for r in range(1,1001)])
runif1 = fr[0].runif(1234)
runif2 = fr[0].runif(1234)
runif3 = fr[0].runif(42)
assert (runif1 == runif2).all(), "Expected runif with the same seeds to return the same values."
assert not (runif1 == runif3).all(), "Expected runif with different seeds to return different values."
if __name__ == "__main__":
pyunit_utils.standalone_test(runif_check)
else:
runif_check()
|
h2o-py/tests/testdir_munging/binop/pyunit_mod.py | ahmedengu/h2o-3 | 6,098 | 12769055 | <gh_stars>1000+
import sys
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
def frame_as_list():
prostate = h2o.import_file(path=pyunit_utils.locate("smalldata/prostate/prostate.csv.zip"))
(prostate % 10).show()
(prostate[4] % 10).show()
airlines = h2o.import_file(path=pyunit_utils.locate("smalldata/airlines/allyears2k_headers.zip"))
(airlines["CRSArrTime"] % 100).show()
if __name__ == "__main__":
pyunit_utils.standalone_test(frame_as_list)
else:
frame_as_list()
|
bitex/api/REST/response.py | ligggooo/quant2018 | 312 | 12769072 | # Import Third-Party
from requests import Response
class APIResponse(Response):
def __init__(self, req_response, formatted_json=None):
for k, v in req_response.__dict__.items():
self.__dict__[k] = v
self._formatted = formatted_json
@property
def formatted(self):
return self._formatted
@formatted.setter
def formatted(self, value):
self._formatted = value
if __name__ == '__main__':
from bitex import Kraken
k = Kraken()
resp = k.ticker('XXBTZEUR')
print(resp.formatted)
print(resp.json()) |
motrackers/detectors/__init__.py | timseifer/mixed_motion_detection | 570 | 12769082 | from motrackers.detectors.tf import TF_SSDMobileNetV2
from motrackers.detectors.caffe import Caffe_SSDMobileNet
from motrackers.detectors.yolo import YOLOv3
|
tests/orm/nodes/data/test_array_bands.py | azadoks/aiida-core | 180 | 12769083 | # -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
# pylint: disable=redefined-outer-name
"""Tests for the :mod:`aiida.orm.nodes.data.array.bands` module."""
from argparse import Namespace
import pytest
from aiida.common.exceptions import NotExistent
from aiida.orm import BandsData, Group, User
from aiida.orm.nodes.data.array.bands import get_bands_and_parents_structure
@pytest.fixture
def alternate_user():
"""Return an alternate ``User`` instance that is not the current default user."""
email = 'alternate<EMAIL>'
try:
return User.objects.get(email=email)
except NotExistent:
return User(email='alternate<EMAIL>').store()
class TestGetBandsAndParentsStructure:
"""Tests for the :meth:`~aiida.orm.nodes.data.array.bands.get_bands_and_parents_structure` function."""
@staticmethod
def _get_default_ns():
"""Returns a simple template Namespace"""
args = Namespace()
args.element = None
args.element_only = None
args.formulamode = None
args.past_days = None
args.group_name = None
args.group_pk = None
args.all_users = False
return args
@pytest.mark.parametrize('all_users, expected', ((True, [True, True]), (False, [True, False])))
@pytest.mark.usefixtures('clear_database_before_test')
def test_all_users(self, alternate_user, all_users, expected):
"""Test the behavior for the ``all_users`` argument."""
bands_default_user = BandsData().store()
bands_alternate_user = BandsData(user=alternate_user).store()
bands = [bands_default_user, bands_alternate_user]
args = self._get_default_ns()
args.all_users = all_users
entries = get_bands_and_parents_structure(args)
node_pks = [int(e[0]) for e in entries]
assert [node.pk in node_pks for node in bands] == expected
@pytest.mark.parametrize('argument, attribute', (('group_name', 'label'), ('group_pk', 'pk')))
@pytest.mark.usefixtures('clear_database_before_test')
def test_identifier(self, argument, attribute):
"""Test the behavior for the ``group_name`` and ``group_pk`` arguments."""
bands_data_grouped = BandsData().store()
_ = BandsData().store()
bands_group = Group('some_bands_data').store()
bands_group.add_nodes(bands_data_grouped)
args = self._get_default_ns()
setattr(args, argument, [getattr(bands_group, attribute)])
entries = get_bands_and_parents_structure(args)
assert [int(e[0]) for e in entries] == [bands_data_grouped.pk]
|
datapackage_pipelines/specs/hashers/hash_calculator.py | gperonato/datapackage-pipelines | 109 | 12769115 | <filename>datapackage_pipelines/specs/hashers/hash_calculator.py
import hashlib
from ...utilities.extended_json import json
from ..parsers.base_parser import PipelineSpec
from ..errors import SpecError
from .dependency_resolver import resolve_dependencies
class HashCalculator(object):
def __init__(self):
self.all_pipeline_ids = {}
def calculate_hash(self, spec: PipelineSpec, status_mgr, ignore_missing_deps=False):
cache_hash = None
if spec.pipeline_id in self.all_pipeline_ids:
message = 'Duplicate key {0} in {1}' \
.format(spec.pipeline_id, spec.path)
spec.validation_errors.append(SpecError('Duplicate Pipeline Id', message))
else:
if ignore_missing_deps:
cache_hash = ''
else:
cache_hash = resolve_dependencies(spec, self.all_pipeline_ids, status_mgr)
self.all_pipeline_ids[spec.pipeline_id] = spec
if len(spec.validation_errors) > 0:
return cache_hash
for step in spec.pipeline_details['pipeline']:
m = hashlib.md5()
m.update(cache_hash.encode('ascii'))
with open(step['executor'], 'rb') as f:
m.update(f.read())
m.update(json.dumps(step, ensure_ascii=True, sort_keys=True)
.encode('ascii'))
cache_hash = m.hexdigest()
step['_cache_hash'] = cache_hash
spec.cache_hash = cache_hash
|
cli/src/pcluster/cli/commands/configure/command.py | maclema/aws-parallelcluster | 279 | 12769147 | # Copyright 2013-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License'). You may not use this file except in compliance
# with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the 'LICENSE.txt' file accompanying this file. This file is distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=import-outside-toplevel
from typing import List
import argparse
from argparse import Namespace
from pcluster.cli.commands.common import CliCommand
class ConfigureCommand(CliCommand):
"""Implement pcluster configure command."""
# CLI
name = "configure"
help = "Start the AWS ParallelCluster configuration."
description = help
def __init__(self, subparsers):
super().__init__(subparsers, name=self.name, help=self.help, description=self.description)
def register_command_args(self, parser: argparse.ArgumentParser) -> None: # noqa: D102
parser.add_argument("-c", "--config", help="Path to output the generated config file.", required=True)
def execute(self, args: Namespace, extra_args: List[str]) -> None: # noqa: D102 #pylint: disable=unused-argument
from pcluster.cli.commands.configure.easyconfig import configure
configure(args)
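# Invocation sketch (illustrative): `pcluster configure -c cluster-config.yaml` drops
# into the interactive wizard implemented in easyconfig.configure() and writes the
# generated configuration to the path passed via -c/--config.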
|
src/mylib/lgb/util.py | murez/mobile-semantic-segmentation | 713 | 12769184 | <reponame>murez/mobile-semantic-segmentation
from typing import List
import pandas as pd
from lightgbm import Booster
def make_imp_df(boosters: List[Booster]) -> pd.DataFrame:
df = pd.concat([
pd.DataFrame({'name': b.feature_name(), 'importance': b.feature_importance()})
for b in boosters
])
return df.groupby('name').mean() \
.sort_values('importance') \
.reset_index(level='name') \
.set_index('name')
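# Usage sketch (assumes a list of trained LightGBM boosters, e.g. one per CV fold;
# `params` and `folds` below are hypothetical):
#
#   import lightgbm as lgb
#   boosters = [lgb.train(params, fold_dataset) for fold_dataset in folds]
#   imp_df = make_imp_df(boosters)  # one row per feature, mean importance, sorted ascending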
|
DyCommon/Ui/DyStatsDataFrameTableWidget.py | Leonardo-YXH/DevilYuan | 135 | 12769188 | <filename>DyCommon/Ui/DyStatsDataFrameTableWidget.py
from PyQt5 import QtCore
import pandas as pd
from DyCommon.Ui.DyStatsTableWidget import *
class DyStatsDataFrameTableWidget(DyStatsTableWidget):
"""
        Only displays the DataFrame's columns; the index must be converted into a column by the user.
"""
def __init__(self, df, parent=None):
super().__init__(parent=parent, readOnly=True, index=False, floatCut=True, autoScroll=False)
self._initDf(df)
def _initDf(self, df):
self.setColNames(list(df.columns))
self.fastAppendRows(df.values.tolist())
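# Usage sketch (hypothetical DataFrame; a running QApplication is assumed):
#
#   df = pd.DataFrame({'code': ['600036', '000001'], 'pct': [1.2, -0.8]})
#   widget = DyStatsDataFrameTableWidget(df.reset_index())  # convert the index to a column first if needed
#   widget.show()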
|
tests/test_kpm.py | lise1020/pybinding | 159 | 12769200 | <reponame>lise1020/pybinding
import pytest
import numpy as np
import pybinding as pb
from pybinding.repository import graphene, group6_tmd
models = {
'graphene-pristine': [graphene.monolayer(), pb.rectangle(15)],
'graphene-pristine-oversized': [graphene.monolayer(), pb.rectangle(20)],
'graphene-const_potential': [graphene.monolayer(), pb.rectangle(15),
pb.constant_potential(0.5)],
'graphene-magnetic_field': [graphene.monolayer(), pb.rectangle(15),
graphene.constant_magnetic_field(1e3)],
}
@pytest.fixture(scope='module', ids=list(models.keys()), params=models.values())
def model(request):
return pb.Model(*request.param)
ldos_models = {**models, "mos2": [group6_tmd.monolayer_3band("MoS2"), pb.rectangle(6)]}
@pytest.mark.parametrize("params", ldos_models.values(), ids=list(ldos_models.keys()))
def test_ldos(params, baseline, plot_if_fails):
configurations = [
{'matrix_format': "CSR", 'optimal_size': False, 'interleaved': False},
{'matrix_format': "CSR", 'optimal_size': True, 'interleaved': False},
{'matrix_format': "CSR", 'optimal_size': False, 'interleaved': True},
{'matrix_format': "ELL", 'optimal_size': True, 'interleaved': True},
]
model = pb.Model(*params)
kernel = pb.lorentz_kernel()
strategies = [pb.kpm(model, kernel=kernel, silent=True, **c) for c in configurations]
energy = np.linspace(0, 2, 25)
results = [kpm.calc_ldos(energy, broadening=0.15, position=[0, 0.07], reduce=False)
for kpm in strategies]
expected = results[0].with_data(baseline(results[0].data.astype(np.float32)))
for i in range(len(results)):
plot_if_fails(results[i], expected, 'plot', label=i)
for result in results:
assert pytest.fuzzy_equal(result, expected, rtol=1e-3, atol=1e-6)
def test_moments(model, plot_if_fails):
energy = np.linspace(0, 2, 25)
broadening = 0.15
position = dict(position=[0, 0], sublattice="A")
kpm = pb.kpm(model, silent=True)
expected_ldos = kpm.calc_ldos(energy, broadening, **position)
def manual_ldos():
idx = model.system.find_nearest(**position)
alpha = np.zeros(model.hamiltonian.shape[0])
alpha[idx] = 1
a, b = kpm.scaling_factors
num_moments = kpm.kernel.required_num_moments(broadening / a)
moments = kpm.moments(num_moments, alpha)
ns = np.arange(num_moments)
scaled_energy = (energy - b) / a
k = 2 / (a * np.pi * np.sqrt(1 - scaled_energy**2))
chebyshev = np.cos(ns * np.arccos(scaled_energy[:, np.newaxis]))
return k * np.sum(moments.real * chebyshev, axis=1)
ldos = expected_ldos.with_data(manual_ldos())
plot_if_fails(ldos, expected_ldos, "plot")
assert pytest.fuzzy_equal(ldos, expected_ldos, rtol=1e-4, atol=1e-6)
with pytest.raises(RuntimeError) as excinfo:
kpm.moments(10, [1, 2, 3])
assert "Size mismatch" in str(excinfo.value)
with pytest.raises(RuntimeError) as excinfo:
kpm = pb.kpm(pb.Model(graphene.monolayer()))
kpm.moments(10, [1j, 2j])
assert "Hamiltonian is real, but the given argument 'alpha' is complex" in str(excinfo.value)
def test_kpm_multiple_indices(model):
"""KPM can take a vector of column indices and return the Green's function for all of them"""
kpm = pb.kpm(model, silent=True)
num_sites = model.system.num_sites
i, j = num_sites // 2, num_sites // 4
energy = np.linspace(-0.3, 0.3, 10)
broadening = 0.8
cols = [j, j + 1, j + 2]
gs = kpm.calc_greens(i, cols, energy, broadening)
assert len(gs) == len(cols)
g = kpm.calc_greens(j, i, energy, broadening)
assert pytest.fuzzy_equal(gs[0], g)
def test_kpm_reuse():
"""KPM should return the same result when a single object is used for multiple calculations"""
model = pb.Model(graphene.monolayer(), graphene.hexagon_ac(10))
kpm = pb.kpm(model, silent=True)
energy = np.linspace(-5, 5, 50)
broadening = 0.1
for position in [0, 0], [6, 0]:
actual = kpm.calc_ldos(energy, broadening, position)
expected = pb.kpm(model).calc_ldos(energy, broadening, position)
assert pytest.fuzzy_equal(actual, expected, rtol=1e-3, atol=1e-6)
def test_ldos_sublattice():
"""LDOS for A and B sublattices should be antisymmetric for graphene with a mass term"""
model = pb.Model(graphene.monolayer(), graphene.hexagon_ac(10), graphene.mass_term(1))
kpm = pb.kpm(model, silent=True)
a, b = (kpm.calc_ldos(np.linspace(-5, 5, 50), 0.1, [0, 0], sub) for sub in ('A', 'B'))
assert pytest.fuzzy_equal(a.data, b.data[::-1], rtol=1e-3, atol=1e-6)
def test_optimized_hamiltonian():
"""Currently available only in internal interface"""
from pybinding import _cpp
model = pb.Model(graphene.monolayer(), graphene.hexagon_ac(10))
h = model.hamiltonian
oh = _cpp.OptimizedHamiltonian(model.raw_hamiltonian, 0)
assert oh.matrix.shape == h.shape
assert oh.sizes[-1] == h.shape[0]
assert len(oh.indices) == h.shape[0]
dos_models = {
'graphene-const_potential': [graphene.monolayer(), pb.rectangle(25),
pb.constant_potential(0.5)],
'graphene-magnetic_field': [graphene.monolayer(), pb.rectangle(25),
graphene.constant_magnetic_field(1e3)],
}
@pytest.mark.parametrize("params", dos_models.values(), ids=list(dos_models.keys()))
def test_dos(params, baseline, plot_if_fails):
configurations = [
{'matrix_format': "ELL", 'optimal_size': False, 'interleaved': False},
{'matrix_format': "ELL", 'optimal_size': True, 'interleaved': True},
]
model = pb.Model(*params)
kernel = pb.lorentz_kernel()
strategies = [pb.kpm(model, kernel=kernel, silent=True, **c) for c in configurations]
energy = np.linspace(0, 2, 25)
results = [kpm.calc_dos(energy, broadening=0.15) for kpm in strategies]
expected = results[0].with_data(baseline(results[0].data.astype(np.float32)))
for i in range(len(results)):
plot_if_fails(results[i], expected, 'plot', label=i)
for result in results:
assert pytest.fuzzy_equal(result, expected, rtol=1e-3, atol=1e-6)
cond_models = {
'graphene-const_potential': [graphene.monolayer(), pb.rectangle(20),
pb.constant_potential(0.5)],
'graphene-magnetic_field': [graphene.monolayer(), pb.rectangle(20),
graphene.constant_magnetic_field(1e3)]
}
@pytest.mark.parametrize("params", cond_models.values(), ids=list(cond_models.keys()))
def test_conductivity(params, baseline, plot_if_fails):
configurations = [
{'matrix_format': "ELL", 'optimal_size': False, 'interleaved': False},
{'matrix_format': "ELL", 'optimal_size': True, 'interleaved': True},
]
model = pb.Model(*params)
kernel = pb.lorentz_kernel()
strategies = [pb.kpm(model, energy_range=[-9, 9], kernel=kernel, silent=True, **c)
for c in configurations]
energy = np.linspace(-2, 2, 25)
results = [kpm.calc_conductivity(energy, broadening=0.5, temperature=0, num_points=200)
for kpm in strategies]
expected = results[0].with_data(baseline(results[0].data.astype(np.float32)))
for i in range(len(results)):
plot_if_fails(results[i], expected, "plot", label=i)
for result in results:
assert pytest.fuzzy_equal(result, expected, rtol=1e-2, atol=1e-5)
|
src/plugins_/cfdocs/__init__.py | jcberquist/sublimetext-cfml | 130 | 12769234 | <filename>src/plugins_/cfdocs/__init__.py
from .. import plugin
from .cfdocs import (
get_inline_documentation,
get_completion_docs,
get_goto_cfml_file
)
class CFMLPlugin(plugin.CFMLPlugin):
def get_completion_docs(self, cfml_view):
return get_completion_docs(cfml_view)
def get_inline_documentation(self, cfml_view, doc_type):
return get_inline_documentation(cfml_view, doc_type)
def get_goto_cfml_file(self, cfml_view):
return get_goto_cfml_file(cfml_view)
|
networks/model.py | HighCWu/anime_biggan_toy | 140 | 12769244 | <reponame>HighCWu/anime_biggan_toy
import numpy as np
import paddle.fluid as fluid
from paddle.fluid import layers, dygraph as dg
from paddle.fluid.initializer import Normal, Constant, Uniform
class ModelCache(object):
G = None
D = None
train_mode = False
initialized = False
model_cache = ModelCache
def unpool(value):
"""Unpooling operation.
N-dimensional version of the unpooling operation from
https://www.robots.ox.ac.uk/~vgg/rg/papers/Dosovitskiy_Learning_to_Generate_2015_CVPR_paper.pdf
Taken from: https://github.com/tensorflow/tensorflow/issues/2169
Args:
value: a Tensor of shape [b, d0, d1, ..., dn, ch]
name: name of the op
Returns:
A Tensor of shape [b, 2*d0, 2*d1, ..., 2*dn, ch]
"""
value = layers.transpose(value, [0,2,3,1])
sh = value.shape
dim = len(sh[1:-1])
out = (layers.reshape(value, [-1] + sh[-dim:]))
for i in range(dim, 0, -1):
out = layers.concat([out, layers.zeros_like(out)], i)
out_size = [-1] + [s * 2 for s in sh[1:-1]] + [sh[-1]]
out = layers.reshape(out, out_size)
out = layers.transpose(out, [0,3,1,2])
return out
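# Shape note (illustrative): for an NCHW input, `unpool` doubles every spatial
# dimension by interleaving zeros, e.g. a [1, 3, 4, 4] tensor comes back as
# [1, 3, 8, 8] with the original activations kept in one corner of each 2x2 block.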
class ReLU(dg.Layer):
def forward(self, x):
return layers.relu(x)
class SoftMax(dg.Layer):
def __init__(self, **kwargs):
super().__init__()
self.kwargs = kwargs
def forward(self, x):
return layers.softmax(x, **self.kwargs)
class BatchNorm(dg.BatchNorm): # not trainable
def __init__(self, *args, **kwargs):
if 'affine' in kwargs:
affine = kwargs.pop('affine')
else:
affine = True
super().__init__(*args, **kwargs)
self._use_global_stats = True
if not affine:
weight = (self.weight * 0 + 1).detach()
bias = (self.bias * 0).detach()
del self._parameters['bias']
del self._parameters['weight']
self.weight = weight
self.bias = bias
self.weight.stop_gradient = True
self.bias.stop_gradient = True
self.accumulated_mean = self.create_parameter(shape=[args[0]], default_initializer=Constant(0.0))
self.accumulated_var = self.create_parameter(shape=[args[0]], default_initializer=Constant(0.0))
self.accumulated_counter = self.create_parameter(shape=[1], default_initializer=Constant(1e-12))
self.accumulated_mean.stop_gradient = True
self.accumulated_var.stop_gradient = True
self.accumulated_counter.stop_gradient = True
def forward(self, inputs, *args, **kwargs):
if '_mean' in self._parameters:
del self._parameters['_mean']
if '_variance' in self._parameters:
del self._parameters['_variance']
if not model_cache.initialized and not model_cache.train_mode:
self._mean = (self.accumulated_mean / self.accumulated_counter)
self._variance = (self.accumulated_var / self.accumulated_counter)
if model_cache.train_mode:
axes = [0] + ([] if len(inputs.shape) == 2 else list(range(2,len(inputs.shape))))
_mean = layers.reduce_mean(inputs, axes, keep_dim=True)
self._mean = layers.reduce_mean(inputs, axes, keep_dim=False)
self._variance = layers.reduce_mean((inputs-_mean)**2, axes)
else:
self._mean = self._mean.detach()
self._variance = self._variance.detach()
return super().forward(inputs, *args, **kwargs)
class SpectralNorm(dg.Layer): # not trainable
def __init__(self, module, name='weight', power_iterations=2):
super().__init__()
self.module = module
self.name = name
self.power_iterations = power_iterations
self.initialized = False
if not self._made_params():
self._make_params()
def _update_u(self):
w = self.weight
u = self.weight_u
if len(w.shape) == 4:
_w = layers.transpose(w, [2,3,1,0])
_w = layers.reshape(_w, [-1, _w.shape[-1]])
else:
_w = layers.reshape(w, [-1, w.shape[-1]])
_w = layers.reshape(_w, [-1, _w.shape[-1]])
singular_value = "left" if _w.shape[0] <= _w.shape[1] else "right"
norm_dim = 0 if _w.shape[0] <= _w.shape[1] else 1
for _ in range(self.power_iterations):
if singular_value == "left":
v = layers.l2_normalize(layers.matmul(_w, u, transpose_x=True), axis=norm_dim)
u = layers.l2_normalize(layers.matmul(_w, v), axis=norm_dim)
else:
v = layers.l2_normalize(layers.matmul(u, _w, transpose_y=True), axis=norm_dim)
u = layers.l2_normalize(layers.matmul(v, _w), axis=norm_dim)
if singular_value == "left":
sigma = layers.matmul(layers.matmul(u, _w, transpose_x=True), v)
else:
sigma = layers.matmul(layers.matmul(v, _w), u, transpose_y=True)
_w = w / sigma.detach()
setattr(self.module, self.name, _w.detach()) # setattr(self.module, self.name, _w)
# self.weight_u.set_value(u)
def _made_params(self):
try:
self.weight
self.weight_u
return True
except AttributeError:
return False
def _make_params(self):
# paddle linear weight is similar with tf's, and conv weight is similar with pytorch's.
w = getattr(self.module, self.name)
if len(w.shape) == 4:
_w = layers.transpose(w, [2,3,1,0])
_w = layers.reshape(_w, [-1, _w.shape[-1]])
else:
_w = layers.reshape(w, [-1, w.shape[-1]])
singular_value = "left" if _w.shape[0] <= _w.shape[1] else "right"
norm_dim = 0 if _w.shape[0] <= _w.shape[1] else 1
u_shape = (_w.shape[0], 1) if singular_value == "left" else (1, _w.shape[-1])
u = self.create_parameter(shape=u_shape, default_initializer=Normal(0, 1))
u.stop_gradient = True
u.set_value(layers.l2_normalize(u, axis=norm_dim))
del self.module._parameters[self.name]
self.add_parameter("weight", w)
self.add_parameter("weight_u", u)
def forward(self, *args, **kwargs):
if not self.initialized:
self._update_u()
self.initialized = True
return self.module.forward(*args, **kwargs)
class SelfAttention(dg.Layer):
def __init__(self, in_dim, activation=layers.relu):
super().__init__()
self.chanel_in = in_dim
self.activation = activation
self.theta = SpectralNorm(dg.Conv2D(in_dim, in_dim // 8, 1, bias_attr=False))
self.phi = SpectralNorm(dg.Conv2D(in_dim, in_dim // 8, 1, bias_attr=False))
self.pool = dg.Pool2D(2, 'max', 2)
self.g = SpectralNorm(dg.Conv2D(in_dim, in_dim // 2, 1, bias_attr=False))
self.o_conv = SpectralNorm(dg.Conv2D(in_dim // 2, in_dim, 1, bias_attr=False))
self.gamma = self.create_parameter([1,], default_initializer=Constant(0.0))
self.softmax = SoftMax(axis=-1)
def forward(self, x):
m_batchsize, C, width, height = x.shape
N = height * width
theta = self.theta(x)
phi = self.phi(x)
phi = self.pool(phi)
phi = layers.reshape(phi,(m_batchsize, -1, N // 4))
theta = layers.reshape(theta,(m_batchsize, -1, N))
theta = layers.transpose(theta,(0, 2, 1))
attention = self.softmax(layers.bmm(theta, phi))
g = self.g(x)
g = layers.reshape(self.pool(g),(m_batchsize, -1, N // 4))
attn_g = layers.reshape(layers.bmm(g, layers.transpose(attention,(0, 2, 1))),(m_batchsize, -1, width, height))
out = self.o_conv(attn_g)
return self.gamma * out + x
class ConditionalBatchNorm(dg.Layer):
def __init__(self, num_features, num_classes, epsilon=1e-5, momentum=0.1):
super().__init__()
self.bn_in_cond = BatchNorm(num_features, affine=False, epsilon=epsilon, momentum=momentum)
self.gamma_embed = SpectralNorm(dg.Linear(num_classes, num_features, bias_attr=False))
self.beta_embed = SpectralNorm(dg.Linear(num_classes, num_features, bias_attr=False))
def forward(self, x, y):
out = self.bn_in_cond(x)
if isinstance(y, list):
gamma, beta = y
out = layers.reshape(gamma, (0, 0, 1, 1)) * out + layers.reshape(beta, (0, 0, 1, 1))
return out
gamma = self.gamma_embed(y)
beta = self.beta_embed(y)
out = layers.reshape(gamma, (0, 0, 1, 1)) * out + layers.reshape(beta, (0, 0, 1, 1))
return out
class ResBlock(dg.Layer):
def __init__(
self,
in_channel,
out_channel,
kernel_size=[3, 3],
padding=1,
stride=1,
n_class=None,
conditional=True,
activation=layers.relu,
upsample=True,
downsample=False,
z_dim=128,
use_attention=False,
skip_proj=None
):
super().__init__()
if conditional:
self.cond_norm1 = ConditionalBatchNorm(in_channel, z_dim)
self.conv0 = SpectralNorm(
dg.Conv2D(in_channel, out_channel, kernel_size, stride, padding)
)
if conditional:
self.cond_norm2 = ConditionalBatchNorm(out_channel, z_dim)
self.conv1 = SpectralNorm(
dg.Conv2D(out_channel, out_channel, kernel_size, stride, padding)
)
self.skip_proj = False
if skip_proj is not True and (upsample or downsample):
self.conv_sc = SpectralNorm(dg.Conv2D(in_channel, out_channel, 1, 1, 0))
self.skip_proj = True
if use_attention:
self.attention = SelfAttention(out_channel)
self.upsample = upsample
self.downsample = downsample
self.activation = activation
self.conditional = conditional
self.use_attention = use_attention
def forward(self, input, condition=None):
out = input
if self.conditional:
out = self.cond_norm1(out, condition[0] if isinstance(condition, list) else condition)
out = self.activation(out)
if self.upsample:
out = unpool(out)
out = self.conv0(out)
if self.conditional:
out = self.cond_norm2(out, condition[1] if isinstance(condition, list) else condition)
out = self.activation(out)
out = self.conv1(out)
if self.downsample:
out = layers.pool2d(out, 2, pool_type='avg', pool_stride=2)
if self.skip_proj:
skip = input
if self.upsample:
skip = unpool(skip)
skip = self.conv_sc(skip)
if self.downsample:
skip = layers.pool2d(skip, 2, pool_type='avg', pool_stride=2)
out = out + skip
else:
skip = input
if self.use_attention:
out = self.attention(out)
return out
class Generator(dg.Layer): # not trainable
def __init__(self, code_dim=128, n_class=1000, chn=96, blocks_with_attention="B4", resolution=512):
super().__init__()
def GBlock(in_channel, out_channel, n_class, z_dim, use_attention):
return ResBlock(in_channel, out_channel, n_class=n_class, z_dim=z_dim, use_attention=use_attention)
self.embed_y = dg.Linear(n_class, 128, bias_attr=False)
self.chn = chn
self.resolution = resolution
self.blocks_with_attention = set(blocks_with_attention.split(","))
self.blocks_with_attention.discard('')
gblock = []
in_channels, out_channels = self.get_in_out_channels()
self.num_split = len(in_channels) + 1
z_dim = code_dim//self.num_split + 128
self.noise_fc = SpectralNorm(dg.Linear(code_dim//self.num_split, 4 * 4 * in_channels[0]))
self.sa_ids = [int(s.split('B')[-1]) for s in self.blocks_with_attention]
for i, (nc_in, nc_out) in enumerate(zip(in_channels, out_channels)):
gblock.append(GBlock(nc_in, nc_out, n_class=n_class, z_dim=z_dim, use_attention=(i+1) in self.sa_ids))
self.blocks = dg.LayerList(gblock)
self.output_layer_bn = BatchNorm(1 * chn, epsilon=1e-5)
self.output_layer_conv = SpectralNorm(dg.Conv2D(1 * chn, 3, [3, 3], padding=1))
def get_in_out_channels(self):
resolution = self.resolution
if resolution == 1024:
channel_multipliers = [16, 16, 8, 8, 4, 2, 1, 1, 1]
elif resolution == 512:
channel_multipliers = [16, 16, 8, 8, 4, 2, 1, 1]
elif resolution == 256:
channel_multipliers = [16, 16, 8, 8, 4, 2, 1]
elif resolution == 128:
channel_multipliers = [16, 16, 8, 4, 2, 1]
elif resolution == 64:
channel_multipliers = [16, 16, 8, 4, 2]
elif resolution == 32:
channel_multipliers = [4, 4, 4, 4]
else:
raise ValueError("Unsupported resolution: {}".format(resolution))
in_channels = [self.chn * c for c in channel_multipliers[:-1]]
out_channels = [self.chn * c for c in channel_multipliers[1:]]
return in_channels, out_channels
def forward(self, input, class_id, input_class_emb=False):
if isinstance(input, list):
codes = [input[0]]
codes += [input[2*i+1:2*i+3] for i in range(len(input)//2)]
else:
codes = layers.split(input, self.num_split, 1)
if not input_class_emb:
class_emb = self.embed_y(class_id) # 128
else:
class_emb = class_id
out = self.noise_fc(codes[0])
out = layers.transpose(layers.reshape(out,(out.shape[0], 4, 4, -1)),(0, 3, 1, 2))
for i, (code, gblock) in enumerate(zip(codes[1:], self.blocks)):
if isinstance(input, list):
condition = [layers.concat([c, class_emb], 1) for c in code]
else:
condition = layers.concat([code, class_emb], 1)
out = gblock(out, condition)
out = self.output_layer_bn(out)
out = layers.relu(out)
out = self.output_layer_conv(out)
return (layers.tanh(out) + 1) / 2
class Discriminator(dg.Layer):
def __init__(self, n_class=1000, chn=96, blocks_with_attention="B2", resolution=256):
super().__init__()
def DBlock(in_channel, out_channel, downsample=True, use_attention=False, skip_proj=None):
return ResBlock(in_channel, out_channel, conditional=False, upsample=False,
downsample=downsample, use_attention=use_attention, skip_proj=skip_proj)
self.chn = chn
self.colors = 3
self.resolution = resolution
self.blocks_with_attention = set(blocks_with_attention.split(","))
self.blocks_with_attention.discard('')
dblock = []
in_channels, out_channels = self.get_in_out_channels()
self.sa_ids = [int(s.split('B')[-1]) for s in self.blocks_with_attention]
for i, (nc_in, nc_out) in enumerate(zip(in_channels[:-1], out_channels[:-1])):
dblock.append(DBlock(nc_in, nc_out, downsample=True,
use_attention=(i+1) in self.sa_ids, skip_proj=nc_in==nc_out))
dblock.append(DBlock(in_channels[-1], out_channels[-1], downsample=False,
use_attention=len(out_channels) in self.sa_ids, skip_proj=in_channels[-1]==out_channels[-1]))
self.blocks = dg.LayerList(dblock)
self.final_fc = SpectralNorm(dg.Linear(16 * chn, 1))
self.embed_y = dg.Embedding(size=[n_class, 16 * chn], is_sparse=False, param_attr=Uniform(-0.1,0.1))
self.embed_y = SpectralNorm(self.embed_y)
def get_in_out_channels(self):
colors = self.colors
resolution = self.resolution
if resolution == 1024:
channel_multipliers = [1, 1, 1, 2, 4, 8, 8, 16, 16]
elif resolution == 512:
channel_multipliers = [1, 1, 2, 4, 8, 8, 16, 16]
elif resolution == 256:
channel_multipliers = [1, 2, 4, 8, 8, 16, 16]
elif resolution == 128:
channel_multipliers = [1, 2, 4, 8, 16, 16]
elif resolution == 64:
channel_multipliers = [2, 4, 8, 16, 16]
elif resolution == 32:
channel_multipliers = [2, 2, 2, 2]
else:
raise ValueError("Unsupported resolution: {}".format(resolution))
out_channels = [self.chn * c for c in channel_multipliers]
in_channels = [colors] + out_channels[:-1]
return in_channels, out_channels
def forward(self, input, class_id=None):
out = input
features = []
for i, dblock in enumerate(self.blocks):
out = dblock(out)
features.append(out)
out = layers.relu(out)
out = layers.reduce_sum(out, [2,3])
out_linear = self.final_fc(out)
if class_id is None:
prod = 0
else:
class_emb = self.embed_y(class_id)
prod = layers.reduce_sum((class_emb * out), 1, keep_dim=True)
return layers.sigmoid(out_linear + prod), features
|
pytorch_lightning/utilities/signature_utils.py | mathemusician/pytorch-lightning | 3,469 | 12769266 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
from typing import Callable, Optional
def is_param_in_hook_signature(
hook_fx: Callable, param: str, explicit: bool = False, min_args: Optional[int] = None
) -> bool:
"""
Args:
hook_fx: the hook callable
param: the name of the parameter to check
explicit: whether the parameter has to be explicitly declared
        min_args: whether the hook's signature has at least `min_args` parameters
"""
parameters = inspect.getfullargspec(hook_fx)
args = parameters.args[1:] # ignore `self`
return (
param in args
or (not explicit and (parameters.varargs is not None))
or (isinstance(min_args, int) and len(args) >= min_args)
)
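# Behaviour sketch (illustrative hook definition):
#
#   def on_train_batch_end(self, outputs, batch, batch_idx): ...
#
#   is_param_in_hook_signature(on_train_batch_end, "batch")                       # True: named explicitly
#   is_param_in_hook_signature(on_train_batch_end, "dataloader_idx")              # False: absent and no *args
#   is_param_in_hook_signature(on_train_batch_end, "dataloader_idx", min_args=3)  # True: 3 args besides self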
|
catalogs/views.py | paulsuh/mwa2 | 155 | 12769307 | <filename>catalogs/views.py
"""
catalogs//views.py
"""
from django.http import HttpResponse
from catalogs.models import Catalog
import json
import logging
LOGGER = logging.getLogger('munkiwebadmin')
def catalog_view(request):
'''Returns list of catalog names in JSON format'''
catalog_list = Catalog.list()
LOGGER.debug("Got request for catalog names")
return HttpResponse(json.dumps(catalog_list),
content_type='application/json')
def json_catalog_data(request):
'''Returns complied and sorted catalog data in JSON format'''
LOGGER.debug("Got request for catalog data")
return HttpResponse(json.dumps(Catalog.catalog_info()),
content_type='application/json')
def get_pkg_ref_count(request, pkg_path):
'''Returns the number of pkginfo files referencing a given pkg_path'''
LOGGER.debug("Got request for pkg ref count for %s", pkg_path)
return HttpResponse(json.dumps(Catalog.get_pkg_ref_count(pkg_path)),
content_type='application/json')
|
hypatia/physics.py | defcon201/hypatia-engine | 251 | 12769308 | <filename>hypatia/physics.py<gh_stars>100-1000
"""Physical attributes of things.
Right now, not much differs it from the constants
module, but there will surely be much more to do
with physics as time progresses.
See Also:
:mod:`constants`
"""
import pygame
from hypatia import constants
class Velocity(object):
"""Eight-directional velocity."""
def __init__(self, x=0, y=0):
"""Speed in pixels per second per axis. Values may be negative.
Args:
x (int|None): --
y (int|None): --
"""
self.x = x
self.y = y
# this really isn't used, yet
class Position(object):
"""The position of an object.
Scaffolding.
"""
def __init__(self, x, y, size):
"""Extrapolate position info from supplied info.
Args:
x (int|float): how many pixels from the left of the scene.
y (int|float): how many pixels from the top of the scene.
size (tuple): (x, y) pixel dimensions of object being
represented.
"""
self.rect = pygame.Rect((x, y), size)
self.float = (float(x), float(y))
self.int = (x, y)
class AbsolutePosition(Position):
"""The absolute pixel coordinate in regard to the scene.
Scaffolding.
"""
pass
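# Minimal usage sketch (illustrative values):
#
#   velocity = Velocity(x=20, y=-20)       # 20 px/s to the right, 20 px/s upward
#   position = Position(10, 30, (16, 16))  # 16x16 object, 10 px from the left, 30 px from the top
#   position.rect.topleft                  # -> (10, 30)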
|
trigger/acl/tools.py | jccardonar/trigger | 380 | 12769350 | <gh_stars>100-1000
# -*- coding: utf-8 -*-
"""
Various tools for use in scripts or other modules. Heavy lifting from tools
that have matured over time has been moved into this module.
"""
__author__ = '<NAME>, <NAME>'
__maintainer__ = '<NAME>'
__email__ = '<EMAIL>'
__copyright__ = 'Copyright 2010-2011, AOL Inc.'
from collections import defaultdict
import datetime
import IPy
import os
import re
import sys
import tempfile
from trigger.acl.parser import *
from trigger.conf import settings
# Defaults
DEBUG = False
DATE_FORMAT = "%Y-%m-%d"
DEFAULT_EXPIRE = 6 * 30 # 6 months
# Exports
__all__ = ('create_trigger_term', 'create_access', 'check_access', 'ACLScript',
'process_bulk_loads', 'get_bulk_acls', 'get_comment_matches',
'write_tmpacl', 'diff_files', 'worklog', 'insert_term_into_acl',
'create_new_acl')
# Functions
def create_trigger_term(source_ips=[],
dest_ips=[],
source_ports=[],
dest_ports=[],
protocols=[],
action=['accept'],
name="generated_term"):
"""Constructs & returns a Term object from constituent parts."""
term = Term()
term.action = action
term.name = name
for key, data in {'source-address': source_ips,
'destination-address': dest_ips,
'source-port': source_ports,
'destination-port': dest_ports,
'protocol': protocols}.iteritems():
for n in data:
if key in term.match:
term.match[key].append(n)
else:
term.match[key] = [n]
return term
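# Example (illustrative addresses/ports): build a term permitting tcp/443 to a single
# host, then test it against an existing ACL's terms with check_access().
#
#   term = create_trigger_term(dest_ips=[IPy.IP('10.20.30.40')],
#                              dest_ports=[443],
#                              protocols=['tcp'],
#                              name='permit_https')
#   permitted = check_access(my_acl.terms, term)  # my_acl parsed elsewhere via parse()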
def check_access(terms_to_check, new_term, quiet=True, format='junos',
acl_name=None):
"""
Determine whether access is permitted by a given ACL (list of terms).
Tests a new term against a list of terms. Return True if access in new term
is permitted, or False if not.
Optionally displays the terms that apply and what edits are needed.
:param terms_to_check:
A list of Term objects to check
:param new_term:
The Term object used for the access test
:param quiet:
Toggle whether output is displayed
:param format:
The ACL format to use for output display
:param acl_name:
The ACL name to use for output display
"""
permitted = None
matches = {
'source-address': new_term.match.get('source-address',[]),
'destination-address': new_term.match.get('destination-address',[]),
'protocol': new_term.match.get('protocol',[]),
'destination-port': new_term.match.get('destination-port',[]),
'source-port': new_term.match.get('source-port',[]),
}
def _permitted_in_term(term, comment=' check_access: PERMITTED HERE'):
"""
A little closure to re-use internally that returns a Boolean based
on the given Term object's action.
"""
action = term.action[0]
if action == 'accept':
is_permitted = True
if not quiet:
term.comments.append(Comment(comment))
elif action in ('discard', 'reject'):
is_permitted = False
if not quiet:
print '\n'.join(new_term.output(format, acl_name=acl_name))
else:
is_permitted = None
return is_permitted
for t in terms_to_check:
hit = True
complicated = False
for comment in t.comments:
if 'trigger: make discard' in comment:
t.setaction('discard') #.action[0] = 'discard'
t.makediscard = True # set 'make discard' flag
for k,v in t.match.iteritems():
if k not in matches or not matches[k]:
complicated = True
else:
for test in matches[k]:
if test not in v:
hit = False
break
if hit and not t.inactive:
# Simple access check. Elegant!
if not complicated and permitted is None:
permitted = _permitted_in_term(t)
# Complicated checks should set hit=False unless you want
# them to display and potentially confuse end-users
# TODO (jathan): Factor this into a "better way"
else:
# Does the term have 'port' defined?
if 'port' in t.match:
port_match = t.match.get('port')
match_fields = (matches['destination-port'], matches['source-port'])
# Iterate the fields, and then the ports for each field. If
# one of the port numbers is within port_match, check if
# the action permits/denies and set the permitted flag.
for field in match_fields:
for portnum in field:
if portnum in port_match:
permitted = _permitted_in_term(t)
else:
hit = False
# Other complicated checks would go here...
# If a complicated check happened and was not a hit, skip to the
# next term
if complicated and not hit:
continue
if not quiet:
print '\n'.join(t.output(format, acl_name=acl_name))
return permitted
def create_access(terms_to_check, new_term):
"""
Breaks a new_term up into separate constituent parts so that they can be
compared in a check_access test.
Returns a list of terms that should be inserted.
"""
protos = new_term.match.get('protocol', ['any'])
sources = new_term.match.get('source-address', ['any'])
dests = new_term.match.get('destination-address', ['any'])
sourceports = new_term.match.get('source-port', ['any'])
destports = new_term.match.get('destination-port', ['any'])
ret = []
for proto in protos:
for source in sources:
for sourceport in sourceports:
for dest in dests:
for destport in destports:
t = Term()
if str(proto) != 'any':
t.match['protocol'] = [proto]
if str(source) != 'any':
t.match['source-address'] = [source]
if str(dest) != 'any':
t.match['destination-address'] = [dest]
if str(sourceport) != 'any':
t.match['source-port'] = [sourceport]
if str(destport) != 'any':
t.match['destination-port'] = [destport]
if not check_access(terms_to_check, t):
ret.append(t)
return ret
# note, following code is -not currently used-
def insert_term_into_acl(new_term, aclobj, debug=False):
"""
Return a new ACL object with the new_term added in the proper place based
on the aclobj. Intended to recursively append to an interim ACL object
based on a list of Term objects.
It's safe to assume that this function is incomplete pending better
documentation and examples.
:param new_term:
The Term object to use for comparison against aclobj
:param aclobj:
The original ACL object to use for creation of new_acl
Example::
import copy
# terms_to_be_added is a list of Term objects that is to be added in
# the "right place" into new_acl based on the contents of aclobj
original_acl = parse(open('acl.original'))
new_acl = copy.deepcopy(original_acl) # Dupe the original
for term in terms_to_be_added:
new_acl = generate_new_acl(term, new_acl)
"""
new_acl = ACL() # ACL comes from trigger.acl.parser
new_acl.policers = aclobj.policers
new_acl.format = aclobj.format
new_acl.name = aclobj.name
already_added = False
for c in aclobj.comments:
new_acl.comments.append(c)
# The following logic is almost identical to that of check_access() except
# that it tracks already_added and knows how to handle insertion of terms
# before or after Terms with an action of 'discard' or 'reject'.
for t in aclobj.terms:
hit = True
complicated = False
permitted = None
for k, v in t.match.iteritems():
if debug:
print "generate_new_acl(): k,v==",k,"and",v
if k == 'protocol' and k not in new_term.match:
continue
if k not in new_term.match:
complicated = True
continue
else:
for test in new_term.match[k]:
if test not in v:
hit = False
break
if not hit and k in ('source-port', 'destination-port',
'source-address', 'destination-address'):
# Here is where it gets odd: If we have multiple IPs in this
# new term, and one of them matches in a deny, we must set hit
# to True.
got_match = False
if t.action[0] in ('discard', 'reject'):
for test in new_term.match[k]:
if test in v:
hit = True
# Check whether access in new_term is permitted (a la check_access(),
# track whether it's already been added into new_acl, and then add it
# in the "right place".
if hit and not t.inactive and already_added == False:
if not complicated and permitted is None:
for comment in t.comments:
if 'trigger: make discard' in comment and \
new_term.action[0] == 'accept':
new_acl.terms.append(new_term)
already_added = True
permitted = True
if t.action[0] in ('discard','reject') and \
new_term.action[0] in ('discard','reject'):
permitted = False
elif t.action[0] in ('discard','reject'):
permitted = False
new_acl.terms.append(new_term)
already_added = True
elif t.action[0] == 'accept' and \
new_term.action[0] in ('discard', 'reject'):
permitted = False
new_acl.terms.append(new_term)
already_added = True
elif t.action[0] == 'accept' and \
new_term.action[0] == 'accept':
permitted = True
if debug:
print "PERMITTED?", permitted
# Original term is always appended as we move on
new_acl.terms.append(t)
return new_acl
def create_new_acl(old_file, terms_to_be_added):
"""Given a list of Term objects call insert_term_into_acl() to determine
what needs to be added in based on the contents of old_file. Returns a new
ACL object."""
aclobj = parse(open(old_file)) # Start with the original ACL contents
new_acl = None
for new_term in terms_to_be_added:
new_acl = insert_term_into_acl(new_term, aclobj)
return new_acl
def get_bulk_acls():
"""
Returns a dict of acls with an applied count over settings.AUTOLOAD_BULK_THRESH
"""
from trigger.netdevices import NetDevices
nd = NetDevices()
all_acls = defaultdict(int)
for dev in nd.all():
for acl in dev.acls:
all_acls[acl] += 1
bulk_acls = {}
for acl, count in all_acls.items():
if count >= settings.AUTOLOAD_BULK_THRESH and acl != '':
bulk_acls[acl] = count
return bulk_acls
def process_bulk_loads(work, max_hits=settings.BULK_MAX_HITS_DEFAULT, force_bulk=False):
"""
Formerly "process --ones".
Processes work dict and determines tuple of (prefix, site) for each device. Stores
tuple as a dict key in prefix_hits. If prefix_hits[(prefix, site)] is greater than max_hits,
remove all further matching devices from work dict.
By default if a device has no acls flagged as bulk_acls, it is not removed from the work dict.
Example:
* Device 'foo1-xyz.example.com' returns ('foo', 'xyz') as tuple.
* This is stored as prefix_hits[('foo', 'xyz')] = 1
* All further devices matching that tuple increment the hits for that tuple
    * Any device whose hit counter for that tuple exceeds max_hits is removed from the work dict
You may override max_hits to increase the num. of devices on which to load a bulk acl.
You may pass force_bulk=True to treat all loads as bulk loads.
"""
prefix_pat = re.compile(r'^([a-z]+)\d{0,2}-([a-z0-9]+)')
prefix_hits = defaultdict(int)
import trigger.acl.db as adb
bulk_acls = adb.get_bulk_acls()
nd = adb.get_netdevices()
if DEBUG:
print 'DEVLIST:', sorted(work)
# Sort devices numerically
for dev in sorted(work):
if DEBUG: print 'Doing', dev
#testacls = dev.bulk_acls
#if force_bulk:
# testacls = dev.acls
testacls = dev.acls if force_bulk else dev.bulk_acls
for acl in testacls: #only look at each acl once, but look at all acls if bulk load forced
if acl in work[dev]:
#if acl in work[router]:
if DEBUG: print 'Determining threshold for acl ', acl, ' on device ', dev, '\n'
if acl in settings.BULK_MAX_HITS:
max_hits = settings.BULK_MAX_HITS[acl]
try:
prefix_site = prefix_pat.findall(dev.nodeName)[0]
except IndexError:
continue
# Mark a hit for this tuple, and dump remaining matches
prefix_hits[prefix_site] += 1
if DEBUG: print prefix_site, prefix_hits[prefix_site]
if prefix_hits[prefix_site] > max_hits:
msg = "Removing %s on %s from job queue: threshold of %d exceeded for " \
"'%s' devices in '%s'" % (acl, dev, max_hits, prefix_site[0], prefix_site[1])
print msg
if 'log' in globals():
log.msg(msg)
# Remove that acl from being loaded, but still load on that device
work[dev].remove(acl)
#work[router].remove(acl)
#done with all the devices
return work
def get_comment_matches(aclobj, requests):
"""Given an ACL object and a list of ticket numbers return a list of matching comments."""
matches = set()
for t in aclobj.terms:
for req in requests:
for c in t.comments:
if req in c:
matches.add(t)
#[matches.add(t) for c in t.comments if req in c]
return matches
def update_expirations(matches, numdays=DEFAULT_EXPIRE):
"""Update expiration dates on matching terms. This modifies mutable objects, so use cautiously."""
print 'matching terms:', [term.name for term in matches]
for term in matches:
date = None
for comment in term.comments:
try:
date = re.search(r'(\d{4}\-\d\d\-\d\d)', comment.data).group()
except AttributeError:
#print 'No date match in term: %s, comment: %s' % (term.name, comment)
continue
try:
dstamp = datetime.datetime.strptime(date, DATE_FORMAT)
except ValueError, err:
print 'BAD DATE FOR THIS COMMENT:'
print 'comment:', comment.data
print 'bad date:', date
print err
print 'Fix the date and start the job again!'
import sys
sys.exit()
new_date = dstamp + datetime.timedelta(days=numdays)
#print 'Before:\n' + comment.data + '\n'
print 'Updated date for term: %s' % term.name
comment.data = comment.data.replace(date, datetime.datetime.strftime(new_date, DATE_FORMAT))
#print 'After:\n' + comment.data
def write_tmpacl(acl, process_name='_tmpacl'):
"""Write a temporary file to disk from an Trigger acl.ACL object & return the filename"""
tmpfile = tempfile.mktemp() + process_name
f = open(tmpfile, 'w')
for x in acl.output(acl.format, replace=True):
f.write(x)
f.write('\n')
f.close()
return tmpfile
def diff_files(old, new):
"""Return a unified diff between two files"""
return os.popen('diff -Naur %s %s' % (old, new)).read()
def worklog(title, diff, log_string='updated by express-gen'):
"""Save a diff to the ACL worklog"""
from time import strftime,localtime
from trigger.utils.rcs import RCS
date = strftime('%Y%m%d', localtime())
file = os.path.join(settings.FIREWALL_DIR, 'workdocs', 'workdoc.' + date)
rcs = RCS(file)
if not os.path.isfile(file):
print 'Creating new worklog %s' % file
f = open(file,"w")
f.write("# vi:noai:\n\n")
f.close()
rcs.checkin('.')
print 'inserting the diff into the worklog %s' % file
rcs.lock_loop()
fd = open(file,"a")
fd.write('"%s"\n' % title)
fd.write(diff)
fd.close()
print 'inserting %s into the load queue' % title
rcs.checkin(log_string)
# Use acl to insert into queue, should be replaced with API call
os.spawnlp(os.P_WAIT, 'acl', 'acl', '-i', title)
# Classes
class ACLScript:
"""
Interface to generating or modifying access-lists. Intended for use in
creating command-line utilities using the ACL API.
"""
def __init__(self, acl=None, mode='insert', cmd='acl_script',
show_mods=True, no_worklog=False, no_changes=False):
self.source_ips = []
self.dest_ips = []
self.protocol = []
self.source_ports = []
self.dest_ports = []
self.modify_terms = []
self.bcomments = []
self.tempfiles = []
self.acl = acl
self.cmd = cmd
self.mode = mode
self.show_mods = show_mods
self.no_worklog = no_worklog
self.no_changes = no_changes
def cleanup(self):
for file in self.tempfiles:
os.remove(file)
def genargs(self,interactive=False):
if not self.acl:
raise "need acl defined"
argz = []
argz.append('-a %s' % self.acl)
if self.show_mods:
argz.append('--show-mods')
if self.no_worklog:
argz.append('--no-worklog')
if self.no_changes:
argz.append('--no-changes')
if not interactive:
argz.append('--no-input')
if self.mode == 'insert':
argz.append('--insert-defined')
elif self.mode == 'replace':
argz.append('--replace-defined')
else:
raise "invalid mode"
for k,v in {'--source-address-from-file':self.source_ips,
'--destination-address-from-file':self.dest_ips,
}.iteritems():
if len(v) == 0:
continue
tmpf = tempfile.mktemp() + '_genacl'
self.tempfiles.append(tmpf)
try:
f = open(tmpf,'w')
except:
print "UNABLE TO OPEN TMPFILE"
raise "YIKES!"
for x in v:
f.write('%s\n' % x.strNormal())
f.close()
argz.append('%s %s' % (k,tmpf))
for k,v in {'-p':self.source_ports,
'-P':self.dest_ports}.iteritems():
if not len(v):
continue
for x in v:
argz.append('%s %d' % (k,x))
if len(self.modify_terms) and len(self.bcomments):
print "Can only define either modify_terms or between comments"
raise "Can only define either modify_terms or between comments"
if self.modify_terms:
for x in self.modify_terms:
argz.append('-t %s' % x)
else:
for x in self.bcomments:
(b,e) = x
argz.append('-c "%s" "%s"' % (b,e))
for proto in self.protocol:
argz.append('--protocol %s' % proto)
return argz
def parselog(self, log):
return log
def run(self, interactive=False):
args = self.genargs(interactive=interactive)
log = []
#print self.cmd + ' ' + ' '.join(args)
if interactive:
os.system(self.cmd + ' ' + ' '.join(args))
else:
f = os.popen(self.cmd + ' ' + ' '.join(args))
line = f.readline()
while line:
line = line.rstrip()
log.append(line)
line = f.readline()
return log
def errors_from_log(self, log):
errors = ''
for l in log:
if '%%ERROR%%' in l:
                l = l.split('%%ERROR%%')[1]
errors += l[1:] + '\n'
return errors
def diff_from_log(self, log):
diff = ""
for l in log:
if '%%DIFF%%' in l:
l = l.split('%%DIFF%%')[1]
diff += l[1:] + '\n'
return diff
def set_acl(self, acl):
self.acl=acl
def _add_addr(self, to, src):
if isinstance(src,list):
for x in src:
if IPy.IP(x) not in to:
to.append(IPy.IP(x))
else:
if IPy.IP(src) not in to:
to.append(IPy.IP(src))
def _add_port(self, to, src):
if isinstance(src, list):
for x in src:
if x not in to:
to.append(int(x))
else:
if int(src) not in to:
to.append(int(src))
def add_protocol(self, src):
to = self.protocol
if isinstance(src, list):
for x in src:
if x not in to:
to.append(x)
else:
if src not in to:
to.append(src)
def add_src_host(self, data):
self._add_addr(self.source_ips, data)
def add_dst_host(self, data):
self._add_addr(self.dest_ips, data)
def add_src_port(self, data):
self._add_port(self.source_ports, data)
def add_dst_port(self, data):
self._add_port(self.dest_ports, data)
def add_modify_between_comments(self, begin, end):
del self.modify_terms
self.modify_terms = []
self.bcomments.append((begin,end))
def add_modify_term(self, term):
del self.bcomments
self.bcomments = []
if term not in self.modify_terms:
self.modify_terms.append(term)
def get_protocols(self):
return self.protocol
def get_src_hosts(self):
return self.source_ips
def get_dst_hosts(self):
return self.dest_ips
def get_src_ports(self):
return self.source_ports
def get_dst_ports(self):
return self.dest_ports
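# Rough usage sketch (hypothetical ACL name, hosts and ports):
#
#   script = ACLScript(acl='abc123', mode='insert', no_changes=True)
#   script.add_src_host('10.0.0.1')
#   script.add_dst_host('10.20.30.0/24')
#   script.add_dst_port(443)
#   script.add_protocol('tcp')
#   log = script.run()
#   print script.diff_from_log(log)
#   script.cleanup()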
|
src/spaczz/customattrs.py | JonasHablitzel/spaczz | 153 | 12769359 | <gh_stars>100-1000
"""Custom spaCy attributes for spaczz."""
from __future__ import annotations
from typing import Iterable, Optional, Set, Tuple, Type
import warnings
from spacy.tokens import Doc, Span, Token
from .exceptions import AttrOverwriteWarning, SpaczzSpanDeprecation
class SpaczzAttrs:
"""Adds spaczz custom attributes to spacy."""
_initialized = False
@classmethod
def initialize(cls: Type[SpaczzAttrs]) -> None:
"""Initializes and registers custom attributes."""
if not cls._initialized:
try:
Token.set_extension("spaczz_token", default=False)
Token.set_extension("spaczz_type", default=None)
Token.set_extension("spaczz_ratio", default=None)
Token.set_extension("spaczz_counts", default=None)
Token.set_extension("spaczz_details", default=None)
Span.set_extension("spaczz_span", getter=cls.get_spaczz_span)
Span.set_extension("spaczz_ent", getter=cls.get_spaczz_ent)
Span.set_extension("spaczz_type", getter=cls.get_span_type)
Span.set_extension("spaczz_types", getter=cls.get_span_types)
Span.set_extension("spaczz_ratio", getter=cls.get_ratio)
Span.set_extension("spaczz_counts", getter=cls.get_counts)
Span.set_extension("spaczz_details", getter=cls.get_details)
Doc.set_extension("spaczz_doc", getter=cls.get_spaczz_doc)
Doc.set_extension("spaczz_types", getter=cls.get_doc_types)
cls._initialized = True
except ValueError:
warnings.warn(
"""One or more spaczz custom extensions has already been registered.
These are being force overwritten. Please avoid defining personal,
custom extensions prepended with "spaczz_".
""",
AttrOverwriteWarning,
)
Token.set_extension("spaczz_token", default=False, force=True)
Token.set_extension("spaczz_type", default=None, force=True)
Token.set_extension("spaczz_ratio", default=None, force=True)
Token.set_extension("spaczz_counts", default=None, force=True)
Span.set_extension(
"spaczz_span", getter=cls.get_spaczz_span, force=True
)
Span.set_extension("spaczz_type", getter=cls.get_span_type, force=True)
Span.set_extension(
"spaczz_types", getter=cls.get_span_types, force=True
)
Span.set_extension("spaczz_ratio", getter=cls.get_ratio, force=True)
Span.set_extension("spaczz_counts", getter=cls.get_counts, force=True)
Doc.set_extension("spaczz_doc", getter=cls.get_spaczz_doc, force=True)
Doc.set_extension("spaczz_types", getter=cls.get_doc_types, force=True)
@staticmethod
def get_spaczz_span(span: Span) -> bool:
"""Getter for spaczz_span `Span` attribute."""
warnings.warn(
"""spaczz_span is deprecated.
Use spaczz_ent instead.""",
SpaczzSpanDeprecation,
)
return all([token._.spaczz_token for token in span])
@staticmethod
def get_spaczz_ent(span: Span) -> bool:
"""Getter for spaczz_ent `Span` attribute."""
return all([token._.spaczz_token for token in span])
@classmethod
def get_span_type(cls: Type[SpaczzAttrs], span: Span) -> Optional[str]:
"""Getter for spaczz_type `Span` attribute."""
if cls._all_equal([token._.spaczz_type for token in span]):
return span[0]._.spaczz_type
else:
return None
@staticmethod
def get_span_types(span: Span) -> Set[str]:
"""Getter for spaczz_types `Span` attribute."""
types = [token._.spaczz_type for token in span if token._.spaczz_type]
return set(types)
@classmethod
def get_ratio(cls: Type[SpaczzAttrs], span: Span) -> Optional[int]:
"""Getter for spaczz_ratio `Span` attribute."""
if cls._all_equal([token._.spaczz_ratio for token in span]):
return span[0]._.spaczz_ratio
else:
return None
@classmethod
def get_counts(
cls: Type[SpaczzAttrs], span: Span
) -> Optional[Tuple[int, int, int]]:
"""Getter for spaczz_counts `Span` attribute."""
if cls._all_equal([token._.spaczz_counts for token in span]):
return span[0]._.spaczz_counts
else:
return None
@classmethod
def get_details(cls: Type[SpaczzAttrs], span: Span) -> Optional[int]:
"""Getter for current placeholder spaczz_details `Span` attribute."""
if cls._all_equal([token._.spaczz_details for token in span]):
return span[0]._.spaczz_details
else:
return None
@staticmethod
def get_spaczz_doc(doc: Doc) -> bool:
"""Getter for spaczz_doc `Doc` attribute."""
return any([token._.spaczz_token for token in doc])
@staticmethod
def get_doc_types(doc: Doc) -> Set[str]:
"""Getter for spaczz_types `Doc` attribute."""
types = [token._.spaczz_type for token in doc if token._.spaczz_type]
return set(types)
@staticmethod
def _all_equal(iterable: Iterable) -> bool:
"""Tests if all elements of iterable are equal."""
iterator = iter(iterable)
try:
first = next(iterator)
except StopIteration:
return True
return all(first == rest for rest in iterator)
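# Typical call sequence (sketch): register the extensions once, then read them off
# spaCy objects that a spaczz matcher or pipeline component has annotated.
#
#   SpaczzAttrs.initialize()
#   # ... run a spaczz matcher/ruler over `doc` ...
#   doc._.spaczz_doc     # True if any token in the doc was set by spaczz
#   doc._.spaczz_types   # e.g. {"fuzzy", "regex"}, depending on the matchers used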
|
dataset_creation/find_answer.py | AseelAlshorafa/SOQAL | 109 | 12769362 | <filename>dataset_creation/find_answer.py
import sys
import os
from scipy import spatial
import numpy as np
def editDistance(str1, str2, m, n):
    # edit distance via dynamic programming, m = len(str1) and n = len(str2)
dp = [[0 for x in range(n + 1)] for x in range(m + 1)]
for i in range(m + 1):
for j in range(n + 1):
if i == 0:
dp[i][j] = j # Min. operations = j
elif j == 0:
dp[i][j] = i # Min. operations = i
elif str1[i - 1] == str2[j - 1]:
dp[i][j] = dp[i - 1][j - 1]
else:
dp[i][j] = 1 + min(dp[i][j - 1], # Insert
dp[i - 1][j], # Remove
dp[i - 1][j - 1]) # Replace
return dp[m][n]
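# Worked example (classic pair): turning "kitten" into "sitting" takes 3 edits
# (substitute k->s, substitute e->i, insert g), so editDistance("kitten", "sitting", 6, 7) == 3.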
def concatenateString(paragraph, start, length):
final_string = paragraph[start]
for i in range(1, length):
final_string += " " + paragraph[start + i]
return final_string
def find_answer(paragraph, answer):
# check if answer already in paragraph
correct_answer = ""
score_answer = 1000000
para_words = paragraph.split()
for i in range(0, len(para_words)):
# check max 15 word ranges, reduced for efficiency
for j in range(1, min(15, len(para_words) - i+1)):
candidate = concatenateString(para_words, i, j)
if candidate == answer:
return answer, paragraph.find(answer)
score = editDistance(answer, candidate, len(answer), len(candidate))
if (score < score_answer):
score_answer = score
correct_answer = candidate
return correct_answer, paragraph.find(correct_answer)
def test_find_answer():
p = "أصبحت بلاكبول وبلاكبيرن مع داروين سلطات وحدوية مستقلة "
a = "بلاكبو"
print(find_answer(p, a))
|
helper_scripts/components/headers_helper.py | fengjixuchui/difuze | 347 | 12769369 | def get_all_includes(comp_args, dst_includes):
i = 0
while i < len(comp_args):
curr_arg = comp_args[i].strip()
if curr_arg == "-isystem":
curr_arg1 = "-I" + comp_args[i+1].strip()
if curr_arg1 not in dst_includes:
dst_includes.append(curr_arg1)
if curr_arg == "-include":
curr_arg1 = comp_args[i+1].strip()
if "dhd_sec_feature.h" not in curr_arg1:
final_arg = curr_arg + " " + curr_arg1
if final_arg not in dst_includes:
dst_includes.append(final_arg)
if curr_arg[0:2] == "-I":
if curr_arg not in dst_includes:
if 'drivers' not in curr_arg and 'sound' not in curr_arg:
dst_includes.append(curr_arg)
i += 1
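# Illustrative call (hypothetical compiler arguments):
#
#   includes = []
#   get_all_includes(["-Iinclude/linux", "-isystem", "/usr/lib/gcc/include",
#                     "-include", "generated/autoconf.h"], includes)
#   # includes -> ['-Iinclude/linux', '-I/usr/lib/gcc/include', '-include generated/autoconf.h']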
|
tpfd/compat.py | erinxocon/tpfd | 106 | 12769439 | import sys
"""
This module handles import compatibility issues between Python 2 and
Python 3.
"""
# Syntax sugar.
_ver = sys.version_info
#: Python 2.x?
is_py2 = (_ver[0] == 2)
#: Python 3.x?
is_py3 = (_ver[0] == 3)
if is_py2:
builtin_str = str
bytes = str
str = unicode
basestring = basestring
numeric_types = (int, long, float)
elif is_py3:
builtin_str = str
str = str
bytes = bytes
basestring = (str, bytes)
numeric_types = (int, float) |
src/patcher.py | MustangYM/xia0LLDB | 464 | 12769463 | <filename>src/patcher.py
#! /usr/bin/env python3
# ______ ______ ______ ______ ______ ______ ______ ______ ______ ______ ______ ______ ______ ______ ______ ______ ______
# |______|______|______|______|______|______|______|______|______|______|______|______|______|______|______|______|______|
# _ ___ _ _ _____ ____
# (_) / _ \| | | | | __ \| _ \
# __ ___ __ _| | | | | | | | | | | |_) |
# \ \/ / |/ _` | | | | | | | | | | | _ <
# > <| | (_| | |_| | |____| |____| |__| | |_) |
# /_/\_\_|\__,_|\___/|______|______|_____/|____/
# ______ ______ ______ ______ ______ ______ ______ ______ ______ ______ ______ ______ ______ ______ ______ ______ ______
# |______|______|______|______|______|______|______|______|______|______|______|______|______|______|______|______|______|
import lldb
import os
import shlex
import optparse
import json
import re
import utils
def __lldb_init_module(debugger, internal_dict):
debugger.HandleCommand(
'command script add -f patcher.handle_command patcher -h "patch code in lldb"')
# print('========')
# print('[patcher]: patch code in lldb')
# print('\tpatcher -a patch_addr -i instrument -s instrument_count')
# print('\tmore usage, try "patcher -h"')
def handle_command(debugger, command, exe_ctx, result, internal_dict):
command_args = shlex.split(command, posix=False)
parser = generate_option_parser()
try:
(options, _) = parser.parse_args(command_args)
except:
result.SetError(parser.usage)
return
_ = exe_ctx.target
_ = exe_ctx.thread
if options.patchInstrument:
if options.patchAddress:
patch_addr = int(options.patchAddress, 16)
else:
ret = utils.exe_cmd(debugger, "p/x $pc")
ret = ret.strip()
pattern = '0x[0-9a-f]+'
match = re.search(pattern, ret)
if match:
found = match.group(0)
else:
utils.ELOG("not get address:"+ret)
return
utils.ILOG("you not set patch address, default is current pc address:{}".format(found))
patch_addr = int(found, 16)
patch_ins = options.patchInstrument
# default instrument size is 1
patch_size = 0x1
patch_ins = patch_ins.replace("\"", "")
patch_ins = patch_ins.replace("'", "")
if options.patchSize:
patch_size = int(options.patchSize)
ret = patcher(debugger, patch_ins, patch_addr, patch_size)
result.AppendMessage(str(ret))
else:
result.AppendMessage("[-] args error, check it !")
return
def patch_code(debugger, addr, ins, count):
command_script = '@import Foundation;\n'
command_script += 'uint64_t x_addr = {};\n'.format(addr)
command_script += 'uint8_t patch_data[] = {};\n'.format(ins)
command_script += 'int insCount = {};\n'.format(count)
command_script += r'''
NSMutableString* retStr = [NSMutableString string];
void * patch_addr = (void*)x_addr;
//uint8_t patch_data[] = {0xc0, 0x03, 0x5f, 0xd6};
int patch_data_size = 4*insCount;
// =====================================================patch code=============================================
typedef bool (*patch_code_t)(void* patch_addr, uint8_t* patch_data, int patch_data_size);
patch_code_t patch_code = [](void* patch_addr, uint8_t* patch_data, int patch_data_size) -> bool {
#define PAGE_SIZE 0x0000000000004000
#define PAGE_MASK 0x0000000000003fff
#define RTLD_LAZY 0x1
#define RTLD_NOW 0x2
#define RTLD_LOCAL 0x4
#define RTLD_GLOBAL 0x8
#define VM_PROT_READ ((vm_prot_t) 0x01)
#define VM_PROT_WRITE ((vm_prot_t) 0x02)
#define VM_PROT_EXECUTE ((vm_prot_t) 0x04)
#define PROT_NONE 0x00 /* [MC2] no permissions */
#define PROT_READ 0x01 /* [MC2] pages can be read */
#define PROT_WRITE 0x02 /* [MC2] pages can be written */
#define PROT_EXEC 0x04 /* [MC2] pages can be executed */
#define MAP_SHARED 0x0001
#define MAP_ANON 0x1000
#define KERN_SUCCESS 0
typedef unsigned int mach_port_t;
typedef int kern_return_t;
typedef unsigned int vm_inherit_t;
typedef mach_port_t task_t;
typedef int vm_prot_t;
typedef unsigned long uintptr_t;
typedef uintptr_t vm_offset_t;
typedef vm_offset_t vm_address_t;
typedef uint64_t mach_vm_address_t;
typedef int boolean_t;
typedef int vm_behavior_t;
typedef uint32_t vm32_object_id_t;
typedef uintptr_t vm_size_t;
typedef int *vm_region_recurse_info_t;
typedef unsigned long long memory_object_offset_t;
struct vm_region_submap_short_info_64 {
vm_prot_t protection; /* present access protection */
vm_prot_t max_protection; /* max avail through vm_prot */
vm_inherit_t inheritance;/* behavior of map/obj on fork */
memory_object_offset_t offset; /* offset into object/map */
unsigned int user_tag; /* user tag on map entry */
unsigned int ref_count; /* obj/map mappers, etc */
unsigned short shadow_depth; /* only for obj */
unsigned char external_pager; /* only for obj */
unsigned char share_mode; /* see enumeration */
boolean_t is_submap; /* submap vs obj */
vm_behavior_t behavior; /* access behavior hint */
vm32_object_id_t object_id; /* obj/map name, not a handle */
unsigned short user_wired_count;
};
typedef unsigned int __darwin_natural_t;
typedef __darwin_natural_t natural_t;
typedef natural_t mach_msg_type_number_t;
typedef struct vm_region_submap_short_info_64 vm_region_submap_short_info_data_64_t;
#define VM_REGION_SUBMAP_SHORT_INFO_COUNT_64 \
((mach_msg_type_number_t) \
(sizeof (vm_region_submap_short_info_data_64_t) / sizeof (natural_t)))
#define VM_FLAGS_OVERWRITE 0x4000 /* delete any existing mappings first */
typedef int __int32_t;
typedef __int32_t __darwin_pid_t;
typedef __darwin_pid_t pid_t;
// init value
kern_return_t kret;
task_t self_task = (task_t)mach_task_self();
/* Set platform binary flag */
#define FLAG_PLATFORMIZE (1 << 1)
// platformize_me
// https://github.com/pwn20wndstuff/Undecimus/issues/112
/*
void* handle = (void*)dlopen("/usr/lib/libjailbreak.dylib", RTLD_LAZY);
if (!handle){
//[retStr appendString:@"[-] /usr/lib/libjailbreak.dylib dlopen failed!\n"];
return false;
}
// Reset errors
(const char *)dlerror();
typedef void (*fix_entitle_prt_t)(pid_t pid, uint32_t what);
fix_entitle_prt_t ptr = (fix_entitle_prt_t)dlsym(handle, "jb_oneshot_entitle_now");
const char *dlsym_error = (const char *)dlerror();
if (dlsym_error) return;
ptr((pid_t)getpid(), FLAG_PLATFORMIZE);
//[retStr appendString:@"\n[+] platformize me success!"];
*/
void* target_addr = patch_addr;
// 1. get target address page and patch offset
unsigned long page_start = (unsigned long) (target_addr) & ~PAGE_MASK;
unsigned long patch_offset = (unsigned long)target_addr - page_start;
// map new page for patch
void *new_page = (void *)mmap(NULL, PAGE_SIZE, 0x1 | 0x2, 0x1000 | 0x0001, -1, 0);
if (!new_page ){
//[retStr appendString:@"[-] mmap failed!\n"];
return false;
}
kret = (kern_return_t)vm_copy(self_task, (unsigned long)page_start, PAGE_SIZE, (vm_address_t) new_page);
if (kret != KERN_SUCCESS){
//[retStr appendString:@"[-] vm_copy faild!\n"];
return false;
}
// 4. start patch
/*
nop -> {0x1f, 0x20, 0x03, 0xd5}
ret -> {0xc0, 0x03, 0x5f, 0xd6}
*/
// char patch_ins_data[4] = {0x1f, 0x20, 0x03, 0xd5};
// mach_vm_write(task_self, (vm_address_t)(new+patch_offset), patch_ret_ins_data, 4);
memcpy((void *)((uint64_t)new_page+patch_offset), patch_data, patch_data_size);
//[retStr appendString:@"[+] patch ret[0xc0 0x03 0x5f 0xd6] with memcpy\n"];
// set back to r-x
(int)mprotect(new_page, PAGE_SIZE, PROT_READ | PROT_EXEC);
//[retStr appendString:@"[*] set new page back to r-x success!\n"];
// remap
vm_prot_t prot;
vm_inherit_t inherit;
// get page info
vm_address_t region = (vm_address_t) page_start;
vm_size_t region_len = 0;
struct vm_region_submap_short_info_64 vm_info;
mach_msg_type_number_t info_count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
natural_t max_depth = 99999;
kret = (kern_return_t)vm_region_recurse_64(self_task, ®ion, ®ion_len,
&max_depth,
(vm_region_recurse_info_t) &vm_info,
&info_count);
if (kret != KERN_SUCCESS){
//[retStr appendString:@"[-] vm_region_recurse_64 faild!\n"];
return false;
}
prot = vm_info.protection & (PROT_READ | PROT_WRITE | PROT_EXEC);
inherit = vm_info.inheritance;
//[retStr appendString:@"[*] get page info done.\n"];
vm_prot_t c;
vm_prot_t m;
mach_vm_address_t target = (mach_vm_address_t)page_start;
kret = (kern_return_t)mach_vm_remap(self_task, &target, PAGE_SIZE, 0,
VM_FLAGS_OVERWRITE, self_task,
(mach_vm_address_t) new_page, true,
&c, &m, inherit);
if (kret != KERN_SUCCESS){
//[retStr appendString:@"[-] remap mach_vm_remap faild!\n"];
return false;
}
//[retStr appendString:@"[+] remap to target success!\n"];
// clear cache
void* clear_start_ = (void*)(page_start + patch_offset);
sys_icache_invalidate (clear_start_, 4);
sys_dcache_flush (clear_start_, 4);
return true;
};
// =====================================================patch code=============================================
patch_code(patch_addr, patch_data, patch_data_size);
[retStr appendString:@"patch done."];
retStr
'''
retStr = utils.exe_script(debugger, command_script)
return utils.hex_int_in_str(retStr)
def is_raw_data(data):
# pylint: disable=anomalous-backslash-in-string
pattern = "\{\s*0x[0-9a-fA-F]{2}\s*,\s*0x[0-9a-fA-F]{2}\s*,\s*0x[0-9a-fA-F]{2}\s*,\s*0x[0-9a-fA-F]{2}\s*\}"
ret = re.match(pattern, data)
if not ret:
return False
return True
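# Illustrative examples (editor's note, not part of the original script). The
# pattern above accepts a brace-wrapped group of four hex bytes:
#   is_raw_data("{0x1f, 0x20, 0x03, 0xd5}")  -> True   (raw aarch64 nop bytes)
#   is_raw_data("nop")                       -> False  (named instruction type)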
def patcher(debugger, ins, addr, size):
if is_raw_data(ins):
utils.ILOG("detect you manual set ins data:{}".format(ins))
utils.ILOG("start patch text at address:{} size:{} to ins data:{}".format(hex(addr), size, ins))
patch_code(debugger, hex(addr), ins, size)
return "[x] power by xia0@2019"
supportInsList = {'nop':'0x1f, 0x20, 0x03, 0xd5 ', 'ret':'0xc0, 0x03, 0x5f, 0xd6', 'mov0':'0x00, 0x00, 0x80, 0xd2', 'mov1':'0x20, 0x00, 0x80, 0xd2'}
if ins not in supportInsList.keys():
utils.ELOG("patcher not support this ins type:{}".format(ins))
return "[x] power by xia0@2019"
utils.ILOG("start patch text at address:{} size:{} to ins:\"{}\" and data:{}".format(hex(addr), size, ins, supportInsList[ins]))
# for i in range(size):
# patch_code(debugger, hex(curPatchAddr), supportInsList[ins])
# utils.SLOG("current patch address:{} patch done".format(hex(curPatchAddr)))
# curPatchAddr += 4
ins_data = ""
for i in range(size):
ins_data += supportInsList[ins]
if i != size - 1:
ins_data += ","
build_ins_data = "{" + ins_data + "}"
utils.ILOG("make ins data:\n{}".format(build_ins_data))
patch_code(debugger, hex(addr), build_ins_data, size)
utils.SLOG("patch done")
return "[x] power by xia0@2019"
def generate_option_parser():
usage = "patcher"
parser = optparse.OptionParser(usage=usage, prog="lookup")
parser.add_option("-a", "--address",
action="store",
default=None,
dest='patchAddress',
help="need patch code address")
parser.add_option("-i", "--instrument",
action="store",
default=None,
dest='patchInstrument',
help="patch instrument type")
parser.add_option("-s", "--size",
action="store",
default=None,
dest='patchSize',
help="patch instrument count")
return parser
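# ---------------------------------------------------------------------------
# Illustrative usage sketch (editor's note, not part of the original script).
# After loading this file into lldb (e.g. `command script import patcher.py`),
# the command registered in __lldb_init_module can be invoked as follows; the
# address below is a made-up example value:
#
#   (lldb) patcher -i nop -s 4 -a 0x1000a1b2c   # nop out 4 instructions
#   (lldb) patcher -i ret                       # patch a ret at the current pc
#   (lldb) patcher -i "{0x1f, 0x20, 0x03, 0xd5}" -a 0x1000a1b2c -s 1
# ---------------------------------------------------------------------------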
|
tests/ingestion/transformers/monosi/test_monitors.py | monosidev/monosi | 156 | 12769487 | <gh_stars>100-1000
import pytest
import ingestion.transformers.monosi.monitors as monitors
@pytest.fixture
def schema():
return {
'columns': ['NAME', 'COL_NAME', 'COL_TYPE', 'COL_DESCRIPTION', 'COL_SORT_ORDER', 'DATABASE', 'SCHEMA', 'DESCRIPTION', 'IS_VIEW'],
'rows': [
{
'NAME': 'name_of_table',
'COL_NAME': 'name_of_col',
'COL_TYPE': 'timestamp_tz',
'COL_DESCRIPTION': None,
'COL_SORT_ORDER': '3',
'DATABASE': 'database',
'SCHEMA': 'schema',
'DESCRIPTION': None,
'IS_VIEW': 'false'
},
{
'NAME': 'name_of_table',
'COL_NAME': 'name_of_col_2',
'COL_TYPE': 'text',
'COL_DESCRIPTION': None,
'COL_SORT_ORDER': '3',
'DATABASE': 'database',
'SCHEMA': 'schema',
'DESCRIPTION': None,
'IS_VIEW': 'false'
},
{
'NAME': 'name_of_table_2',
'COL_NAME': 'name_of_col_3',
'COL_TYPE': 'int',
'COL_DESCRIPTION': None,
'COL_SORT_ORDER': '3',
'DATABASE': 'database',
'SCHEMA': 'schema',
'DESCRIPTION': None,
'IS_VIEW': 'false'
},
]
}
def test__transform_empty():
input_arr = {'rows': []}
output_arr = monitors.MonitorTransformer._transform(input_arr)
assert len(output_arr) == 0
def test__transform(schema):
output_arr = monitors.MonitorTransformer._transform(schema)
expected_num_monitors = 2
assert len(output_arr) == expected_num_monitors
@pytest.fixture
def monitor():
return {}
@pytest.fixture
def normalized_schema():
return monitors.MonitorTransformer._normalized_schema()
def test__normalized_schema_correct(normalized_schema, monitor):
input_arr = [monitor]
is_correct = monitors.MonitorTransformer.match(input_arr, normalized_schema)
assert is_correct == True
def test__normalized_schema_correct_multiple(normalized_schema, monitor):
input_arr = [monitor, monitor]
is_correct = monitors.MonitorTransformer.match(input_arr, normalized_schema)
assert is_correct == True
def test__normalized_schema_incorrect_to_have_none(normalized_schema):
input_arr = []
is_correct = monitors.MonitorTransformer.match(input_arr, normalized_schema)
assert is_correct == False
def test__normalized_schema_incorrect(normalized_schema):
input_arr = [{"anything": "goeshere"}]
is_correct = monitors.MonitorTransformer.match(input_arr, normalized_schema)
assert is_correct == False
def test__normalized_schema_incorrect_multiple(normalized_schema):
input_arr = [{}, {"anything": "goeshere"}]
is_correct = monitors.MonitorTransformer.match(input_arr, normalized_schema)
assert is_correct == False
@pytest.fixture
def original_schema():
return monitors.MonitorTransformer._original_schema()
def test__original_schema_correct(original_schema, schema):
is_correct = monitors.MonitorTransformer.match(schema, original_schema)
assert is_correct == True
def test__original_schema_incorrect_to_have_none(original_schema):
is_correct = monitors.MonitorTransformer.match({}, original_schema)
assert is_correct == False
def test__original_schema_incorrect(original_schema):
input_arr = {'anything': 'goeshere'}
is_correct = monitors.MonitorTransformer.match(input_arr, original_schema)
assert is_correct == False
|
mushroom_rl/environments/mujoco_envs/humanoid_gait/humanoid_gait.py | PuzeLiu/mushroom-rl | 344 | 12769513 | import mujoco_py
from pathlib import Path
from mushroom_rl.utils import spaces
from mushroom_rl.environments.mujoco import MuJoCo, ObservationType
from mushroom_rl.utils.running_stats import *
from ._external_simulation import NoExternalSimulation, MuscleSimulation
from .reward_goals import CompleteTrajectoryReward, VelocityProfileReward, \
MaxVelocityReward, NoGoalReward, HumanoidTrajectory
from mushroom_rl.environments.mujoco_envs.humanoid_gait.utils import quat_to_euler
class HumanoidGait(MuJoCo):
"""
Mujoco simulation of a Humanoid Model, based on:
"A deep reinforcement learning based approach towards generating human
walking behavior with a neuromuscular model".
<NAME>., <NAME>., <NAME>., and <NAME>. (2019).
"""
def __init__(self, gamma=0.99, horizon=2000, n_intermediate_steps=10,
use_muscles=True, goal_reward=None, goal_reward_params=None,
obs_avg_window=1, act_avg_window=1):
"""
Constructor.
Args:
gamma (float, 0.99): discount factor for the environment;
horizon (int, 2000): horizon for the environment;
n_intermediate_steps (int, 10): number of steps to apply the same
action to the environment and wait for the next observation;
            use_muscles (bool): whether an external muscle simulation should be
                used to generate the actions; if False, torques are applied
                directly to the joints;
goal_reward (string, None): type of trajectory used for training
Options available:
'trajectory' - Use trajectory in assets/GaitTrajectory.npz
as reference;
'com_vel_trajectory' - Use only velocity trajectory of COM in
assets/GaitTrajectory.npz as reference;
'vel_profile' - Velocity goal for the center of mass of the
model to follow. The goal is given by a
VelocityProfile instance (or subclass).
And should be included in the
``goal_reward_params``;
'max_vel' - Tries to achieve the maximum possible
velocity;
None - Follows no goal(just tries to survive);
            goal_reward_params (dict, None): parameters needed to create the
                goal reward;
obs_avg_window (int, 1): size of window used to average
observations;
act_avg_window (int, 1): size of window used to average actions.
"""
self.use_muscles = use_muscles
self.goal_reward = goal_reward
self.act_avg_window = act_avg_window
self.obs_avg_window = obs_avg_window
model_path = Path(__file__).resolve().parent.parent / "data" / "humanoid_gait" / "human7segment.xml"
action_spec = ["right_hip_frontal", "right_hip_sagittal",
"right_knee", "right_ankle", "left_hip_frontal",
"left_hip_sagittal", "left_knee", "left_ankle",
]
observation_spec = [("root", ObservationType.JOINT_POS),
("right_hip_frontal", ObservationType.JOINT_POS),
("right_hip_sagittal", ObservationType.JOINT_POS),
("right_knee", ObservationType.JOINT_POS),
("right_ankle", ObservationType.JOINT_POS),
("left_hip_frontal", ObservationType.JOINT_POS),
("left_hip_sagittal", ObservationType.JOINT_POS),
("left_knee", ObservationType.JOINT_POS),
("left_ankle", ObservationType.JOINT_POS),
("root", ObservationType.JOINT_VEL),
("right_hip_frontal", ObservationType.JOINT_VEL),
("right_hip_sagittal", ObservationType.JOINT_VEL),
("right_knee", ObservationType.JOINT_VEL),
("right_ankle", ObservationType.JOINT_VEL),
("left_hip_frontal", ObservationType.JOINT_VEL),
("left_hip_sagittal", ObservationType.JOINT_VEL),
("left_knee", ObservationType.JOINT_VEL),
("left_ankle", ObservationType.JOINT_VEL),
]
collision_groups = [("floor", ["floor"]),
("left_foot", ["left_foot"]),
("right_foot", ["right_foot"])
]
super().__init__(model_path.as_posix(), action_spec, observation_spec, gamma=gamma,
horizon=horizon, n_substeps=1,
n_intermediate_steps=n_intermediate_steps,
collision_groups=collision_groups)
if use_muscles:
self.external_actuator = MuscleSimulation(self._sim)
self.info.action_space = spaces.Box(
*self.external_actuator.get_action_space())
else:
self.external_actuator = NoExternalSimulation()
low, high = self.info.action_space.low.copy(),\
self.info.action_space.high.copy()
self.norm_act_mean = (high + low) / 2.0
self.norm_act_delta = (high - low) / 2.0
self.info.action_space.low[:] = -1.0
self.info.action_space.high[:] = 1.0
if goal_reward_params is None:
goal_reward_params = dict()
if goal_reward == "trajectory" or goal_reward == "com_vel_trajectory":
control_dt = self._sim.model.opt.timestep * self._n_intermediate_steps
self.goal_reward = CompleteTrajectoryReward(self._sim, control_dt,
**goal_reward_params)
elif goal_reward == "vel_profile":
self.goal_reward = VelocityProfileReward(self._sim, **goal_reward_params)
elif goal_reward == "max_vel":
self.goal_reward = MaxVelocityReward(self._sim, **goal_reward_params)
elif goal_reward is None:
self.goal_reward = NoGoalReward()
else:
raise NotImplementedError("The specified goal reward has not been"
"implemented: ", goal_reward)
if goal_reward == "trajectory":
self.reward_weights = dict(live_reward=0.10, goal_reward=0.40,
traj_vel_reward=0.50,
move_cost=0.10, fall_cost=0.00)
elif goal_reward == "com_vel_trajectory":
self.reward_weights = dict(live_reward=0.00, goal_reward=0.00,
traj_vel_reward=1.00,
move_cost=0.00, fall_cost=0.00)
else:
self.reward_weights = dict(live_reward=0.10, goal_reward=0.90,
traj_vel_reward=0.00,
move_cost=0.10, fall_cost=0.00)
self.info.observation_space = spaces.Box(*self._get_observation_space())
self.mean_grf = RunningAveragedWindow(shape=(6,),
window_size=n_intermediate_steps)
self.mean_vel = RunningExpWeightedAverage(shape=(3,), alpha=0.005)
self.mean_obs = RunningAveragedWindow(
shape=self.info.observation_space.shape,
window_size=obs_avg_window
)
self.mean_act = RunningAveragedWindow(
shape=self.info.action_space.shape, window_size=act_avg_window)
def step(self, action):
action = ((action.copy() * self.norm_act_delta) + self.norm_act_mean)
state, reward, absorbing, info = super().step(action)
self.mean_obs.update_stats(state)
self.mean_vel.update_stats(self._sim.data.qvel[0:3])
avg_obs = self.mean_obs.mean
avg_obs[13:16] = self.mean_vel.mean
return avg_obs, reward, absorbing, info
def render(self):
if self._viewer is None:
self._viewer = mujoco_py.MjViewer(self._sim)
self._viewer._render_every_frame = True
self._viewer.render()
def _setup(self):
self.goal_reward.reset_state()
start_obs = self._reset_model(qpos_noise=0.0, qvel_noise=0.0)
start_vel = (
self._sim.data.qvel[0:3] if (self.goal_reward is None or isinstance(
self.goal_reward, MaxVelocityReward)
) else self.goal_reward.get_observation())
self.mean_vel.reset(start_vel)
self.mean_obs.reset(start_obs)
self.mean_act.reset()
self.external_actuator.reset()
def _reward(self, state, action, next_state):
live_reward = 1.0
goal_reward = self.goal_reward(state, action, next_state)
traj_vel_reward = 0.0
if isinstance(self.goal_reward, HumanoidTrajectory):
traj_vel_reward = np.exp(-20.0 * np.square(
next_state[13] - next_state[33]))
move_cost = self.external_actuator.cost(
state, action / self.norm_act_delta, next_state)
fall_cost = 0.0
if self._has_fallen(next_state):
fall_cost = 1.0
total_reward = self.reward_weights["live_reward"] * live_reward \
+ self.reward_weights["goal_reward"] * goal_reward \
+ self.reward_weights["traj_vel_reward"] * traj_vel_reward \
- self.reward_weights["move_cost"] * move_cost \
- self.reward_weights["fall_cost"] * fall_cost
return total_reward
def _is_absorbing(self, state):
return (self._has_fallen(state)
or self.goal_reward.is_absorbing(state)
or self.external_actuator.is_absorbing(state)
)
def _get_observation_space(self):
sim_low, sim_high = (self.info.observation_space.low[2:],
self.info.observation_space.high[2:])
grf_low, grf_high = (-np.ones((6,)) * np.inf,
np.ones((6,)) * np.inf)
r_low, r_high = self.goal_reward.get_observation_space()
a_low, a_high = self.external_actuator.get_observation_space()
return (np.concatenate([sim_low, grf_low, r_low, a_low]),
np.concatenate([sim_high, grf_high, r_high, a_high]))
def _reset_model(self, qpos_noise=0.0, qvel_noise=0.0):
self._set_state(self._sim.data.qpos + np.random.uniform(
low=-qpos_noise, high=qpos_noise, size=self._sim.model.nq),
self._sim.data.qvel + np.random.uniform(low=-qvel_noise,
high=qvel_noise,
size=self._sim.model.nv)
)
return self._create_observation()
def _set_state(self, qpos, qvel):
old_state = self._sim.get_state()
new_state = mujoco_py.MjSimState(old_state.time, qpos, qvel,
old_state.act, old_state.udd_state)
self._sim.set_state(new_state)
self._sim.forward()
@staticmethod
def _has_fallen(state):
torso_euler = quat_to_euler(state[1:5])
return ((state[0] < 0.90) or (state[0] > 1.20)
or abs(torso_euler[0]) > np.pi / 12
or (torso_euler[1] < -np.pi / 12) or (torso_euler[1] > np.pi / 8)
or (torso_euler[2] < -np.pi / 4) or (torso_euler[2] > np.pi / 4)
)
def _create_observation(self):
"""
Creates full vector of observations:
obs[0:13] -> qpos(from mujoco obs)
obs[0] -> torso z pos
obs[1:5] -> torso quaternion orientation
obs[5:13] -> leg joints angle
obs[13:27] -> qvel(from mujoco obs)
obs[13:16] -> torso linear velocity
obs[16:19] -> torso angular velocity
obs[19:27] -> leg joints angular velocity
obs[27:30] -> ground force
obs[27:30] -> ground force on right foot(xyz)
obs[30:33] -> ground force on left foot(xyz)
obs[33:33+(len(goal_observation)] -> observations related
to the goal
obs[last_obs_id - len(ext_actuator_obs): last_obs_id]
-> observations related to the external actuator
"""
obs = np.concatenate([super(HumanoidGait, self)._create_observation()[2:],
self.mean_grf.mean / 1000.,
self.goal_reward.get_observation(),
self.external_actuator.get_observation()
]).flatten()
return obs
def _preprocess_action(self, action):
action = self.external_actuator.preprocess_action(action)
self.mean_act.update_stats(action)
return self.mean_act.mean
def _step_init(self, state, action):
self.external_actuator.initialize_internal_states(state, action)
def _compute_action(self, action):
action = self.external_actuator.external_stimulus_to_joint_torques(
action
)
return action
def _simulation_post_step(self):
grf = np.concatenate(
[self._get_collision_force("floor", "right_foot")[:3],
self._get_collision_force("floor", "left_foot")[:3]]
)
self.mean_grf.update_stats(grf)
def _step_finalize(self):
self.goal_reward.update_state()
self.external_actuator.update_state()
def _get_body_center_of_mass_pos(self, body_name):
return self._sim.data.subtree_com[
self._sim.model._body_name2id[body_name]]
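# ---------------------------------------------------------------------------
# Illustrative usage sketch (editor's note, not part of the original module).
# A minimal random-action rollout, assuming mujoco_py and the package assets
# are installed and the usual mushroom_rl Environment.reset() interface:
#
#   env = HumanoidGait(use_muscles=False, goal_reward="max_vel")
#   obs = env.reset()
#   for _ in range(100):
#       action = np.random.uniform(env.info.action_space.low,
#                                  env.info.action_space.high)
#       obs, reward, absorbing, _ = env.step(action)
#       if absorbing:
#           obs = env.reset()
# ---------------------------------------------------------------------------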
|
discretize/utils/code_utils.py | ngodber/discretize | 123 | 12769527 | <filename>discretize/utils/code_utils.py
import numpy as np
import warnings
SCALARTYPES = (complex, float, int, np.number)
def is_scalar(f):
"""Determine if the input argument is a scalar.
The function **is_scalar** returns *True* if the input is an integer,
float or complex number. The function returns *False* otherwise.
Parameters
----------
f :
Any input quantity
Returns
-------
bool :
- *True* if the input argument is an integer, float or complex number
- *False* otherwise
"""
if isinstance(f, SCALARTYPES):
return True
elif isinstance(f, np.ndarray) and f.size == 1 and isinstance(f[0], SCALARTYPES):
return True
return False
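# Illustrative examples (editor's note, not part of the original module):
#   is_scalar(3.0)                 -> True
#   is_scalar(np.array([2.0]))     -> True   (size-1 array of a scalar type)
#   is_scalar(np.array([1., 2.]))  -> False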
def as_array_n_by_dim(pts, dim):
"""Ensures the given array will have *dim* columns.
The function **as_array_n_by_dim** will examine the *pts* array,
    and coerce it to a 2D array whose second dimension has length *dim*.
    This is similar to :func:`numpy.atleast_2d`, except that it ensures that the
input has *dim* columns, and it appends a :data:`numpy.newaxis` to 1D arrays
instead of prepending.
Parameters
----------
pts : array_like
array to check.
dim : int
The number of columns which *pts* should have
Returns
-------
(n_pts, dim) numpy.ndarray
verified array
"""
if type(pts) == list:
pts = np.array(pts)
if not isinstance(pts, np.ndarray):
raise TypeError("pts must be a numpy array")
if dim > 1:
pts = np.atleast_2d(pts)
elif len(pts.shape) == 1:
pts = pts[:, np.newaxis]
if pts.shape[1] != dim:
raise ValueError(
"pts must be a column vector of shape (nPts, {0:d}) not ({1:d}, {2:d})".format(
*((dim,) + pts.shape)
)
)
return pts
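# Illustrative examples (editor's note, not part of the original module):
#   as_array_n_by_dim([[0, 0], [1, 1], [2, 2]], dim=2)  -> (3, 2) array
#   as_array_n_by_dim([0.0, 1.0, 2.0], dim=1)           -> (3, 1) array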
def requires(modules):
"""Decorator to wrap functions with soft dependencies.
This function was inspired by the `requires` function of pysal,
which is released under the 'BSD 3-Clause "New" or "Revised" License'.
https://github.com/pysal/pysal/blob/master/pysal/lib/common.py
Parameters
----------
modules : dict
Dictionary containing soft dependencies, e.g.,
{'matplotlib': matplotlib}.
Returns
-------
decorated_function : function
Original function if all soft dependencies are met, otherwise
it returns an empty function which prints why it is not running.
"""
# Check the required modules, add missing ones in the list `missing`.
missing = []
for key, item in modules.items():
if item is False:
missing.append(key)
def decorated_function(function):
"""Wrap function."""
if not missing:
return function
else:
def passer(*args, **kwargs):
print(("Missing dependencies: {d}.".format(d=missing)))
print(("Not running `{}`.".format(function.__name__)))
return passer
return decorated_function
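# Illustrative usage sketch (editor's note, not part of the original module):
# guard a plotting helper behind a soft matplotlib dependency. The decorator
# expects the imported module itself, or False when the import failed:
#
#   try:
#       import matplotlib
#   except ImportError:
#       matplotlib = False
#
#   @requires({"matplotlib": matplotlib})
#   def plot_something():
#       ...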
def deprecate_class(removal_version=None, new_location=None, future_warn=False):
if future_warn:
Warning = FutureWarning
else:
Warning = DeprecationWarning
def decorator(cls):
my_name = cls.__name__
parent_name = cls.__bases__[0].__name__
message = f"{my_name} has been deprecated, please use {parent_name}."
if removal_version is not None:
message += (
f" It will be removed in version {removal_version} of discretize."
)
else:
message += " It will be removed in a future version of discretize."
# stash the original initialization of the class
cls._old__init__ = cls.__init__
def __init__(self, *args, **kwargs):
warnings.warn(message, Warning)
self._old__init__(*args, **kwargs)
cls.__init__ = __init__
if new_location is not None:
parent_name = f"{new_location}.{parent_name}"
cls.__doc__ = f""" This class has been deprecated, see `{parent_name}` for documentation"""
return cls
return decorator
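# Illustrative usage sketch (editor's note, hypothetical class names):
#
#   class TensorMesh:                     # the maintained implementation
#       pass
#
#   @deprecate_class(removal_version="1.0.0", future_warn=True)
#   class OldTensorMesh(TensorMesh):      # kept only as a deprecated alias
#       pass
#
# Instantiating OldTensorMesh then emits a FutureWarning pointing at TensorMesh.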
def deprecate_module(old_name, new_name, removal_version=None, future_warn=False):
if future_warn:
Warning = FutureWarning
else:
Warning = DeprecationWarning
message = f"The {old_name} module has been deprecated, please use {new_name}."
if removal_version is not None:
message += f" It will be removed in version {removal_version} of discretize"
else:
message += " It will be removed in a future version of discretize."
message += " Please update your code accordingly."
warnings.warn(message, Warning)
def deprecate_property(new_name, old_name, removal_version=None, future_warn=False):
if future_warn:
Warning = FutureWarning
else:
Warning = DeprecationWarning
if removal_version is not None:
tag = f" It will be removed in version {removal_version} of discretize."
else:
tag = " It will be removed in a future version of discretize."
def get_dep(self):
class_name = type(self).__name__
message = (
f"{class_name}.{old_name} has been deprecated, please use {class_name}.{new_name}."
+ tag
)
warnings.warn(message, Warning)
return getattr(self, new_name)
def set_dep(self, other):
class_name = type(self).__name__
message = (
f"{class_name}.{old_name} has been deprecated, please use {class_name}.{new_name}."
+ tag
)
warnings.warn(message, Warning)
setattr(self, new_name, other)
doc = f"""
`{old_name}` has been deprecated. See `{new_name}` for documentation.
See Also
--------
{new_name}
"""
return property(get_dep, set_dep, None, doc)
def deprecate_method(new_name, old_name, removal_version=None, future_warn=False):
if future_warn:
Warning = FutureWarning
else:
Warning = DeprecationWarning
if removal_version is not None:
tag = f" It will be removed in version {removal_version} of discretize."
else:
tag = " It will be removed in a future version of discretize."
def new_method(self, *args, **kwargs):
class_name = type(self).__name__
warnings.warn(
f"{class_name}.{old_name} has been deprecated, please use {class_name}.{new_name}."
+ tag,
Warning,
)
return getattr(self, new_name)(*args, **kwargs)
doc = f"""
`{old_name}` has been deprecated. See `{new_name}` for documentation
See Also
--------
{new_name}
"""
new_method.__doc__ = doc
return new_method
def deprecate_function(new_function, old_name, removal_version=None, future_warn=False):
if future_warn:
Warning = FutureWarning
else:
Warning = DeprecationWarning
new_name = new_function.__name__
if removal_version is not None:
tag = f" It will be removed in version {removal_version} of discretize."
else:
tag = " It will be removed in a future version of discretize."
def dep_function(*args, **kwargs):
warnings.warn(
f"{old_name} has been deprecated, please use {new_name}." + tag,
Warning,
)
return new_function(*args, **kwargs)
doc = f"""
`{old_name}` has been deprecated. See `{new_name}` for documentation
See Also
--------
{new_name}
"""
dep_function.__doc__ = doc
return dep_function
# DEPRECATIONS
isScalar = deprecate_function(is_scalar, "isScalar", removal_version="1.0.0", future_warn=False)
asArray_N_x_Dim = deprecate_function(
as_array_n_by_dim, "asArray_N_x_Dim", removal_version="1.0.0", future_warn=False
)
|
network/layer_implementations/ConvLSTMCell.py | DesperateMaker/TrackR-CNN | 522 | 12769547 | import tensorflow as tf
from network.Util import smart_shape
RNNCell = tf.nn.rnn_cell.RNNCell
LSTMStateTuple = tf.nn.rnn_cell.LSTMStateTuple
def _conv2d(x, W, strides=None):
if strides is None:
strides = [1, 1]
return tf.nn.conv2d(x, W, strides=[1] + strides + [1], padding="SAME")
def dynamic_conv_rnn(cell, inputs, sequence_length=None, initial_state=None,
dtype=None, parallel_iterations=None, swap_memory=False,
time_major=False, scope=None):
# inputs should have shape (time, batch, height, width, feature)
input_shape = smart_shape(inputs)
num_units = cell.num_units()
h, final_state = tf.nn.dynamic_rnn(cell, inputs, sequence_length, initial_state, dtype, parallel_iterations,
swap_memory, time_major, scope)
h = tf.reshape(h, tf.stack([input_shape[0], input_shape[1], input_shape[2], input_shape[3], num_units]))
return h, final_state
# similar to https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/rnn/python/ops/core_rnn_cell_impl.py
# for maximal flexibility we allow to pass the weights externally
class ConvLSTMCell(RNNCell):
def __init__(self, num_units, height, width, filter_size, forget_bias=1.0, activation=tf.tanh, W=None, b=None):
self._num_units = num_units
self._height = height
self._width = width
self._size = num_units * height * width
self._forget_bias = forget_bias
self._activation = activation
self._filter_size = list(filter_size)
if W is not None:
W_shape = W.get_shape().as_list()
assert len(W_shape) == 4
assert W_shape[:2] == self._filter_size
assert W_shape[-1] == 4 * self._num_units
self._W = W
else:
self._W = None
if b is not None:
b_shape = b.get_shape().as_list()
assert len(b_shape) == 1
assert b_shape[0] == 4 * self._num_units
self._b = b
else:
self._b = None
def __call__(self, inputs, state, scope=None):
#inputs: `2-D` tensor with shape `[batch_size x input_size]`.
#state: tuple with shapes `[batch_size x s] for s in self.state_size
with tf.variable_scope(scope or type(self).__name__): # "ConvLSTMCell"
# Parameters of gates are concatenated into one multiply for efficiency.
c, h = state
concat = self._conv(inputs, h)
# i = input_gate, j = new_input, f = forget_gate, o = output_gate
i, j, f, o = tf.split(axis=3, num_or_size_splits=4, value=concat)
batch = inputs.get_shape().as_list()[0]
if batch is None:
batch = tf.shape(inputs)[0]
i, j, f, o = [tf.reshape(x, [batch, -1]) for x in [i, j, f, o]]
new_c = (c * tf.sigmoid(f + self._forget_bias) + tf.sigmoid(i) *
self._activation(j))
new_h = self._activation(new_c) * tf.sigmoid(o)
new_state = LSTMStateTuple(new_c, new_h)
return new_h, new_state
def _conv(self, inputs, h):
batch = inputs.get_shape().as_list()[0]
if batch is None:
batch = tf.shape(inputs)[0]
n_input_features = inputs.get_shape().as_list()[-1]
#inputs = tf.reshape(inputs, [batch, self._height, self._width, n_input_features])
h = tf.reshape(h, [batch, self._height, self._width, self._num_units])
inp = tf.concat([inputs, h], axis=3)
if self._W is not None:
W = self._W
assert W.get_shape().as_list()[2] == n_input_features + self._num_units
else:
W = tf.get_variable("W", shape=(self._filter_size + [n_input_features + self._num_units, 4 * self._num_units]))
if self._b is not None:
b = self._b
else:
zero_initializer = tf.constant_initializer(0.0, dtype=inputs.dtype)
b = tf.get_variable("b", shape=(4 * self._num_units), initializer=zero_initializer)
y = _conv2d(inp, W) + b
return y
def num_units(self):
return self._num_units
@property
def state_size(self):
return LSTMStateTuple(self._size, self._size)
@property
def output_size(self):
return self._size
|
AutotestWebD/apps/myadmin/service/UserService.py | yangjourney/sosotest | 422 | 12769554 | import apps.common.func.InitDjango
from all_models.models import TbUser, TbAdminUserPermissionRelation
from apps.common.func.WebFunc import *
class UserService(object):
@staticmethod
def getUsers():
return TbUser.objects.all()
@staticmethod
def getUserByLoginname(loginname):
return TbUser.objects.filter(loginName=loginname)
@staticmethod
def updateUser(userData):
tbModel = TbUser.objects.filter(id=userData["id"])
tbModel.update(**userData)
if __name__ == "__main__":
# print(UserService.getUsers()[0])
#permissionDict = UserPermission.getUserPermissions("liyc", "/interfaceTest/HTTP_InterfaceListCheck")
#print(permissionDict)
# print("permissionDict:", permissionDict)
#print("interfaceDict:", interfaceDict)
permissionsList = UserPermission.getOthersPermissions("liyc", ['lining02', 'gaozhe', 'qinjp', 'yongwy', 'pengjie', 'tanglu', 'hongln'], "/interfaceTest/HTTP_GlobalTextConfListPage")
# print("permissionsList:", permissionsList)
# print(UserService.getUserByLoginname(UserService.getUsers()[0].loginName))
|
base_solver/pytorch-captcha-recognition/my_dataset.py | johnnyzn/DW-GAN | 109 | 12769557 | <gh_stars>100-1000
# -*- coding: UTF-8 -*-
import os
from torch.utils.data import DataLoader,Dataset
import torchvision.transforms as transforms
from PIL import Image
import one_hot_encoding as ohe
import captcha_setting
import numpy as np
import cv2
class mydataset(Dataset):
def __init__(self, folder, folder_2 = None, transform=None):
self.train_image_file_paths = [os.path.join(folder, image_file) for image_file in os.listdir(folder)]
if(folder_2 is not None):
self.train_image_file_paths = self.train_image_file_paths + [os.path.join(folder_2, image_file) for image_file in os.listdir(folder_2)]
print(len(self.train_image_file_paths))
self.transform = transform
def __len__(self):
return len(self.train_image_file_paths)
def __getitem__(self, idx):
image_root = self.train_image_file_paths[idx]
image_name = image_root.split('/')[-1]
image = Image.open(image_root)
#print(image)
fix_size = (160, 60)
image = image.resize(fix_size)
# print(image_name)
if self.transform is not None:
image = self.transform(image)
# print(image_name)
if('_' in image_name):
label = ohe.encode(image_name.split('_')[0].upper())
else:
label = ohe.encode(image_name.split('.')[0].upper())
return image, label, image_name
def gaussian_blur(img):
image = np.array(img)
image_blur = cv2.GaussianBlur(image,(5,5),3)
new_image = image_blur
return new_image
transform = transforms.Compose([
# transforms.ColorJitter(),
transforms.Grayscale(),
# transforms.Lambda(gaussian_blur),
transforms.ToTensor(),
# transforms.Normalize(mean=[0.9], std=[0.4]),
])
def get_train_data_loader(s=True,d=200):
print('data path: ', captcha_setting.TRAIN_DATASET_PATH)
# dataset = mydataset(captcha_setting.TRAIN_DATASET_PATH, captcha_setting.TRAIN_DATASET_PATH_2, transform=transform)
dataset = mydataset(captcha_setting.TRAIN_DATASET_PATH, transform=transform)
return DataLoader(dataset, batch_size=512, shuffle=s)
def get_test_train_data_loader(s=True,d=256):
dataset = mydataset(captcha_setting.TRAIN_DATASET_PATH, transform=transform)
return DataLoader(dataset, batch_size=d, shuffle=s)
def get_test_data_loader(s=False,d=1):
print(captcha_setting.TEST_DATASET_PATH)
dataset = mydataset(captcha_setting.TEST_DATASET_PATH, transform=transform)
return DataLoader(dataset, batch_size=d, shuffle=s)
def get_predict_data_loader(s=True,d=1):
dataset = mydataset(captcha_setting.PREDICT_DATASET_PATH, transform=transform)
return DataLoader(dataset, batch_size=d, shuffle=s) |
utils/det_filter.py | rishyak/liverseg-2017-nipsws | 107 | 12769564 | <gh_stars>100-1000
import numpy as np
from scipy import misc
import os
import scipy.io
from PIL import Image
def filter(base_root, crops_list='crops_LiTS_gt.txt', input_config='masked_out_lesion', results_list='detection_lesion_example', th=0.5):
crops_list = base_root + 'utils/crops_list/' + crops_list
results_list = base_root + 'detection_results/' + results_list + '/soft_results.txt'
if crops_list is not None:
with open(crops_list) as t:
crops_lines = t.readlines()
input_results_path = base_root + 'results/' + input_config
output_results_path = base_root + 'results/det_' + input_config
if not os.path.exists(os.path.join(output_results_path)):
os.makedirs(os.path.join(output_results_path))
if results_list is not None:
with open(results_list) as t:
results_lines = t.readlines()
for i in range(105, 131):
folder_name = str(i)
images = []
nm = folder_name + '/'
for x in results_lines:
if nm in x:
images.append(x)
slices_names = []
if not os.path.exists(os.path.join(output_results_path, folder_name)):
os.makedirs(os.path.join(output_results_path, folder_name))
for j in range(len(images)):
slices_names.append(images[j].split()[0])
unique_slices_names = np.unique(slices_names)
for x in range(len(unique_slices_names)):
total_mask = []
for l in range(len(slices_names)):
if slices_names[l] == unique_slices_names[x]:
if float(images[l].split()[3]) > th:
aux_mask = np.zeros([512, 512])
x_bb = int(float(images[l].split()[1]))
y_bb = int(float(images[l].split()[2].split('\n')[0]))
aux_name = images[l].split()[0] + '.png'
total_patch = (np.array(Image.open(os.path.join(input_results_path, aux_name)), dtype=np.uint8))/255.0
cropped_patch = total_patch[x_bb: (x_bb + 80), y_bb:(y_bb + 80)]
aux_mask[x_bb: (x_bb + 80), y_bb:(y_bb + 80)] = cropped_patch
total_mask.append(aux_mask)
if len(total_mask) > 0:
if len(total_mask) > 1:
summed_mask = np.sum(total_mask, axis=0)
else:
summed_mask = np.array(total_mask)[0]
thresholded_total_mask = np.greater(total_mask, 0.0).astype(float)
summed_thresholded_total_mask = np.sum(thresholded_total_mask, axis= 0)
summed_thresholded_total_mask[summed_thresholded_total_mask == 0.0] = 1.0
summed_mask = np.divide(summed_mask, summed_thresholded_total_mask)
summed_mask = summed_mask*255.0
name = unique_slices_names[x].split('.')[0] + '.png'
scipy.misc.imsave(os.path.join(output_results_path, name), summed_mask)
for i in range(len(crops_lines)):
result = crops_lines[i].split(' ')
if len(result) > 2:
id_img, bool_zoom, mina, maxa, minb, maxb = result
else:
id_img, bool_zoom = result
if int(id_img.split('/')[-2]) > 104:
if not os.path.exists(os.path.join(output_results_path, id_img + '.png')):
mask = np.zeros([512, 512])
misc.imsave(os.path.join(output_results_path, id_img + '.png'), mask)
|
Joy_QA_Platform/ApiManager/operations/operation_task.py | bzc128/Joy_QA_Platform | 123 | 12769566 | import datetime
import json
import re
import os
import requests
import time
import threading
import pickle
from django.core.mail import send_mail
from django.db import connection
from django.http import JsonResponse
from django.shortcuts import render_to_response, render
from django.core.cache import cache
from ApiManager.utils import schedule
from ApiManager.utils.case_utils import run_case_by_id
from ApiManager.utils.forms import TaskModelForm
from ApiManager.models import ProjectInfo, ModuleInfo, TestCaseInfo, EnvInfo, TaskInfo, ReportInfo, TaskFailedRecord
from frame.utils.common import get_ajax_msg, dataToJson
from ApiManager.utils.forms import get_validate_form_msg
from ApiManager.utils.utils import pagination_for_objects
from Joy_QA_Platform.settings import EMAIL_FROM
from Joy_QA_Platform.configs import AUTH_ADD_TASK, AUTH_DELETE, AUTH_UPDATE, AUTH_VIEW, EMAIL_SUFFIX
is_timer_start = False
run_task_list = []
run_job_dict = {}
def task_list(request):
if request.method == "GET":
return render(request, 'api/task_list.html')
elif request.method == "POST":
index = int(request.POST.get('index'))
env_name_dic = {}
project_name_dic = {}
module_name_dic = {}
results = filter_tasks_for_user(request.user, TaskInfo.objects.filter().order_by('-id'), AUTH_VIEW)
tasks = pagination_for_objects(results, index)
if tasks is not None and len(tasks) > 0:
for task in tasks:
append_env_dict(task, env_name_dic)
append_project_dict(task, project_name_dic)
append_module_dict(task, module_name_dic)
count = len(results)
task_info_list = []
for task in tasks:
task_dict = task2Dict(task)
task_info_list.append(task_dict)
data = dataToJson(task_info_list)
return JsonResponse(get_ajax_msg(1, 1, '获取任务列表成功', {'tasks': data, 'count': count, 'currPage': index,
'envInfo': env_name_dic,
'proInfo': project_name_dic,
'moduleInfo': module_name_dic}))
def task_create(request):
if request.method == 'GET':
return render(request, 'api/task_new.html')
elif request.user.has_perm(AUTH_ADD_TASK):
if request.method == 'POST':
model_form = TaskModelForm(request.POST)
if model_form.is_valid():
task_name = request.POST.get('task_name')
env_id = request.POST.get('belong_env')
project_id = request.POST.get('belong_project')
module_id = request.POST.get('belong_module')
emails = request.POST.get('receiver_email')
start_time = datetime.datetime.fromtimestamp(int(request.POST.get('start_time')) / 1000)
if request.POST.get('is_loop') == 'true':
is_loop = True
elif request.POST.get('is_loop') == 'false':
is_loop = False
interval_minute = request.POST.get('interval_minute')
error_msg = None
if not EnvInfo.objects.filter(id=env_id).exists():
error_msg = '此环境不存在'
elif not ProjectInfo.objects.filter(id=project_id).exists():
error_msg = '此项目不存在'
elif not ModuleInfo.objects.filter(id=module_id).exists():
error_msg = '此模块不存在'
elif TaskInfo.objects.filter(task_name=task_name, belong_module_id=module_id).exists():
error_msg = '已存在此任务'
elif start_time <= datetime.datetime.now():
error_msg = '任务开始时间早于当前时间'
elif is_loop and int(interval_minute) < 1:
error_msg = '任务开始循环间隔时间不能小于1分钟'
elif not validate_emails(emails.split(';')):
error_msg = '邮箱格式错误'
if error_msg is not None:
return JsonResponse(get_ajax_msg(0, 0, error_msg, {}))
model_form.instance.belong_env_id = env_id
model_form.instance.belong_project_id = project_id
model_form.instance.belong_module_id = module_id
model_form.instance.start_time = start_time
model_form.instance.receiver_email = deal_emails(emails.split(';'))
model_form.save()
for case_id in request.POST.get('case_list').split(','):
task = TaskInfo.objects.get(task_name=request.POST.get('task_name'))
case = TestCaseInfo.objects.get(id=case_id)
task.cases.add(case)
return JsonResponse(get_ajax_msg(1, 1, '添加任务成功', {}))
else:
msg = get_validate_form_msg(model_form)
return JsonResponse(get_ajax_msg(0, 0, msg))
else:
return JsonResponse(get_ajax_msg(0, 0, '用户没有创建任务的权限'))
def task_search(request):
if request.method == 'POST':
index = int(request.POST.get('index'))
task_name = request.POST.get('task_name')
project_name = request.POST.get('project_name')
module_name = request.POST.get('module_name')
tasks = None
env_name_dic = {}
project_name_dic = {}
module_name_dic = {}
count = 0
if len(task_name) == 0 and len(project_name) == 0 and len(module_name) == 0:
return JsonResponse(get_ajax_msg(0, 0, '搜索条件无效'))
else:
tasks = TaskInfo.objects.all()
if len(module_name) != 0 and module_name != '模块名称':
tasks = tasks.filter(belong_module__module_name__contains=module_name)
if len(project_name) != 0 and project_name != '项目名称':
tasks = tasks.filter(belong_project__project_name__contains=project_name)
if len(task_name) != 0:
tasks = tasks.filter(task_name__contains=task_name)
if tasks == None:
return JsonResponse(get_ajax_msg(0, 0, '查询出错'))
if tasks != None and len(tasks) > 0:
            tasks = filter_tasks_for_user(request.user, tasks.order_by('-id'), AUTH_VIEW)  # filter tasks by the user's permissions
for task in tasks:
append_env_dict(task, env_name_dic)
append_project_dict(task, project_name_dic)
append_module_dict(task, module_name_dic)
count = len(tasks)
tasks = pagination_for_objects(tasks, index)
task_info_list = []
for task in tasks:
task_dict = task2Dict(task)
task_info_list.append(task_dict)
data = dataToJson(task_info_list)
return JsonResponse(get_ajax_msg(1, 1, '搜索成功', {'tasks': data, 'count': count, 'currPage': index,
'envInfo': env_name_dic, 'proInfo': project_name_dic,
'moduleInfo': module_name_dic}))
def task_delete(request):
if request.method == 'POST':
task_id = request.POST.get('id')
tasks = TaskInfo.objects.filter(id=task_id)
if len(tasks) == 0:
return JsonResponse(get_ajax_msg(0, 0, '没有这条数据', {}))
if check_perm(request.user, tasks[0], AUTH_DELETE):
tasks[0].delete()
return JsonResponse(get_ajax_msg(1, 1, '删除成功', {}))
else:
return JsonResponse(get_ajax_msg(0, 0, '用户没有删除该任务的权限'))
def task_query(request):
if request.method == 'POST':
task_id = request.POST.get('id')
tasks = TaskInfo.objects.filter(id=task_id)
if len(tasks) == 0:
return JsonResponse(get_ajax_msg(0, 0, '没有这条数据', {}))
tasks = filter_tasks_for_user(request.user, tasks, AUTH_VIEW)
task_info_list = []
for task in tasks:
task_dict = task2Dict(task)
task_info_list.append(task_dict)
data = dataToJson(task_info_list)
return JsonResponse(get_ajax_msg(1, 1, '获取任务成功', {'tasks': data}))
def task_update(request):
if request.method == 'POST':
task_form = TaskModelForm(request.POST)
if task_form.is_valid():
task_id = request.POST.get('id')
task_name = request.POST.get('task_name')
env_name = request.POST.get('env_name')
project_name = request.POST.get('project_name')
module_name = request.POST.get('module_name')
receiver_email = request.POST.get('receiver_email')
case_list = request.POST.get('case_list').split(',')
start_time = datetime.datetime.fromtimestamp(int(request.POST.get('start_time')) / 1000)
interval_minute = request.POST.get('interval_minute')
if request.POST.get('is_loop') == 'true':
is_loop = True
if int(interval_minute) < 1:
return JsonResponse(get_ajax_msg(0, 0, '循环间隔时间不能小于1分钟', {}))
elif request.POST.get('is_loop') == 'false':
is_loop = False
if start_time <= datetime.datetime.now():
start_time = datetime.datetime.now()
# return JsonResponse(get_ajax_msg(0, 0, '任务开始时间早于当前时间', {}))
if not validate_emails(receiver_email.split(';')):
return JsonResponse(get_ajax_msg(0, 0, '邮箱格式错误'))
# print(deal_emails(receiver_email.split(';')))
try:
task = TaskInfo.objects.get(id=task_id)
if TaskInfo.objects.filter(task_name=task_name,belong_module_id=module_name).exclude(id=task_id).exists():
return JsonResponse(get_ajax_msg(0, 0, '已存在此任务名称', {}))
if not task.is_run:
if check_perm(request.user, TaskInfo.objects.get(id=task_id), AUTH_UPDATE):
if TaskInfo.objects.update_task(task_id, task_name=task_name, env_name=env_name,
project_name=project_name,
module_name=module_name, receiver_email=deal_emails(receiver_email.split(';')),
case_list=case_list,
start_time=start_time, is_loop=is_loop,
interval_minute=interval_minute):
return JsonResponse(get_ajax_msg(1, 1, '修改任务成功', {}))
else:
return JsonResponse(get_ajax_msg(0, 0, '修改任务失败', {}))
else:
return JsonResponse(get_ajax_msg(0, 0, '用户没有修改该任务的权限'))
else:
return JsonResponse(get_ajax_msg(0, 0, '请先停止任务', {}))
except:
return JsonResponse(get_ajax_msg(0, 0, '该任务不存在', {}))
else:
msg = get_validate_form_msg(task_form)
return JsonResponse(get_ajax_msg(0, 1, msg))
def task_run(request):
global is_timer_start
global run_task_list
global run_job_dict
if request.method == 'POST':
task_id = request.POST.get('id')
tasks = TaskInfo.objects.filter(id=task_id)
if len(tasks) == 0:
return JsonResponse(get_ajax_msg(0, 0, '没有这条数据', {}))
task = tasks[0]
if not task.is_run:
            if task.start_time > datetime.datetime.now():  # the task start time must be later than the current time
pass
else:
task.start_time = datetime.datetime.now() + datetime.timedelta(seconds=10)
# if not is_timer_start:
# is_timer_start = True
# start_task_timer = StartTaskTimer(run_task_list, run_job_dict)
# start_task_timer.start()
run_task_list.append(task)
task.is_run = True
task.save()
connection.close()
return JsonResponse(get_ajax_msg(1, 1, '该任务成功运行'))
else:
connection.close()
return JsonResponse(get_ajax_msg(0, 0, '该任务正在运行'))
def task_stop(request):
global run_task_list
global run_job_dict
if request.method == 'POST':
task_id = request.POST.get('id')
tasks = TaskInfo.objects.filter(id=task_id)
if len(tasks) == 0:
return JsonResponse(get_ajax_msg(0, 0, '没有这条数据', {}))
task = tasks[0]
if task.is_run:
task.is_run = False
task.fail_times = 0
task.save()
# if task in run_task_list:
# run_task_list.remove(task) # 从运行任务列表中删除该任务
try:
# jobs = run_job_dict[task.id]
# for job in jobs:
schedule.cancel_job(task.id)
except KeyError:
                print('not a recurring task')
return JsonResponse(get_ajax_msg(1, 1, '该任务成功停止'))
else:
return JsonResponse(get_ajax_msg(0, 0, '该任务没有运行'))
def task_monitor(request):
if request.method == 'GET':
return render(request, 'api/task_monitor.html')
if request.method == 'POST':
index = int(request.POST.get('index'))
search_task_name = request.POST.get('task_name')
start = (index - 1) * 10
        res = requests.get('http://127.0.0.1:5555/api/tasks?limit=1000')  # cap the query at 1000 tasks to avoid slow page loads
results = json.loads(res.content)
monitor_result_list = []
for result in results.values():
try:
task_dict = {}
args = result['args'].split(',')
                # extract the task information
infos = args[1].split('-')
if '定时任务' in infos[0]:
task_name = infos[1]
case_name = infos[2]
report_uuid = args[4].split("'")[1]
task_dict['task_name'] = task_name
task_dict['case_name'] = case_name
task_dict['state'] = result['state']
task_dict['result'] = result['result']
task_dict['received'] = result['received']
task_dict['started'] = result['started']
task_dict['runtime'] = result['runtime']
task_dict['report_uuid'] = report_uuid
if search_task_name is not None:
if search_task_name in task_dict['task_name']:
monitor_result_list.append(task_dict)
else:
monitor_result_list.append(task_dict)
except Exception as e:
                print('Failed to parse monitor task data: {}'.format(e))
        # sort in descending order of task start time
for i in range(len(monitor_result_list) - 1):
for j in range(len(monitor_result_list) - i - 1):
if monitor_result_list[j]['received'] < monitor_result_list[j + 1]['received']:
monitor_result_list[j], monitor_result_list[j + 1] = monitor_result_list[j + 1], monitor_result_list[j]
data = dataToJson(monitor_result_list[start: start + 10])
return JsonResponse(get_ajax_msg(1, 1, '获取监控任务列表成功', {'monitors': data, 'count': len(monitor_result_list), 'currPage': index}))
def thread_run_case(**kwargs):
case_id = kwargs['case_id']
base_url = kwargs['base_url']
task_name = kwargs['task_name']
task_id = kwargs['task_id']
threading.Thread(target=run_case, args=(base_url, case_id, task_name, task_id)).start()
def run_case(base_url, case_id, task_name, task_id):
report_id = run_case_by_id(base_url, case_id, task_name,"定时任务",isTask=True)
    time.sleep(5)  # wait for the report to be written to the database
reports = ReportInfo.objects.all().filter(report_id=report_id)
tasks = TaskInfo.objects.filter(id=task_id)
if len(tasks) > 0:
task = tasks[0]
if len(reports) == 0:
            # if the report does not exist, treat the case as passed; no further handling needed
if len(tasks) > 0:
task.fail_times = 0
task.save()
else:
response_result = get_response_result(report_id)
if response_result != True:
task.fail_times += 1
task.save()
                # store a failure record
failRecord = TaskFailedRecord(task_id=task,report_id=reports[0].id,time=datetime.datetime.fromtimestamp(reports[0].test_time))
failRecord.save()
if task.fail_times % 2 == 0 and task.fail_times != 0:
receivers = task.receiver_email.split(';')
for receiver in receivers:
send_warn_mail(task_name, receiver, reports[0].id)
    connection.close()  # avoid piling up too many MySQL connections
def get_response_result(report_id):
response_result = True
try:
reports = ReportInfo.objects.all().filter(report_id=report_id)
if len(reports) > 0:
report = reports[0]
# print(report.result_data)
summury = json.loads(report.result_data)
stat = summury['stat']
if stat['successes'] != stat['testsRun']:
response_result = False
except Exception as e:
print('get_response_code e=====>', e)
return response_result
def send_warn_mail(task_name, receiver, report_id):
tips = task_name + ':监控到接口发生异常!查看报告地址:http://qa.15166.com/api/get_report/?id=' + str(report_id)
try:
email_title = "Joy_QA_Platform 定时任务监控接口"
email_body = tips
        # send the mail with Django's built-in helper; four arguments: subject, body, sender, list of recipients
send_status = send_mail(email_title, email_body, EMAIL_FROM, [receiver])
except Exception as e:
print(e)
def task2Dict(task):
task_dict = {}
task_dict["id"] = task.id
task_dict["task_name"] = task.task_name
task_dict["belong_env"] = task.belong_env_id
task_dict["belong_project"] = task.belong_project_id
task_dict["belong_module"] = task.belong_module_id
task_dict["receiver_email"] = task.receiver_email
task_dict["case_id_list"] = []
task_dict["case_name_list"] = []
task_dict["start_time"] = task.start_time
task_dict["is_loop"] = task.is_loop
task_dict["interval_minute"] = task.interval_minute
task_dict["is_run"] = task.is_run
task_dict["fail_times"] = task.fail_times
cases = task.cases.all()
for case in cases:
id = case.id
task_dict["case_id_list"].append(case.id)
task_dict["case_name_list"].append(case.name)
return task_dict
def append_env_dict(task, env_dict):
env_id = task.belong_env_id
env_name = task.belong_env.env_name
env_dict[str(env_id)] = env_name
def append_project_dict(task, project_dict):
project_id = task.belong_project_id
project_name = task.belong_project.project_name
project_dict[str(project_id)] = project_name
def append_module_dict(task, module_dict):
module_id = task.belong_module_id
module_name = task.belong_module.module_name
module_dict[str(module_id)] = module_name
def get_url_from_task(task):
envs = EnvInfo.objects.filter(id=task.belong_env_id)
env = envs[0]
return env.host_port
class StartTaskTimer(threading.Thread):
def __init__(self, run_task_list, run_job_dict):
threading.Thread.__init__(self)
self.run_task_list = run_task_list
self.run_job_dict = run_job_dict
def run(self):
while True:
# lst = self.run_task_list[::]
tasks = get_running_tasks()
for task in tasks:
now = datetime.datetime.now()
if task.start_time <= now <= (task.start_time + datetime.timedelta(seconds=5)):
if task.is_loop:
self.run_job_dict[task.id] = start_loop_task(task, thread_run_case)
else:
start_task(task, thread_run_case)
task.is_run = False
task.fail_times = 0
task.save()
# self.run_task_list.remove(task)
else:
pass
time.sleep(5)
mutex = threading.Lock()
def get_running_tasks():
global mutex
with mutex:
result = []
tasks = TaskInfo.objects.filter(is_run=True,is_loop=True)
now = datetime.datetime.now()
for task in tasks:
            # guard against possible duplicate execution
if task.start_time <= now <= (task.start_time + datetime.timedelta(seconds=5)) and (now - task.last_run_time > datetime.timedelta(seconds=5)):
result.append(task)
task.last_run_time = now
task.save()
# if datetime.datetime.now() - task.last_run_time > datetime.timedelta(seconds=task.interval_minute * 60 - 5):
# result.append(task)
connection.close()
if len(result) > 0:
for i in result:
print("获取到任务:",i.task_name)
return result
def start_loop_task(task, func):
base_url = get_url_from_task(task)
jobs = []
cases = task.cases.all()
for case in cases:
task_name = get_task_name(task, case)
func(case_id=case.id, base_url=base_url, task_name=task_name, task_id=task.id)
job = schedule.every(task.interval_minute).minutes.do(thread_run_case, case_id=case.id,
base_url=base_url, task_name=task_name, task_id=task.id)
cache.set("qa_paltform_loop_jobs_"+str(datetime.datetime.now()),pickle.dumps(job),timeout=None)
flag = cache.get("qa_test_platform_running_flag")
# print("flag==="+str(flag))
if flag != 1:
schedule.run_continuously()
        # the expiry must be set explicitly (timeout=None), otherwise a new thread is started again once the value expires (the default expiry is 5 minutes, a django-redis quirk compared to plain redis)
cache.set("qa_test_platform_running_flag",1,timeout=None)
return jobs
def start_task(task, func):
base_url = get_url_from_task(task)
cases = task.cases.all()
for case in cases:
task_name = get_task_name(task, case)
func(case_id=case.id, base_url=base_url, task_name=task_name, task_id=task.id)
def get_task_name(task, case):
name = '定时任务' + '-' + task.task_name + '-' + case.name
return name
def filter_tasks_for_user(user, tasks, perm):
results = []
for task in tasks:
project = task.belong_project
if user.has_perm(perm, project):
results.append(task)
return results
def check_perm(user, task, perm):
project = task.belong_project
return user.has_perm(perm, project)
def restart_running_task():
    # clear the cached scheduled jobs from redis
cache.delete_pattern("qa_paltform_loop_jobs_*")
    # clear the distributed lock in redis to avoid occasional lock issues that leave tasks blocked in the executor's run_pending
cache.delete_pattern('*qa_test_platform_get')
    # reset the flag that marks whether the worker thread was already started, so we don't start one thread per scheduled task (which could run tasks twice)
cache.delete_pattern('qa_test_platform_running_flag')
print("清除任务缓存、清除锁、清除线程启动标记")
start_task_timer = StartTaskTimer(run_task_list, run_job_dict)
start_task_timer.start()
tasks = TaskInfo.objects.filter(is_run=True, is_loop=True)
count = 0
for task in tasks:
task.start_time = datetime.datetime.now() + datetime.timedelta(seconds=10*(count+1))
task.save()
count = count + 1
    connection.close()  # avoid piling up too many MySQL connections
def validate_emails(emails):
for email in emails:
if len(email) == 0:
continue
if re.match("^[A-Z0-9a-z._%+-]+" + EMAIL_SUFFIX, email) is None:
return False
return True
def deal_emails(emails):
result = []
for email in emails:
if email not in result:
result.append(email)
resultEmail = ""
for email in result:
resultEmail = resultEmail + ";" + email
return resultEmail[1:]
|
faceai/opencv/trackbar.py | xurohanmm/faceai | 9,944 | 12769588 | #coding=utf-8
# Color palette demo
import cv2
import numpy as np
img = np.zeros((300, 512, 3), np.uint8)
cv2.namedWindow('image')
def callback(x):
pass
# Args: 1) trackbar name; 2) target window; 3, 4) minimum and maximum values; 5) callback invoked on value change
cv2.createTrackbar('R', 'image', 0, 255, callback)
cv2.createTrackbar('G', 'image', 0, 255, callback)
cv2.createTrackbar('B', 'image', 0, 255, callback)
while (1):
cv2.imshow('image', img)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
r = cv2.getTrackbarPos('R', 'image')
g = cv2.getTrackbarPos('G', 'image')
b = cv2.getTrackbarPos('B', 'image')
img[:] = [b, g, r]
cv2.destroyAllWindows() |