| Column | Type | Range |
|---|---|---|
| max_stars_repo_path | string | length 4–286 |
| max_stars_repo_name | string | length 5–119 |
| max_stars_count | int64 | 0–191k |
| id | string | length 1–7 |
| content | string | length 6–1.03M |
| content_cleaned | string | length 6–1.03M |
| language | string | 111 classes |
| language_score | float64 | 0.03–1 |
| comments | string | length 0–556k |
| edu_score | float64 | 0.32–5.03 |
| edu_int_score | int64 | 0–5 |
max_stars_repo_path: 0000_examples/grasping_antipodal_planning.py | max_stars_repo_name: huzhengtao14z/wrs | max_stars_count: 0 | id: 10600

```python
import math
import visualization.panda.world as wd
import modeling.geometric_model as gm
import modeling.collision_model as cm
import grasping.planning.antipodal as gpa
import robot_sim.end_effectors.grippers.yumi_gripper.yumi_gripper as yg
base = wd.World(cam_pos=[1, 1, 1],w=960,
h=540, lookat_pos=[0, 0, 0])
gm.gen_frame().attach_to(base)
# object
object_tube = cm.CollisionModel("objects/tubebig.stl")
object_tube.set_rgba([.9, .75, .35, 1])
object_tube.attach_to(base)
# hnd_s
gripper_s = yg.YumiGripper()
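# plan a set of antipodal grasp candidates for the Yumi gripper on the tube model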
grasp_info_list = gpa.plan_grasps(gripper_s, object_tube,
angle_between_contact_normals=math.radians(177),
openning_direction='loc_x',
max_samples=15, min_dist_between_sampled_contact_points=.005,
contact_offset=.005)
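# save the planned grasps so later scripts can reuse them without re-planning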
gpa.write_pickle_file('tubebig', grasp_info_list, './', 'yumi_tube_big.pickle')
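# preview every candidate by attaching a translucent gripper mesh at its grasp pose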
for grasp_info in grasp_info_list:
jaw_width, jaw_center_pos, jaw_center_rotmat, hnd_pos, hnd_rotmat = grasp_info
gripper_s.grip_at_with_jcpose(jaw_center_pos, jaw_center_rotmat, jaw_width)
gripper_s.gen_meshmodel(rgba=(1,0,0,0.01)).attach_to(base)
base.run()
```

language: en | language_score: 0.725924 | comments: # object # hnd_s | edu_score: 2.033151 | edu_int_score: 2
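
The example above plans antipodal grasps for the Yumi gripper and saves them with `gpa.write_pickle_file`. As a small companion sketch (not part of the original file), the snippet below shows one way the saved grasps could be read back and filtered for roughly top-down approaches. The file name and the five-element layout of each `grasp_info` tuple come from the example itself; treating the pickle as a dict keyed by object name, and taking the rotation matrix's third column as the approach axis, are assumptions that should be checked against the wrs source.

```python
import math
import pickle

import numpy as np


def filter_top_down(grasp_info_list, max_tilt_deg=30.0):
    """Keep grasps whose assumed approach axis points roughly straight down."""
    world_down = np.array([0.0, 0.0, -1.0])
    kept = []
    for grasp_info in grasp_info_list:
        jaw_width, jaw_center_pos, jaw_center_rotmat, hnd_pos, hnd_rotmat = grasp_info
        # Assumption: the local z column of the jaw-center rotation matrix is
        # the approach direction; adjust if the gripper convention differs.
        approach_dir = jaw_center_rotmat[:, 2]
        cos_tilt = float(np.clip(np.dot(approach_dir, world_down), -1.0, 1.0))
        if math.degrees(math.acos(cos_tilt)) <= max_tilt_deg:
            kept.append(grasp_info)
    return kept


if __name__ == "__main__":
    # The pickle written above by gpa.write_pickle_file; assuming it holds a
    # dict keyed by object name is a guess about its on-disk layout.
    with open("yumi_tube_big.pickle", "rb") as f:
        data = pickle.load(f)
    grasp_info_list = data["tubebig"] if isinstance(data, dict) else data
    print(f"{len(filter_top_down(grasp_info_list))} top-down grasps kept")
```
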
max_stars_repo_path: keystone/assignment/core.py | max_stars_repo_name: pritha-srivastava/keystone | max_stars_count: 0 | id: 10601

```python
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Main entry point into the Assignment service."""
import copy
import itertools
from oslo_log import log
from keystone.common import cache
from keystone.common import driver_hints
from keystone.common import manager
from keystone.common import provider_api
import keystone.conf
from keystone import exception
from keystone.i18n import _
from keystone import notifications
CONF = keystone.conf.CONF
LOG = log.getLogger(__name__)
PROVIDERS = provider_api.ProviderAPIs
# This is a general cache region for assignment administration (CRUD
# operations).
MEMOIZE = cache.get_memoization_decorator(group='role')
# This builds a discrete cache region dedicated to role assignments computed
# for a given user + project/domain pair. Any write operation to add or remove
# any role assignment should invalidate this entire cache region.
COMPUTED_ASSIGNMENTS_REGION = cache.create_region(name='computed assignments')
MEMOIZE_COMPUTED_ASSIGNMENTS = cache.get_memoization_decorator(
group='role',
region=COMPUTED_ASSIGNMENTS_REGION)
@notifications.listener
class Manager(manager.Manager):
"""Default pivot point for the Assignment backend.
See :class:`keystone.common.manager.Manager` for more details on how this
dynamically calls the backend.
"""
driver_namespace = 'keystone.assignment'
_provides_api = 'assignment_api'
_SYSTEM_SCOPE_TOKEN = 'system'
_USER_SYSTEM = 'UserSystem'
_GROUP_SYSTEM = 'GroupSystem'
_PROJECT = 'project'
_ROLE_REMOVED_FROM_USER = 'role_removed_from_user'
_INVALIDATION_USER_PROJECT_TOKENS = 'invalidate_user_project_tokens'
def __init__(self):
assignment_driver = CONF.assignment.driver
super(Manager, self).__init__(assignment_driver)
self.event_callbacks = {
notifications.ACTIONS.deleted: {
'domain': [self._delete_domain_assignments],
},
}
def _delete_domain_assignments(self, service, resource_type, operations,
payload):
domain_id = payload['resource_info']
self.driver.delete_domain_assignments(domain_id)
def _get_group_ids_for_user_id(self, user_id):
# TODO(morganfainberg): Implement a way to get only group_ids
# instead of the more expensive to_dict() call for each record.
return [x['id'] for
x in PROVIDERS.identity_api.list_groups_for_user(user_id)]
def list_user_ids_for_project(self, tenant_id):
PROVIDERS.resource_api.get_project(tenant_id)
assignment_list = self.list_role_assignments(
project_id=tenant_id, effective=True)
# Use set() to process the list to remove any duplicates
return list(set([x['user_id'] for x in assignment_list]))
def _send_app_cred_notification_for_role_removal(self, role_id):
"""Delete all application credential for a specific role.
:param role_id: role identifier
:type role_id: string
"""
assignments = self.list_role_assignments(role_id=role_id)
for assignment in assignments:
if 'user_id' in assignment and 'project_id' in assignment:
payload = {
'user_id': assignment['user_id'],
'project_id': assignment['project_id']
}
notifications.Audit.internal(
notifications.REMOVE_APP_CREDS_FOR_USER, payload
)
@MEMOIZE_COMPUTED_ASSIGNMENTS
def get_roles_for_user_and_project(self, user_id, tenant_id):
"""Get the roles associated with a user within given project.
This includes roles directly assigned to the user on the
project, as well as those by virtue of group membership or
inheritance.
:returns: a list of role ids.
:raises keystone.exception.ProjectNotFound: If the project doesn't
exist.
"""
PROVIDERS.resource_api.get_project(tenant_id)
assignment_list = self.list_role_assignments(
user_id=user_id, project_id=tenant_id, effective=True)
# Use set() to process the list to remove any duplicates
return list(set([x['role_id'] for x in assignment_list]))
@MEMOIZE_COMPUTED_ASSIGNMENTS
def get_roles_for_trustor_and_project(self, trustor_id, project_id):
"""Get the roles associated with a trustor within given project.
This includes roles directly assigned to the trustor on the
project, as well as those by virtue of group membership or
inheritance, but it doesn't include the domain roles.
:returns: a list of role ids.
:raises keystone.exception.ProjectNotFound: If the project doesn't
exist.
"""
PROVIDERS.resource_api.get_project(project_id)
assignment_list = self.list_role_assignments(
user_id=trustor_id, project_id=project_id, effective=True,
strip_domain_roles=False)
# Use set() to process the list to remove any duplicates
return list(set([x['role_id'] for x in assignment_list]))
@MEMOIZE_COMPUTED_ASSIGNMENTS
def get_roles_for_user_and_domain(self, user_id, domain_id):
"""Get the roles associated with a user within given domain.
:returns: a list of role ids.
:raises keystone.exception.DomainNotFound: If the domain doesn't exist.
"""
PROVIDERS.resource_api.get_domain(domain_id)
assignment_list = self.list_role_assignments(
user_id=user_id, domain_id=domain_id, effective=True)
# Use set() to process the list to remove any duplicates
return list(set([x['role_id'] for x in assignment_list]))
def get_roles_for_groups(self, group_ids, project_id=None, domain_id=None):
"""Get a list of roles for this group on domain and/or project."""
# if no group ids were passed, there are no roles. Without this check,
# all assignments for the project or domain will be fetched,
# which is not what we want.
if not group_ids:
return []
if project_id is not None:
PROVIDERS.resource_api.get_project(project_id)
assignment_list = self.list_role_assignments(
source_from_group_ids=group_ids, project_id=project_id,
effective=True)
elif domain_id is not None:
assignment_list = self.list_role_assignments(
source_from_group_ids=group_ids, domain_id=domain_id,
effective=True)
else:
raise AttributeError(_("Must specify either domain or project"))
role_ids = list(set([x['role_id'] for x in assignment_list]))
return PROVIDERS.role_api.list_roles_from_ids(role_ids)
@notifications.role_assignment('created')
def _add_role_to_user_and_project_adapter(self, role_id, user_id=None,
group_id=None, domain_id=None,
project_id=None,
inherited_to_projects=False,
context=None):
# The parameters for this method must match the parameters for
# create_grant so that the notifications.role_assignment decorator
# will work.
PROVIDERS.resource_api.get_project(project_id)
PROVIDERS.role_api.get_role(role_id)
self.driver.add_role_to_user_and_project(user_id, project_id, role_id)
def add_role_to_user_and_project(self, user_id, tenant_id, role_id):
self._add_role_to_user_and_project_adapter(
role_id, user_id=user_id, project_id=tenant_id)
COMPUTED_ASSIGNMENTS_REGION.invalidate()
# TODO(henry-nash): We might want to consider list limiting this at some
# point in the future.
@MEMOIZE_COMPUTED_ASSIGNMENTS
def list_projects_for_user(self, user_id):
# FIXME(lbragstad): Without the use of caching, listing effective role
# assignments is slow, especially with large data set (lots of users
# with multiple role assignments). This should serve as a marker in
# case we have the opportunity to come back and optimize this code so
# that it can be performant without having a hard dependency on
# caching. Please see https://bugs.launchpad.net/keystone/+bug/1700852
# for more details.
assignment_list = self.list_role_assignments(
user_id=user_id, effective=True)
# Use set() to process the list to remove any duplicates
project_ids = list(set([x['project_id'] for x in assignment_list
if x.get('project_id')]))
return PROVIDERS.resource_api.list_projects_from_ids(project_ids)
# TODO(henry-nash): We might want to consider list limiting this at some
# point in the future.
@MEMOIZE_COMPUTED_ASSIGNMENTS
def list_domains_for_user(self, user_id):
assignment_list = self.list_role_assignments(
user_id=user_id, effective=True)
# Use set() to process the list to remove any duplicates
domain_ids = list(set([x['domain_id'] for x in assignment_list
if x.get('domain_id')]))
return PROVIDERS.resource_api.list_domains_from_ids(domain_ids)
def list_domains_for_groups(self, group_ids):
assignment_list = self.list_role_assignments(
source_from_group_ids=group_ids, effective=True)
domain_ids = list(set([x['domain_id'] for x in assignment_list
if x.get('domain_id')]))
return PROVIDERS.resource_api.list_domains_from_ids(domain_ids)
def list_projects_for_groups(self, group_ids):
assignment_list = self.list_role_assignments(
source_from_group_ids=group_ids, effective=True)
project_ids = list(set([x['project_id'] for x in assignment_list
if x.get('project_id')]))
return PROVIDERS.resource_api.list_projects_from_ids(project_ids)
@notifications.role_assignment('deleted')
def _remove_role_from_user_and_project_adapter(self, role_id, user_id=None,
group_id=None,
domain_id=None,
project_id=None,
inherited_to_projects=False,
context=None):
# The parameters for this method must match the parameters for
# delete_grant so that the notifications.role_assignment decorator
# will work.
self.driver.remove_role_from_user_and_project(user_id, project_id,
role_id)
payload = {'user_id': user_id, 'project_id': project_id}
notifications.Audit.internal(
notifications.REMOVE_APP_CREDS_FOR_USER,
payload
)
self._invalidate_token_cache(
role_id, group_id, user_id, project_id, domain_id
)
def remove_role_from_user_and_project(self, user_id, tenant_id, role_id):
self._remove_role_from_user_and_project_adapter(
role_id, user_id=user_id, project_id=tenant_id)
COMPUTED_ASSIGNMENTS_REGION.invalidate()
def _invalidate_token_cache(self, role_id, group_id, user_id, project_id,
domain_id):
if group_id:
actor_type = 'group'
actor_id = group_id
elif user_id:
actor_type = 'user'
actor_id = user_id
if domain_id:
target_type = 'domain'
target_id = domain_id
elif project_id:
target_type = 'project'
target_id = project_id
reason = (
'Invalidating the token cache because role %(role_id)s was '
'removed from %(actor_type)s %(actor_id)s on %(target_type)s '
'%(target_id)s.' %
{'role_id': role_id, 'actor_type': actor_type,
'actor_id': actor_id, 'target_type': target_type,
'target_id': target_id}
)
notifications.invalidate_token_cache_notification(reason)
@notifications.role_assignment('created')
def create_grant(self, role_id, user_id=None, group_id=None,
domain_id=None, project_id=None,
inherited_to_projects=False,
initiator=None):
role = PROVIDERS.role_api.get_role(role_id)
if domain_id:
PROVIDERS.resource_api.get_domain(domain_id)
if project_id:
project = PROVIDERS.resource_api.get_project(project_id)
# For domain specific roles, the domain of the project
# and role must match
if role['domain_id'] and project['domain_id'] != role['domain_id']:
raise exception.DomainSpecificRoleMismatch(
role_id=role_id,
project_id=project_id)
self.driver.create_grant(
role_id, user_id=user_id, group_id=group_id, domain_id=domain_id,
project_id=project_id, inherited_to_projects=inherited_to_projects
)
COMPUTED_ASSIGNMENTS_REGION.invalidate()
def get_grant(self, role_id, user_id=None, group_id=None,
domain_id=None, project_id=None,
inherited_to_projects=False):
role_ref = PROVIDERS.role_api.get_role(role_id)
if domain_id:
PROVIDERS.resource_api.get_domain(domain_id)
if project_id:
PROVIDERS.resource_api.get_project(project_id)
self.check_grant_role_id(
role_id, user_id=user_id, group_id=group_id, domain_id=domain_id,
project_id=project_id, inherited_to_projects=inherited_to_projects
)
return role_ref
def list_grants(self, user_id=None, group_id=None,
domain_id=None, project_id=None,
inherited_to_projects=False):
if domain_id:
PROVIDERS.resource_api.get_domain(domain_id)
if project_id:
PROVIDERS.resource_api.get_project(project_id)
grant_ids = self.list_grant_role_ids(
user_id=user_id, group_id=group_id, domain_id=domain_id,
project_id=project_id, inherited_to_projects=inherited_to_projects
)
return PROVIDERS.role_api.list_roles_from_ids(grant_ids)
@notifications.role_assignment('deleted')
def delete_grant(self, role_id, user_id=None, group_id=None,
domain_id=None, project_id=None,
inherited_to_projects=False,
initiator=None):
# check if role exist before any processing
PROVIDERS.role_api.get_role(role_id)
if group_id is None:
# check if role exists on the user before revoke
self.check_grant_role_id(
role_id, user_id=user_id, group_id=None, domain_id=domain_id,
project_id=project_id,
inherited_to_projects=inherited_to_projects
)
self._invalidate_token_cache(
role_id, group_id, user_id, project_id, domain_id
)
else:
try:
# check if role exists on the group before revoke
self.check_grant_role_id(
role_id, user_id=None, group_id=group_id,
domain_id=domain_id, project_id=project_id,
inherited_to_projects=inherited_to_projects
)
if CONF.token.revoke_by_id:
self._invalidate_token_cache(
role_id, group_id, user_id, project_id, domain_id
)
except exception.GroupNotFound:
LOG.debug('Group %s not found, no tokens to invalidate.',
group_id)
if domain_id:
PROVIDERS.resource_api.get_domain(domain_id)
if project_id:
PROVIDERS.resource_api.get_project(project_id)
self.driver.delete_grant(
role_id, user_id=user_id, group_id=group_id, domain_id=domain_id,
project_id=project_id, inherited_to_projects=inherited_to_projects
)
COMPUTED_ASSIGNMENTS_REGION.invalidate()
# The methods _expand_indirect_assignment, _list_direct_role_assignments
# and _list_effective_role_assignments below are only used on
# list_role_assignments, but they are not in its scope as nested functions
# since it would significantly increase McCabe complexity, that should be
# kept as it is in order to detect unnecessarily complex code, which is not
# this case.
def _expand_indirect_assignment(self, ref, user_id=None, project_id=None,
subtree_ids=None, expand_groups=True):
"""Return a list of expanded role assignments.
        This method is called for each discovered assignment that either needs
a group assignment expanded into individual user assignments, or needs
an inherited assignment to be applied to its children.
In all cases, if either user_id and/or project_id is specified, then we
filter the result on those values.
If project_id is specified and subtree_ids is None, then this
indicates that we are only interested in that one project. If
subtree_ids is not None, then this is an indicator that any
inherited assignments need to be expanded down the tree. The
actual subtree_ids don't need to be used as a filter here, since we
already ensured only those assignments that could affect them
were passed to this method.
If expand_groups is True then we expand groups out to a list of
assignments, one for each member of that group.
"""
def create_group_assignment(base_ref, user_id):
"""Create a group assignment from the provided ref."""
ref = copy.deepcopy(base_ref)
ref['user_id'] = user_id
indirect = ref.setdefault('indirect', {})
indirect['group_id'] = ref.pop('group_id')
return ref
def expand_group_assignment(ref, user_id):
"""Expand group role assignment.
For any group role assignment on a target, it is replaced by a list
of role assignments containing one for each user of that group on
that target.
An example of accepted ref is::
{
'group_id': group_id,
'project_id': project_id,
'role_id': role_id
}
Once expanded, it should be returned as a list of entities like the
            one below, one for each user_id in the provided group_id.
::
{
'user_id': user_id,
'project_id': project_id,
'role_id': role_id,
'indirect' : {
'group_id': group_id
}
}
Returned list will be formatted by the Controller, which will
deduce a role assignment came from group membership if it has both
'user_id' in the main body of the dict and 'group_id' in indirect
subdict.
"""
if user_id:
return [create_group_assignment(ref, user_id=user_id)]
# Note(prashkre): Try to get the users in a group,
# if a group wasn't found in the backend, users are set
# as empty list.
try:
users = PROVIDERS.identity_api.list_users_in_group(
ref['group_id'])
except exception.GroupNotFound:
LOG.warning('Group %(group)s was not found but still has role '
'assignments.', {'group': ref['group_id']})
users = []
return [create_group_assignment(ref, user_id=m['id'])
for m in users]
def expand_inherited_assignment(ref, user_id, project_id, subtree_ids,
expand_groups):
"""Expand inherited role assignments.
If expand_groups is True and this is a group role assignment on a
target, replace it by a list of role assignments containing one for
each user of that group, on every project under that target. If
expand_groups is False, then return a group assignment on an
inherited target.
If this is a user role assignment on a specific target (i.e.
project_id is specified, but subtree_ids is None) then simply
format this as a single assignment (since we are effectively
filtering on project_id). If however, project_id is None or
subtree_ids is not None, then replace this one assignment with a
list of role assignments for that user on every project under
that target.
An example of accepted ref is::
{
'group_id': group_id,
'project_id': parent_id,
'role_id': role_id,
'inherited_to_projects': 'projects'
}
Once expanded, it should be returned as a list of entities like the
            one below, one for each user_id in the provided group_id and
for each subproject_id in the project_id subtree.
::
{
'user_id': user_id,
'project_id': subproject_id,
'role_id': role_id,
'indirect' : {
'group_id': group_id,
'project_id': parent_id
}
}
Returned list will be formatted by the Controller, which will
deduce a role assignment came from group membership if it has both
'user_id' in the main body of the dict and 'group_id' in the
'indirect' subdict, as well as it is possible to deduce if it has
come from inheritance if it contains both a 'project_id' in the
main body of the dict and 'parent_id' in the 'indirect' subdict.
"""
def create_inherited_assignment(base_ref, project_id):
"""Create a project assignment from the provided ref.
base_ref can either be a project or domain inherited
assignment ref.
"""
ref = copy.deepcopy(base_ref)
indirect = ref.setdefault('indirect', {})
if ref.get('project_id'):
indirect['project_id'] = ref.pop('project_id')
else:
indirect['domain_id'] = ref.pop('domain_id')
ref['project_id'] = project_id
ref.pop('inherited_to_projects')
return ref
# Define expanded project list to which to apply this assignment
if project_id:
# Since ref is an inherited assignment and we are filtering by
# project(s), we are only going to apply the assignment to the
# relevant project(s)
project_ids = [project_id]
if subtree_ids:
project_ids += subtree_ids
# If this is a domain inherited assignment, then we know
# that all the project_ids will get this assignment. If
# it's a project inherited assignment, and the assignment
# point is an ancestor of project_id, then we know that
# again all the project_ids will get the assignment. If,
# however, the assignment point is within the subtree,
# then only a partial tree will get the assignment.
resource_api = PROVIDERS.resource_api
if ref.get('project_id'):
if ref['project_id'] in project_ids:
project_ids = (
[x['id'] for x in
resource_api.list_projects_in_subtree(
ref['project_id'])])
elif ref.get('domain_id'):
# A domain inherited assignment, so apply it to all projects
# in this domain
project_ids = (
[x['id'] for x in
PROVIDERS.resource_api.list_projects_in_domain(
ref['domain_id'])])
else:
# It must be a project assignment, so apply it to its subtree
project_ids = (
[x['id'] for x in
PROVIDERS.resource_api.list_projects_in_subtree(
ref['project_id'])])
new_refs = []
if 'group_id' in ref:
if expand_groups:
# Expand role assignment to all group members on any
# inherited target of any of the projects
for ref in expand_group_assignment(ref, user_id):
new_refs += [create_inherited_assignment(ref, proj_id)
for proj_id in project_ids]
else:
# Just place the group assignment on any inherited target
# of any of the projects
new_refs += [create_inherited_assignment(ref, proj_id)
for proj_id in project_ids]
else:
# Expand role assignment for all projects
new_refs += [create_inherited_assignment(ref, proj_id)
for proj_id in project_ids]
return new_refs
if ref.get('inherited_to_projects') == 'projects':
return expand_inherited_assignment(
ref, user_id, project_id, subtree_ids, expand_groups)
elif 'group_id' in ref and expand_groups:
return expand_group_assignment(ref, user_id)
return [ref]
def add_implied_roles(self, role_refs):
"""Expand out implied roles.
The role_refs passed in have had all inheritance and group assignments
expanded out. We now need to look at the role_id in each ref and see
if it is a prior role for some implied roles. If it is, then we need to
duplicate that ref, one for each implied role. We store the prior role
in the indirect dict that is part of such a duplicated ref, so that a
caller can determine where the assignment came from.
"""
def _make_implied_ref_copy(prior_ref, implied_role_id):
# Create a ref for an implied role from the ref of a prior role,
# setting the new role_id to be the implied role and the indirect
# role_id to be the prior role
implied_ref = copy.deepcopy(prior_ref)
implied_ref['role_id'] = implied_role_id
indirect = implied_ref.setdefault('indirect', {})
indirect['role_id'] = prior_ref['role_id']
return implied_ref
if not CONF.token.infer_roles:
return role_refs
try:
implied_roles_cache = {}
role_refs_to_check = list(role_refs)
ref_results = list(role_refs)
checked_role_refs = list()
while(role_refs_to_check):
next_ref = role_refs_to_check.pop()
checked_role_refs.append(next_ref)
next_role_id = next_ref['role_id']
if next_role_id in implied_roles_cache:
implied_roles = implied_roles_cache[next_role_id]
else:
implied_roles = (
PROVIDERS.role_api.list_implied_roles(next_role_id))
implied_roles_cache[next_role_id] = implied_roles
for implied_role in implied_roles:
implied_ref = (
_make_implied_ref_copy(
next_ref, implied_role['implied_role_id']))
if implied_ref in checked_role_refs:
# Avoid traversing a cycle
continue
else:
ref_results.append(implied_ref)
role_refs_to_check.append(implied_ref)
except exception.NotImplemented:
LOG.error('Role driver does not support implied roles.')
return ref_results
def _filter_by_role_id(self, role_id, ref_results):
        # If we arrive here, we need to filter by role_id.
filter_results = []
for ref in ref_results:
if ref['role_id'] == role_id:
filter_results.append(ref)
return filter_results
def _strip_domain_roles(self, role_refs):
"""Post process assignment list for domain roles.
Domain roles are only designed to do the job of inferring other roles
and since that has been done before this method is called, we need to
remove any assignments that include a domain role.
"""
def _role_is_global(role_id):
ref = PROVIDERS.role_api.get_role(role_id)
return (ref['domain_id'] is None)
filter_results = []
for ref in role_refs:
if _role_is_global(ref['role_id']):
filter_results.append(ref)
return filter_results
def _list_effective_role_assignments(self, role_id, user_id, group_id,
domain_id, project_id, subtree_ids,
inherited, source_from_group_ids,
strip_domain_roles):
"""List role assignments in effective mode.
When using effective mode, besides the direct assignments, the indirect
ones that come from grouping or inheritance are retrieved and will then
be expanded.
The resulting list of assignments will be filtered by the provided
parameters. If subtree_ids is not None, then we also want to include
all subtree_ids in the filter as well. Since we are in effective mode,
group can never act as a filter (since group assignments are expanded
into user roles) and domain can only be filter if we want non-inherited
assignments, since domains can't inherit assignments.
The goal of this method is to only ask the driver for those
assignments as could effect the result based on the parameter filters
specified, hence avoiding retrieving a huge list.
"""
def list_role_assignments_for_actor(
role_id, inherited, user_id=None, group_ids=None,
project_id=None, subtree_ids=None, domain_id=None):
"""List role assignments for actor on target.
List direct and indirect assignments for an actor, optionally
for a given target (i.e. projects or domain).
:param role_id: List for a specific role, can be None meaning all
roles
:param inherited: Indicates whether inherited assignments or only
direct assignments are required. If None, then
both are required.
:param user_id: If not None, list only assignments that affect this
user.
:param group_ids: A list of groups required. Only one of user_id
and group_ids can be specified
:param project_id: If specified, only include those assignments
that affect at least this project, with
additionally any projects specified in
subtree_ids
:param subtree_ids: The list of projects in the subtree. If
specified, also include those assignments that
affect these projects. These projects are
guaranteed to be in the same domain as the
project specified in project_id. subtree_ids
can only be specified if project_id has also
been specified.
:param domain_id: If specified, only include those assignments
that affect this domain - by definition this will
not include any inherited assignments
:returns: List of assignments matching the criteria. Any inherited
or group assignments that could affect the resulting
response are included.
"""
project_ids_of_interest = None
if project_id:
if subtree_ids:
project_ids_of_interest = subtree_ids + [project_id]
else:
project_ids_of_interest = [project_id]
# List direct project role assignments
non_inherited_refs = []
if inherited is False or inherited is None:
# Get non inherited assignments
non_inherited_refs = self.driver.list_role_assignments(
role_id=role_id, domain_id=domain_id,
project_ids=project_ids_of_interest, user_id=user_id,
group_ids=group_ids, inherited_to_projects=False)
inherited_refs = []
if inherited is True or inherited is None:
# Get inherited assignments
if project_id:
# The project and any subtree are guaranteed to be owned by
# the same domain, so since we are filtering by these
# specific projects, then we can only get inherited
# assignments from their common domain or from any of
# their parents projects.
# List inherited assignments from the project's domain
proj_domain_id = PROVIDERS.resource_api.get_project(
project_id)['domain_id']
inherited_refs += self.driver.list_role_assignments(
role_id=role_id, domain_id=proj_domain_id,
user_id=user_id, group_ids=group_ids,
inherited_to_projects=True)
# For inherited assignments from projects, since we know
# they are from the same tree the only places these can
# come from are from parents of the main project or
# inherited assignments on the project or subtree itself.
source_ids = [project['id'] for project in
PROVIDERS.resource_api.list_project_parents(
project_id)]
if subtree_ids:
source_ids += project_ids_of_interest
if source_ids:
inherited_refs += self.driver.list_role_assignments(
role_id=role_id, project_ids=source_ids,
user_id=user_id, group_ids=group_ids,
inherited_to_projects=True)
else:
# List inherited assignments without filtering by target
inherited_refs = self.driver.list_role_assignments(
role_id=role_id, user_id=user_id, group_ids=group_ids,
inherited_to_projects=True)
return non_inherited_refs + inherited_refs
# If filtering by group or inherited domain assignment the list is
# guaranteed to be empty
if group_id or (domain_id and inherited):
return []
if user_id and source_from_group_ids:
# You can't do both - and since source_from_group_ids is only used
# internally, this must be a coding error by the caller.
msg = _('Cannot list assignments sourced from groups and filtered '
'by user ID.')
raise exception.UnexpectedError(msg)
# If filtering by domain, then only non-inherited assignments are
# relevant, since domains don't inherit assignments
inherited = False if domain_id else inherited
# List user or explicit group assignments.
# Due to the need to expand implied roles, this call will skip
# filtering by role_id and instead return the whole set of roles.
# Matching on the specified role is performed at the end.
direct_refs = list_role_assignments_for_actor(
role_id=None, user_id=user_id, group_ids=source_from_group_ids,
project_id=project_id, subtree_ids=subtree_ids,
domain_id=domain_id, inherited=inherited)
# And those from the user's groups, so long as we are not restricting
# to a set of source groups (in which case we already got those
# assignments in the direct listing above).
group_refs = []
if not source_from_group_ids and user_id:
group_ids = self._get_group_ids_for_user_id(user_id)
if group_ids:
group_refs = list_role_assignments_for_actor(
role_id=None, project_id=project_id,
subtree_ids=subtree_ids, group_ids=group_ids,
domain_id=domain_id, inherited=inherited)
# Expand grouping and inheritance on retrieved role assignments
refs = []
expand_groups = (source_from_group_ids is None)
for ref in (direct_refs + group_refs):
refs += self._expand_indirect_assignment(
ref, user_id, project_id, subtree_ids, expand_groups)
refs = self.add_implied_roles(refs)
if strip_domain_roles:
refs = self._strip_domain_roles(refs)
if role_id:
refs = self._filter_by_role_id(role_id, refs)
return refs
def _list_direct_role_assignments(self, role_id, user_id, group_id, system,
domain_id, project_id, subtree_ids,
inherited):
"""List role assignments without applying expansion.
Returns a list of direct role assignments, where their attributes match
the provided filters. If subtree_ids is not None, then we also want to
include all subtree_ids in the filter as well.
"""
group_ids = [group_id] if group_id else None
project_ids_of_interest = None
if project_id:
if subtree_ids:
project_ids_of_interest = subtree_ids + [project_id]
else:
project_ids_of_interest = [project_id]
project_and_domain_assignments = []
if not system:
project_and_domain_assignments = self.driver.list_role_assignments(
role_id=role_id, user_id=user_id, group_ids=group_ids,
domain_id=domain_id, project_ids=project_ids_of_interest,
inherited_to_projects=inherited)
system_assignments = []
if system or (not project_id and not domain_id and not system):
if user_id:
assignments = self.list_system_grants_for_user(user_id)
for assignment in assignments:
system_assignments.append(
{'system': {'all': True},
'user_id': user_id,
'role_id': assignment['id']}
)
elif group_id:
assignments = self.list_system_grants_for_group(group_id)
for assignment in assignments:
system_assignments.append(
{'system': {'all': True},
'group_id': group_id,
'role_id': assignment['id']}
)
else:
assignments = self.list_all_system_grants()
for assignment in assignments:
a = {}
if assignment['type'] == self._GROUP_SYSTEM:
a['group_id'] = assignment['actor_id']
elif assignment['type'] == self._USER_SYSTEM:
a['user_id'] = assignment['actor_id']
a['role_id'] = assignment['role_id']
a['system'] = {'all': True}
system_assignments.append(a)
for i, assignment in enumerate(system_assignments):
if role_id and role_id != assignment['role_id']:
system_assignments.pop(i)
assignments = []
for assignment in itertools.chain(
project_and_domain_assignments, system_assignments):
assignments.append(assignment)
return assignments
def list_role_assignments(self, role_id=None, user_id=None, group_id=None,
system=None, domain_id=None, project_id=None,
include_subtree=False, inherited=None,
effective=None, include_names=False,
source_from_group_ids=None,
strip_domain_roles=True):
"""List role assignments, honoring effective mode and provided filters.
Returns a list of role assignments, where their attributes match the
provided filters (role_id, user_id, group_id, domain_id, project_id and
inherited). If include_subtree is True, then assignments on all
descendants of the project specified by project_id are also included.
The inherited filter defaults to None, meaning to get both
non-inherited and inherited role assignments.
If effective mode is specified, this means that rather than simply
return the assignments that match the filters, any group or
inheritance assignments will be expanded. Group assignments will
become assignments for all the users in that group, and inherited
assignments will be shown on the projects below the assignment point.
Think of effective mode as being the list of assignments that actually
affect a user, for example the roles that would be placed in a token.
If include_names is set to true the entities' names are returned
in addition to their id's.
source_from_group_ids is a list of group IDs and, if specified, then
only those assignments that are derived from membership of these groups
are considered, and any such assignments will not be expanded into
their user membership assignments. This is different to a group filter
of the resulting list, instead being a restriction on which assignments
should be considered before expansion of inheritance. This option is
only used internally (i.e. it is not exposed at the API level) and is
only supported in effective mode (since in regular mode there is no
difference between this and a group filter, other than it is a list of
groups).
In effective mode, any domain specific roles are usually stripped from
the returned assignments (since such roles are not placed in tokens).
This stripping can be disabled by specifying strip_domain_roles=False,
which is useful for internal calls like trusts which need to examine
the full set of roles.
"""
subtree_ids = None
if project_id and include_subtree:
subtree_ids = (
[x['id'] for x in
PROVIDERS.resource_api.list_projects_in_subtree(
project_id)])
if system != 'all':
system = None
if effective:
role_assignments = self._list_effective_role_assignments(
role_id, user_id, group_id, domain_id, project_id,
subtree_ids, inherited, source_from_group_ids,
strip_domain_roles)
else:
role_assignments = self._list_direct_role_assignments(
role_id, user_id, group_id, system, domain_id, project_id,
subtree_ids, inherited)
if include_names:
return self._get_names_from_role_assignments(role_assignments)
return role_assignments
def _get_names_from_role_assignments(self, role_assignments):
role_assign_list = []
for role_asgmt in role_assignments:
new_assign = copy.deepcopy(role_asgmt)
for key, value in role_asgmt.items():
if key == 'domain_id':
_domain = PROVIDERS.resource_api.get_domain(value)
new_assign['domain_name'] = _domain['name']
elif key == 'user_id':
try:
# Note(knikolla): Try to get the user, otherwise
# if the user wasn't found in the backend
# use empty values.
_user = PROVIDERS.identity_api.get_user(value)
except exception.UserNotFound:
msg = ('User %(user)s not found in the'
' backend but still has role assignments.')
LOG.warning(msg, {'user': value})
new_assign['user_name'] = ''
new_assign['user_domain_id'] = ''
new_assign['user_domain_name'] = ''
else:
new_assign['user_name'] = _user['name']
new_assign['user_domain_id'] = _user['domain_id']
new_assign['user_domain_name'] = (
PROVIDERS.resource_api.get_domain(
_user['domain_id'])['name'])
elif key == 'group_id':
try:
# Note(knikolla): Try to get the group, otherwise
# if the group wasn't found in the backend
# use empty values.
_group = PROVIDERS.identity_api.get_group(value)
except exception.GroupNotFound:
msg = ('Group %(group)s not found in the'
' backend but still has role assignments.')
LOG.warning(msg, {'group': value})
new_assign['group_name'] = ''
new_assign['group_domain_id'] = ''
new_assign['group_domain_name'] = ''
else:
new_assign['group_name'] = _group['name']
new_assign['group_domain_id'] = _group['domain_id']
new_assign['group_domain_name'] = (
PROVIDERS.resource_api.get_domain(
_group['domain_id'])['name'])
elif key == 'project_id':
_project = PROVIDERS.resource_api.get_project(value)
new_assign['project_name'] = _project['name']
new_assign['project_domain_id'] = _project['domain_id']
new_assign['project_domain_name'] = (
PROVIDERS.resource_api.get_domain(
_project['domain_id'])['name'])
elif key == 'role_id':
_role = PROVIDERS.role_api.get_role(value)
new_assign['role_name'] = _role['name']
if _role['domain_id'] is not None:
new_assign['role_domain_id'] = _role['domain_id']
new_assign['role_domain_name'] = (
PROVIDERS.resource_api.get_domain(
_role['domain_id'])['name'])
role_assign_list.append(new_assign)
return role_assign_list
def delete_group_assignments(self, group_id):
# FIXME(lbragstad): This should be refactored in the Rocky release so
# that we can pass the group_id to the system assignment backend like
# we do with the project and domain assignment backend. Holding off on
# this because it will require an interface change to the backend,
# making it harder to backport for Queens RC.
self.driver.delete_group_assignments(group_id)
system_assignments = self.list_system_grants_for_group(group_id)
for assignment in system_assignments:
self.delete_system_grant_for_group(group_id, assignment['id'])
def delete_user_assignments(self, user_id):
# FIXME(lbragstad): This should be refactored in the Rocky release so
# that we can pass the user_id to the system assignment backend like we
# do with the project and domain assignment backend. Holding off on
# this because it will require an interface change to the backend,
# making it harder to backport for Queens RC.
self.driver.delete_user_assignments(user_id)
system_assignments = self.list_system_grants_for_user(user_id)
for assignment in system_assignments:
self.delete_system_grant_for_user(user_id, assignment['id'])
def check_system_grant_for_user(self, user_id, role_id):
"""Check if a user has a specific role on the system.
:param user_id: the ID of the user in the assignment
:param role_id: the ID of the system role in the assignment
:raises keystone.exception.RoleAssignmentNotFound: if the user doesn't
have a role assignment matching the role_id on the system
"""
target_id = self._SYSTEM_SCOPE_TOKEN
inherited = False
return self.driver.check_system_grant(
role_id, user_id, target_id, inherited
)
def list_system_grants_for_user(self, user_id):
"""Return a list of roles the user has on the system.
:param user_id: the ID of the user
:returns: a list of role assignments the user has system-wide
"""
target_id = self._SYSTEM_SCOPE_TOKEN
assignment_type = self._USER_SYSTEM
grants = self.driver.list_system_grants(
user_id, target_id, assignment_type
)
grant_ids = []
for grant in grants:
grant_ids.append(grant['role_id'])
return PROVIDERS.role_api.list_roles_from_ids(grant_ids)
def create_system_grant_for_user(self, user_id, role_id):
"""Grant a user a role on the system.
:param user_id: the ID of the user
:param role_id: the ID of the role to grant on the system
"""
role = PROVIDERS.role_api.get_role(role_id)
if role.get('domain_id'):
raise exception.ValidationError(
'Role %(role_id)s is a domain-specific role. Unable to use '
'a domain-specific role in a system assignment.' % {
'role_id': role_id
}
)
target_id = self._SYSTEM_SCOPE_TOKEN
assignment_type = self._USER_SYSTEM
inherited = False
self.driver.create_system_grant(
role_id, user_id, target_id, assignment_type, inherited
)
def delete_system_grant_for_user(self, user_id, role_id):
"""Remove a system grant from a user.
:param user_id: the ID of the user
:param role_id: the ID of the role to remove from the user on the
system
:raises keystone.exception.RoleAssignmentNotFound: if the user doesn't
have a role assignment with role_id on the system
"""
target_id = self._SYSTEM_SCOPE_TOKEN
inherited = False
self.driver.delete_system_grant(role_id, user_id, target_id, inherited)
def check_system_grant_for_group(self, group_id, role_id):
"""Check if a group has a specific role on the system.
:param group_id: the ID of the group in the assignment
:param role_id: the ID of the system role in the assignment
:raises keystone.exception.RoleAssignmentNotFound: if the group doesn't
have a role assignment matching the role_id on the system
"""
target_id = self._SYSTEM_SCOPE_TOKEN
inherited = False
return self.driver.check_system_grant(
role_id, group_id, target_id, inherited
)
def list_system_grants_for_group(self, group_id):
"""Return a list of roles the group has on the system.
:param group_id: the ID of the group
:returns: a list of role assignments the group has system-wide
"""
target_id = self._SYSTEM_SCOPE_TOKEN
assignment_type = self._GROUP_SYSTEM
grants = self.driver.list_system_grants(
group_id, target_id, assignment_type
)
grant_ids = []
for grant in grants:
grant_ids.append(grant['role_id'])
return PROVIDERS.role_api.list_roles_from_ids(grant_ids)
def create_system_grant_for_group(self, group_id, role_id):
"""Grant a group a role on the system.
:param group_id: the ID of the group
:param role_id: the ID of the role to grant on the system
"""
role = PROVIDERS.role_api.get_role(role_id)
if role.get('domain_id'):
raise exception.ValidationError(
'Role %(role_id)s is a domain-specific role. Unable to use '
'a domain-specific role in a system assignment.' % {
'role_id': role_id
}
)
target_id = self._SYSTEM_SCOPE_TOKEN
assignment_type = self._GROUP_SYSTEM
inherited = False
self.driver.create_system_grant(
role_id, group_id, target_id, assignment_type, inherited
)
def delete_system_grant_for_group(self, group_id, role_id):
"""Remove a system grant from a group.
:param group_id: the ID of the group
:param role_id: the ID of the role to remove from the group on the
system
:raises keystone.exception.RoleAssignmentNotFound: if the group doesn't
have a role assignment with role_id on the system
"""
target_id = self._SYSTEM_SCOPE_TOKEN
inherited = False
self.driver.delete_system_grant(
role_id, group_id, target_id, inherited
)
def list_all_system_grants(self):
"""Return a list of all system grants."""
actor_id = None
target_id = self._SYSTEM_SCOPE_TOKEN
assignment_type = None
return self.driver.list_system_grants(
actor_id, target_id, assignment_type
)
class RoleManager(manager.Manager):
"""Default pivot point for the Role backend."""
driver_namespace = 'keystone.role'
_provides_api = 'role_api'
_ROLE = 'role'
def __init__(self):
# If there is a specific driver specified for role, then use it.
# Otherwise retrieve the driver type from the assignment driver.
role_driver = CONF.role.driver
if role_driver is None:
# Explicitly load the assignment manager object
assignment_driver = CONF.assignment.driver
assignment_manager_obj = manager.load_driver(
Manager.driver_namespace,
assignment_driver)
role_driver = assignment_manager_obj.default_role_driver()
super(RoleManager, self).__init__(role_driver)
@MEMOIZE
def get_role(self, role_id):
return self.driver.get_role(role_id)
def get_unique_role_by_name(self, role_name, hints=None):
if not hints:
hints = driver_hints.Hints()
hints.add_filter("name", role_name, case_sensitive=True)
found_roles = PROVIDERS.role_api.list_roles(hints)
if not found_roles:
raise exception.RoleNotFound(
_("Role %s is not defined") % role_name
)
elif len(found_roles) == 1:
return {'id': found_roles[0]['id']}
else:
raise exception.AmbiguityError(resource='role',
name=role_name)
def create_role(self, role_id, role, initiator=None):
ret = self.driver.create_role(role_id, role)
notifications.Audit.created(self._ROLE, role_id, initiator)
if MEMOIZE.should_cache(ret):
self.get_role.set(ret, self, role_id)
return ret
@manager.response_truncated
def list_roles(self, hints=None):
return self.driver.list_roles(hints or driver_hints.Hints())
def update_role(self, role_id, role, initiator=None):
original_role = self.driver.get_role(role_id)
if ('domain_id' in role and
role['domain_id'] != original_role['domain_id']):
raise exception.ValidationError(
message=_('Update of `domain_id` is not allowed.'))
ret = self.driver.update_role(role_id, role)
notifications.Audit.updated(self._ROLE, role_id, initiator)
self.get_role.invalidate(self, role_id)
return ret
def delete_role(self, role_id, initiator=None):
PROVIDERS.assignment_api.delete_role_assignments(role_id)
PROVIDERS.assignment_api._send_app_cred_notification_for_role_removal(
role_id
)
self.driver.delete_role(role_id)
notifications.Audit.deleted(self._ROLE, role_id, initiator)
self.get_role.invalidate(self, role_id)
reason = (
'Invalidating the token cache because role %(role_id)s has been '
'removed. Role assignments for users will be recalculated and '
'enforced accordingly the next time they authenticate or validate '
'a token' % {'role_id': role_id}
)
notifications.invalidate_token_cache_notification(reason)
COMPUTED_ASSIGNMENTS_REGION.invalidate()
# TODO(ayoung): Add notification
def create_implied_role(self, prior_role_id, implied_role_id):
implied_role = self.driver.get_role(implied_role_id)
prior_role = self.driver.get_role(prior_role_id)
if implied_role['name'] in CONF.assignment.prohibited_implied_role:
raise exception.InvalidImpliedRole(role_id=implied_role_id)
if prior_role['domain_id'] is None and implied_role['domain_id']:
msg = _('Global role cannot imply a domain-specific role')
raise exception.InvalidImpliedRole(msg,
role_id=implied_role_id)
response = self.driver.create_implied_role(
prior_role_id, implied_role_id)
COMPUTED_ASSIGNMENTS_REGION.invalidate()
return response
def delete_implied_role(self, prior_role_id, implied_role_id):
self.driver.delete_implied_role(prior_role_id, implied_role_id)
COMPUTED_ASSIGNMENTS_REGION.invalidate()
| # Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Main entry point into the Assignment service."""
import copy
import itertools
from oslo_log import log
from keystone.common import cache
from keystone.common import driver_hints
from keystone.common import manager
from keystone.common import provider_api
import keystone.conf
from keystone import exception
from keystone.i18n import _
from keystone import notifications
CONF = keystone.conf.CONF
LOG = log.getLogger(__name__)
PROVIDERS = provider_api.ProviderAPIs
# This is a general cache region for assignment administration (CRUD
# operations).
MEMOIZE = cache.get_memoization_decorator(group='role')
# This builds a discrete cache region dedicated to role assignments computed
# for a given user + project/domain pair. Any write operation to add or remove
# any role assignment should invalidate this entire cache region.
COMPUTED_ASSIGNMENTS_REGION = cache.create_region(name='computed assignments')
MEMOIZE_COMPUTED_ASSIGNMENTS = cache.get_memoization_decorator(
group='role',
region=COMPUTED_ASSIGNMENTS_REGION)
@notifications.listener
class Manager(manager.Manager):
"""Default pivot point for the Assignment backend.
See :class:`keystone.common.manager.Manager` for more details on how this
dynamically calls the backend.
"""
driver_namespace = 'keystone.assignment'
_provides_api = 'assignment_api'
_SYSTEM_SCOPE_TOKEN = 'system'
_USER_SYSTEM = 'UserSystem'
_GROUP_SYSTEM = 'GroupSystem'
_PROJECT = 'project'
_ROLE_REMOVED_FROM_USER = 'role_removed_from_user'
_INVALIDATION_USER_PROJECT_TOKENS = 'invalidate_user_project_tokens'
def __init__(self):
assignment_driver = CONF.assignment.driver
super(Manager, self).__init__(assignment_driver)
self.event_callbacks = {
notifications.ACTIONS.deleted: {
'domain': [self._delete_domain_assignments],
},
}
def _delete_domain_assignments(self, service, resource_type, operations,
payload):
domain_id = payload['resource_info']
self.driver.delete_domain_assignments(domain_id)
def _get_group_ids_for_user_id(self, user_id):
# TODO(morganfainberg): Implement a way to get only group_ids
# instead of the more expensive to_dict() call for each record.
return [x['id'] for
x in PROVIDERS.identity_api.list_groups_for_user(user_id)]
def list_user_ids_for_project(self, tenant_id):
PROVIDERS.resource_api.get_project(tenant_id)
assignment_list = self.list_role_assignments(
project_id=tenant_id, effective=True)
# Use set() to process the list to remove any duplicates
return list(set([x['user_id'] for x in assignment_list]))
def _send_app_cred_notification_for_role_removal(self, role_id):
"""Delete all application credential for a specific role.
:param role_id: role identifier
:type role_id: string
"""
assignments = self.list_role_assignments(role_id=role_id)
for assignment in assignments:
if 'user_id' in assignment and 'project_id' in assignment:
payload = {
'user_id': assignment['user_id'],
'project_id': assignment['project_id']
}
notifications.Audit.internal(
notifications.REMOVE_APP_CREDS_FOR_USER, payload
)
@MEMOIZE_COMPUTED_ASSIGNMENTS
def get_roles_for_user_and_project(self, user_id, tenant_id):
"""Get the roles associated with a user within given project.
This includes roles directly assigned to the user on the
project, as well as those by virtue of group membership or
inheritance.
:returns: a list of role ids.
:raises keystone.exception.ProjectNotFound: If the project doesn't
exist.
"""
PROVIDERS.resource_api.get_project(tenant_id)
assignment_list = self.list_role_assignments(
user_id=user_id, project_id=tenant_id, effective=True)
# Use set() to process the list to remove any duplicates
return list(set([x['role_id'] for x in assignment_list]))
@MEMOIZE_COMPUTED_ASSIGNMENTS
def get_roles_for_trustor_and_project(self, trustor_id, project_id):
"""Get the roles associated with a trustor within given project.
This includes roles directly assigned to the trustor on the
project, as well as those by virtue of group membership or
inheritance, but it doesn't include the domain roles.
:returns: a list of role ids.
:raises keystone.exception.ProjectNotFound: If the project doesn't
exist.
"""
PROVIDERS.resource_api.get_project(project_id)
assignment_list = self.list_role_assignments(
user_id=trustor_id, project_id=project_id, effective=True,
strip_domain_roles=False)
# Use set() to process the list to remove any duplicates
return list(set([x['role_id'] for x in assignment_list]))
@MEMOIZE_COMPUTED_ASSIGNMENTS
def get_roles_for_user_and_domain(self, user_id, domain_id):
"""Get the roles associated with a user within given domain.
:returns: a list of role ids.
:raises keystone.exception.DomainNotFound: If the domain doesn't exist.
"""
PROVIDERS.resource_api.get_domain(domain_id)
assignment_list = self.list_role_assignments(
user_id=user_id, domain_id=domain_id, effective=True)
# Use set() to process the list to remove any duplicates
return list(set([x['role_id'] for x in assignment_list]))
def get_roles_for_groups(self, group_ids, project_id=None, domain_id=None):
"""Get a list of roles for this group on domain and/or project."""
# if no group ids were passed, there are no roles. Without this check,
# all assignments for the project or domain will be fetched,
# which is not what we want.
if not group_ids:
return []
if project_id is not None:
PROVIDERS.resource_api.get_project(project_id)
assignment_list = self.list_role_assignments(
source_from_group_ids=group_ids, project_id=project_id,
effective=True)
elif domain_id is not None:
assignment_list = self.list_role_assignments(
source_from_group_ids=group_ids, domain_id=domain_id,
effective=True)
else:
raise AttributeError(_("Must specify either domain or project"))
role_ids = list(set([x['role_id'] for x in assignment_list]))
return PROVIDERS.role_api.list_roles_from_ids(role_ids)
@notifications.role_assignment('created')
def _add_role_to_user_and_project_adapter(self, role_id, user_id=None,
group_id=None, domain_id=None,
project_id=None,
inherited_to_projects=False,
context=None):
# The parameters for this method must match the parameters for
# create_grant so that the notifications.role_assignment decorator
# will work.
PROVIDERS.resource_api.get_project(project_id)
PROVIDERS.role_api.get_role(role_id)
self.driver.add_role_to_user_and_project(user_id, project_id, role_id)
def add_role_to_user_and_project(self, user_id, tenant_id, role_id):
self._add_role_to_user_and_project_adapter(
role_id, user_id=user_id, project_id=tenant_id)
COMPUTED_ASSIGNMENTS_REGION.invalidate()
# TODO(henry-nash): We might want to consider list limiting this at some
# point in the future.
@MEMOIZE_COMPUTED_ASSIGNMENTS
def list_projects_for_user(self, user_id):
# FIXME(lbragstad): Without the use of caching, listing effective role
# assignments is slow, especially with large data set (lots of users
# with multiple role assignments). This should serve as a marker in
# case we have the opportunity to come back and optimize this code so
# that it can be performant without having a hard dependency on
# caching. Please see https://bugs.launchpad.net/keystone/+bug/1700852
# for more details.
assignment_list = self.list_role_assignments(
user_id=user_id, effective=True)
# Use set() to process the list to remove any duplicates
project_ids = list(set([x['project_id'] for x in assignment_list
if x.get('project_id')]))
return PROVIDERS.resource_api.list_projects_from_ids(project_ids)
# TODO(henry-nash): We might want to consider list limiting this at some
# point in the future.
@MEMOIZE_COMPUTED_ASSIGNMENTS
def list_domains_for_user(self, user_id):
assignment_list = self.list_role_assignments(
user_id=user_id, effective=True)
# Use set() to process the list to remove any duplicates
domain_ids = list(set([x['domain_id'] for x in assignment_list
if x.get('domain_id')]))
return PROVIDERS.resource_api.list_domains_from_ids(domain_ids)
def list_domains_for_groups(self, group_ids):
assignment_list = self.list_role_assignments(
source_from_group_ids=group_ids, effective=True)
domain_ids = list(set([x['domain_id'] for x in assignment_list
if x.get('domain_id')]))
return PROVIDERS.resource_api.list_domains_from_ids(domain_ids)
def list_projects_for_groups(self, group_ids):
assignment_list = self.list_role_assignments(
source_from_group_ids=group_ids, effective=True)
project_ids = list(set([x['project_id'] for x in assignment_list
if x.get('project_id')]))
return PROVIDERS.resource_api.list_projects_from_ids(project_ids)
@notifications.role_assignment('deleted')
def _remove_role_from_user_and_project_adapter(self, role_id, user_id=None,
group_id=None,
domain_id=None,
project_id=None,
inherited_to_projects=False,
context=None):
# The parameters for this method must match the parameters for
# delete_grant so that the notifications.role_assignment decorator
# will work.
self.driver.remove_role_from_user_and_project(user_id, project_id,
role_id)
payload = {'user_id': user_id, 'project_id': project_id}
notifications.Audit.internal(
notifications.REMOVE_APP_CREDS_FOR_USER,
payload
)
self._invalidate_token_cache(
role_id, group_id, user_id, project_id, domain_id
)
def remove_role_from_user_and_project(self, user_id, tenant_id, role_id):
self._remove_role_from_user_and_project_adapter(
role_id, user_id=user_id, project_id=tenant_id)
COMPUTED_ASSIGNMENTS_REGION.invalidate()
def _invalidate_token_cache(self, role_id, group_id, user_id, project_id,
domain_id):
if group_id:
actor_type = 'group'
actor_id = group_id
elif user_id:
actor_type = 'user'
actor_id = user_id
if domain_id:
target_type = 'domain'
target_id = domain_id
elif project_id:
target_type = 'project'
target_id = project_id
reason = (
'Invalidating the token cache because role %(role_id)s was '
'removed from %(actor_type)s %(actor_id)s on %(target_type)s '
'%(target_id)s.' %
{'role_id': role_id, 'actor_type': actor_type,
'actor_id': actor_id, 'target_type': target_type,
'target_id': target_id}
)
notifications.invalidate_token_cache_notification(reason)
@notifications.role_assignment('created')
def create_grant(self, role_id, user_id=None, group_id=None,
domain_id=None, project_id=None,
inherited_to_projects=False,
initiator=None):
role = PROVIDERS.role_api.get_role(role_id)
if domain_id:
PROVIDERS.resource_api.get_domain(domain_id)
if project_id:
project = PROVIDERS.resource_api.get_project(project_id)
# For domain specific roles, the domain of the project
# and role must match
if role['domain_id'] and project['domain_id'] != role['domain_id']:
raise exception.DomainSpecificRoleMismatch(
role_id=role_id,
project_id=project_id)
self.driver.create_grant(
role_id, user_id=user_id, group_id=group_id, domain_id=domain_id,
project_id=project_id, inherited_to_projects=inherited_to_projects
)
COMPUTED_ASSIGNMENTS_REGION.invalidate()
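    # Sketch of the domain-specific role check above (hypothetical IDs): a
    # role whose domain_id is 'domain-A' may only be granted on projects
    # owned by domain-A, so
    #
    #   PROVIDERS.assignment_api.create_grant(
    #       'domain-A-role', user_id='user-1', project_id='proj-in-domain-B')
    #
    # raises DomainSpecificRoleMismatch instead of creating the assignment.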
def get_grant(self, role_id, user_id=None, group_id=None,
domain_id=None, project_id=None,
inherited_to_projects=False):
role_ref = PROVIDERS.role_api.get_role(role_id)
if domain_id:
PROVIDERS.resource_api.get_domain(domain_id)
if project_id:
PROVIDERS.resource_api.get_project(project_id)
self.check_grant_role_id(
role_id, user_id=user_id, group_id=group_id, domain_id=domain_id,
project_id=project_id, inherited_to_projects=inherited_to_projects
)
return role_ref
def list_grants(self, user_id=None, group_id=None,
domain_id=None, project_id=None,
inherited_to_projects=False):
if domain_id:
PROVIDERS.resource_api.get_domain(domain_id)
if project_id:
PROVIDERS.resource_api.get_project(project_id)
grant_ids = self.list_grant_role_ids(
user_id=user_id, group_id=group_id, domain_id=domain_id,
project_id=project_id, inherited_to_projects=inherited_to_projects
)
return PROVIDERS.role_api.list_roles_from_ids(grant_ids)
@notifications.role_assignment('deleted')
def delete_grant(self, role_id, user_id=None, group_id=None,
domain_id=None, project_id=None,
inherited_to_projects=False,
initiator=None):
        # Check that the role exists before any processing
PROVIDERS.role_api.get_role(role_id)
if group_id is None:
# check if role exists on the user before revoke
self.check_grant_role_id(
role_id, user_id=user_id, group_id=None, domain_id=domain_id,
project_id=project_id,
inherited_to_projects=inherited_to_projects
)
self._invalidate_token_cache(
role_id, group_id, user_id, project_id, domain_id
)
else:
try:
# check if role exists on the group before revoke
self.check_grant_role_id(
role_id, user_id=None, group_id=group_id,
domain_id=domain_id, project_id=project_id,
inherited_to_projects=inherited_to_projects
)
if CONF.token.revoke_by_id:
self._invalidate_token_cache(
role_id, group_id, user_id, project_id, domain_id
)
except exception.GroupNotFound:
LOG.debug('Group %s not found, no tokens to invalidate.',
group_id)
if domain_id:
PROVIDERS.resource_api.get_domain(domain_id)
if project_id:
PROVIDERS.resource_api.get_project(project_id)
self.driver.delete_grant(
role_id, user_id=user_id, group_id=group_id, domain_id=domain_id,
project_id=project_id, inherited_to_projects=inherited_to_projects
)
COMPUTED_ASSIGNMENTS_REGION.invalidate()
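    # The grant CRUD methods above share one calling pattern (hypothetical
    # IDs, shown as comments only):
    #
    #   create_grant('r1', user_id='u1', project_id='p1')
    #   get_grant('r1', user_id='u1', project_id='p1')    # -> role dict
    #   list_grants(user_id='u1', project_id='p1')        # -> [role dicts]
    #   delete_grant('r1', user_id='u1', project_id='p1')
    #
    # Each mutation also invalidates COMPUTED_ASSIGNMENTS_REGION so cached
    # effective-assignment lookups are recomputed on next use.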
# The methods _expand_indirect_assignment, _list_direct_role_assignments
    # and _list_effective_role_assignments below are only used by
    # list_role_assignments, but they are not nested functions within its
    # scope: that would significantly increase its McCabe complexity, which
    # we keep low in order to flag genuinely over-complex code, and this is
    # not such a case.
def _expand_indirect_assignment(self, ref, user_id=None, project_id=None,
subtree_ids=None, expand_groups=True):
"""Return a list of expanded role assignments.
        This method is called for each discovered assignment that either needs
a group assignment expanded into individual user assignments, or needs
an inherited assignment to be applied to its children.
In all cases, if either user_id and/or project_id is specified, then we
filter the result on those values.
If project_id is specified and subtree_ids is None, then this
indicates that we are only interested in that one project. If
subtree_ids is not None, then this is an indicator that any
inherited assignments need to be expanded down the tree. The
actual subtree_ids don't need to be used as a filter here, since we
already ensured only those assignments that could affect them
were passed to this method.
If expand_groups is True then we expand groups out to a list of
assignments, one for each member of that group.
"""
def create_group_assignment(base_ref, user_id):
"""Create a group assignment from the provided ref."""
ref = copy.deepcopy(base_ref)
ref['user_id'] = user_id
indirect = ref.setdefault('indirect', {})
indirect['group_id'] = ref.pop('group_id')
return ref
def expand_group_assignment(ref, user_id):
"""Expand group role assignment.
For any group role assignment on a target, it is replaced by a list
of role assignments containing one for each user of that group on
that target.
An example of accepted ref is::
{
'group_id': group_id,
'project_id': project_id,
'role_id': role_id
}
Once expanded, it should be returned as a list of entities like the
            one below, one for each user_id in the provided group_id.
::
{
'user_id': user_id,
'project_id': project_id,
'role_id': role_id,
'indirect' : {
'group_id': group_id
}
}
Returned list will be formatted by the Controller, which will
deduce a role assignment came from group membership if it has both
'user_id' in the main body of the dict and 'group_id' in indirect
subdict.
"""
if user_id:
return [create_group_assignment(ref, user_id=user_id)]
            # Note(prashkre): Try to get the users in the group; if the
            # group wasn't found in the backend, fall back to an empty
            # list of users.
try:
users = PROVIDERS.identity_api.list_users_in_group(
ref['group_id'])
except exception.GroupNotFound:
LOG.warning('Group %(group)s was not found but still has role '
'assignments.', {'group': ref['group_id']})
users = []
return [create_group_assignment(ref, user_id=m['id'])
for m in users]
def expand_inherited_assignment(ref, user_id, project_id, subtree_ids,
expand_groups):
"""Expand inherited role assignments.
If expand_groups is True and this is a group role assignment on a
target, replace it by a list of role assignments containing one for
each user of that group, on every project under that target. If
expand_groups is False, then return a group assignment on an
inherited target.
If this is a user role assignment on a specific target (i.e.
project_id is specified, but subtree_ids is None) then simply
format this as a single assignment (since we are effectively
filtering on project_id). If however, project_id is None or
subtree_ids is not None, then replace this one assignment with a
list of role assignments for that user on every project under
that target.
An example of accepted ref is::
{
'group_id': group_id,
'project_id': parent_id,
'role_id': role_id,
'inherited_to_projects': 'projects'
}
Once expanded, it should be returned as a list of entities like the
            one below, one for each user_id in the provided group_id and
for each subproject_id in the project_id subtree.
::
{
'user_id': user_id,
'project_id': subproject_id,
'role_id': role_id,
'indirect' : {
'group_id': group_id,
'project_id': parent_id
}
}
Returned list will be formatted by the Controller, which will
deduce a role assignment came from group membership if it has both
'user_id' in the main body of the dict and 'group_id' in the
            'indirect' subdict; similarly, it can deduce that an assignment
            came from inheritance if it contains both a 'project_id' in the
            main body of the dict and 'parent_id' in the 'indirect' subdict.
"""
def create_inherited_assignment(base_ref, project_id):
"""Create a project assignment from the provided ref.
base_ref can either be a project or domain inherited
assignment ref.
"""
ref = copy.deepcopy(base_ref)
indirect = ref.setdefault('indirect', {})
if ref.get('project_id'):
indirect['project_id'] = ref.pop('project_id')
else:
indirect['domain_id'] = ref.pop('domain_id')
ref['project_id'] = project_id
ref.pop('inherited_to_projects')
return ref
# Define expanded project list to which to apply this assignment
if project_id:
# Since ref is an inherited assignment and we are filtering by
# project(s), we are only going to apply the assignment to the
# relevant project(s)
project_ids = [project_id]
if subtree_ids:
project_ids += subtree_ids
# If this is a domain inherited assignment, then we know
# that all the project_ids will get this assignment. If
# it's a project inherited assignment, and the assignment
# point is an ancestor of project_id, then we know that
# again all the project_ids will get the assignment. If,
# however, the assignment point is within the subtree,
# then only a partial tree will get the assignment.
resource_api = PROVIDERS.resource_api
if ref.get('project_id'):
if ref['project_id'] in project_ids:
project_ids = (
[x['id'] for x in
resource_api.list_projects_in_subtree(
ref['project_id'])])
elif ref.get('domain_id'):
# A domain inherited assignment, so apply it to all projects
# in this domain
project_ids = (
[x['id'] for x in
PROVIDERS.resource_api.list_projects_in_domain(
ref['domain_id'])])
else:
# It must be a project assignment, so apply it to its subtree
project_ids = (
[x['id'] for x in
PROVIDERS.resource_api.list_projects_in_subtree(
ref['project_id'])])
new_refs = []
if 'group_id' in ref:
if expand_groups:
# Expand role assignment to all group members on any
# inherited target of any of the projects
for ref in expand_group_assignment(ref, user_id):
new_refs += [create_inherited_assignment(ref, proj_id)
for proj_id in project_ids]
else:
# Just place the group assignment on any inherited target
# of any of the projects
new_refs += [create_inherited_assignment(ref, proj_id)
for proj_id in project_ids]
else:
# Expand role assignment for all projects
new_refs += [create_inherited_assignment(ref, proj_id)
for proj_id in project_ids]
return new_refs
if ref.get('inherited_to_projects') == 'projects':
return expand_inherited_assignment(
ref, user_id, project_id, subtree_ids, expand_groups)
elif 'group_id' in ref and expand_groups:
return expand_group_assignment(ref, user_id)
return [ref]
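    # Worked example of the expansion above (hypothetical IDs): an inherited
    # group assignment such as
    #
    #   {'group_id': 'g1', 'project_id': 'parent', 'role_id': 'r1',
    #    'inherited_to_projects': 'projects'}
    #
    # comes back as one entry per (group member, child project), e.g.
    #
    #   {'user_id': 'u1', 'project_id': 'child-1', 'role_id': 'r1',
    #    'indirect': {'group_id': 'g1', 'project_id': 'parent'}}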
def add_implied_roles(self, role_refs):
"""Expand out implied roles.
The role_refs passed in have had all inheritance and group assignments
expanded out. We now need to look at the role_id in each ref and see
if it is a prior role for some implied roles. If it is, then we need to
duplicate that ref, one for each implied role. We store the prior role
in the indirect dict that is part of such a duplicated ref, so that a
caller can determine where the assignment came from.
"""
def _make_implied_ref_copy(prior_ref, implied_role_id):
# Create a ref for an implied role from the ref of a prior role,
# setting the new role_id to be the implied role and the indirect
# role_id to be the prior role
implied_ref = copy.deepcopy(prior_ref)
implied_ref['role_id'] = implied_role_id
indirect = implied_ref.setdefault('indirect', {})
indirect['role_id'] = prior_ref['role_id']
return implied_ref
if not CONF.token.infer_roles:
return role_refs
try:
implied_roles_cache = {}
role_refs_to_check = list(role_refs)
ref_results = list(role_refs)
checked_role_refs = list()
            while role_refs_to_check:
next_ref = role_refs_to_check.pop()
checked_role_refs.append(next_ref)
next_role_id = next_ref['role_id']
if next_role_id in implied_roles_cache:
implied_roles = implied_roles_cache[next_role_id]
else:
implied_roles = (
PROVIDERS.role_api.list_implied_roles(next_role_id))
implied_roles_cache[next_role_id] = implied_roles
for implied_role in implied_roles:
implied_ref = (
_make_implied_ref_copy(
next_ref, implied_role['implied_role_id']))
if implied_ref in checked_role_refs:
# Avoid traversing a cycle
continue
else:
ref_results.append(implied_ref)
role_refs_to_check.append(implied_ref)
except exception.NotImplemented:
LOG.error('Role driver does not support implied roles.')
return ref_results
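    # Illustration (hypothetical roles): if 'admin' implies 'member' and
    # 'member' implies 'reader', a single ref
    #
    #   {'user_id': 'u1', 'project_id': 'p1', 'role_id': 'admin'}
    #
    # expands into three refs, the derived ones carrying
    # {'indirect': {'role_id': <prior role>}} so callers can tell where each
    # assignment came from; cycles in the implication graph are broken by
    # the checked_role_refs bookkeeping above.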
def _filter_by_role_id(self, role_id, ref_results):
        # If we arrive here, we need to filter by role_id.
filter_results = []
for ref in ref_results:
if ref['role_id'] == role_id:
filter_results.append(ref)
return filter_results
def _strip_domain_roles(self, role_refs):
"""Post process assignment list for domain roles.
Domain roles are only designed to do the job of inferring other roles
and since that has been done before this method is called, we need to
remove any assignments that include a domain role.
"""
def _role_is_global(role_id):
ref = PROVIDERS.role_api.get_role(role_id)
return (ref['domain_id'] is None)
filter_results = []
for ref in role_refs:
if _role_is_global(ref['role_id']):
filter_results.append(ref)
return filter_results
def _list_effective_role_assignments(self, role_id, user_id, group_id,
domain_id, project_id, subtree_ids,
inherited, source_from_group_ids,
strip_domain_roles):
"""List role assignments in effective mode.
When using effective mode, besides the direct assignments, the indirect
ones that come from grouping or inheritance are retrieved and will then
be expanded.
The resulting list of assignments will be filtered by the provided
parameters. If subtree_ids is not None, then we also want to include
all subtree_ids in the filter as well. Since we are in effective mode,
group can never act as a filter (since group assignments are expanded
        into user roles) and domain can only be a filter if we want non-inherited
assignments, since domains can't inherit assignments.
        The goal of this method is to ask the driver only for those
        assignments that could affect the result, based on the parameter
        filters specified, hence avoiding retrieving a huge list.
"""
def list_role_assignments_for_actor(
role_id, inherited, user_id=None, group_ids=None,
project_id=None, subtree_ids=None, domain_id=None):
"""List role assignments for actor on target.
List direct and indirect assignments for an actor, optionally
for a given target (i.e. projects or domain).
:param role_id: List for a specific role, can be None meaning all
roles
:param inherited: Indicates whether inherited assignments or only
direct assignments are required. If None, then
both are required.
:param user_id: If not None, list only assignments that affect this
user.
:param group_ids: A list of groups required. Only one of user_id
and group_ids can be specified
:param project_id: If specified, only include those assignments
that affect at least this project, with
additionally any projects specified in
subtree_ids
:param subtree_ids: The list of projects in the subtree. If
specified, also include those assignments that
affect these projects. These projects are
guaranteed to be in the same domain as the
project specified in project_id. subtree_ids
can only be specified if project_id has also
been specified.
:param domain_id: If specified, only include those assignments
that affect this domain - by definition this will
not include any inherited assignments
:returns: List of assignments matching the criteria. Any inherited
or group assignments that could affect the resulting
response are included.
"""
project_ids_of_interest = None
if project_id:
if subtree_ids:
project_ids_of_interest = subtree_ids + [project_id]
else:
project_ids_of_interest = [project_id]
# List direct project role assignments
non_inherited_refs = []
if inherited is False or inherited is None:
# Get non inherited assignments
non_inherited_refs = self.driver.list_role_assignments(
role_id=role_id, domain_id=domain_id,
project_ids=project_ids_of_interest, user_id=user_id,
group_ids=group_ids, inherited_to_projects=False)
inherited_refs = []
if inherited is True or inherited is None:
# Get inherited assignments
if project_id:
# The project and any subtree are guaranteed to be owned by
# the same domain, so since we are filtering by these
# specific projects, then we can only get inherited
# assignments from their common domain or from any of
# their parents projects.
# List inherited assignments from the project's domain
proj_domain_id = PROVIDERS.resource_api.get_project(
project_id)['domain_id']
inherited_refs += self.driver.list_role_assignments(
role_id=role_id, domain_id=proj_domain_id,
user_id=user_id, group_ids=group_ids,
inherited_to_projects=True)
# For inherited assignments from projects, since we know
# they are from the same tree the only places these can
# come from are from parents of the main project or
# inherited assignments on the project or subtree itself.
source_ids = [project['id'] for project in
PROVIDERS.resource_api.list_project_parents(
project_id)]
if subtree_ids:
source_ids += project_ids_of_interest
if source_ids:
inherited_refs += self.driver.list_role_assignments(
role_id=role_id, project_ids=source_ids,
user_id=user_id, group_ids=group_ids,
inherited_to_projects=True)
else:
# List inherited assignments without filtering by target
inherited_refs = self.driver.list_role_assignments(
role_id=role_id, user_id=user_id, group_ids=group_ids,
inherited_to_projects=True)
return non_inherited_refs + inherited_refs
# If filtering by group or inherited domain assignment the list is
# guaranteed to be empty
if group_id or (domain_id and inherited):
return []
if user_id and source_from_group_ids:
# You can't do both - and since source_from_group_ids is only used
# internally, this must be a coding error by the caller.
msg = _('Cannot list assignments sourced from groups and filtered '
'by user ID.')
raise exception.UnexpectedError(msg)
# If filtering by domain, then only non-inherited assignments are
# relevant, since domains don't inherit assignments
inherited = False if domain_id else inherited
# List user or explicit group assignments.
# Due to the need to expand implied roles, this call will skip
# filtering by role_id and instead return the whole set of roles.
# Matching on the specified role is performed at the end.
direct_refs = list_role_assignments_for_actor(
role_id=None, user_id=user_id, group_ids=source_from_group_ids,
project_id=project_id, subtree_ids=subtree_ids,
domain_id=domain_id, inherited=inherited)
# And those from the user's groups, so long as we are not restricting
# to a set of source groups (in which case we already got those
# assignments in the direct listing above).
group_refs = []
if not source_from_group_ids and user_id:
group_ids = self._get_group_ids_for_user_id(user_id)
if group_ids:
group_refs = list_role_assignments_for_actor(
role_id=None, project_id=project_id,
subtree_ids=subtree_ids, group_ids=group_ids,
domain_id=domain_id, inherited=inherited)
# Expand grouping and inheritance on retrieved role assignments
refs = []
expand_groups = (source_from_group_ids is None)
for ref in (direct_refs + group_refs):
refs += self._expand_indirect_assignment(
ref, user_id, project_id, subtree_ids, expand_groups)
refs = self.add_implied_roles(refs)
if strip_domain_roles:
refs = self._strip_domain_roles(refs)
if role_id:
refs = self._filter_by_role_id(role_id, refs)
return refs
def _list_direct_role_assignments(self, role_id, user_id, group_id, system,
domain_id, project_id, subtree_ids,
inherited):
"""List role assignments without applying expansion.
Returns a list of direct role assignments, where their attributes match
the provided filters. If subtree_ids is not None, then we also want to
include all subtree_ids in the filter as well.
"""
group_ids = [group_id] if group_id else None
project_ids_of_interest = None
if project_id:
if subtree_ids:
project_ids_of_interest = subtree_ids + [project_id]
else:
project_ids_of_interest = [project_id]
project_and_domain_assignments = []
if not system:
project_and_domain_assignments = self.driver.list_role_assignments(
role_id=role_id, user_id=user_id, group_ids=group_ids,
domain_id=domain_id, project_ids=project_ids_of_interest,
inherited_to_projects=inherited)
system_assignments = []
if system or (not project_id and not domain_id and not system):
if user_id:
assignments = self.list_system_grants_for_user(user_id)
for assignment in assignments:
system_assignments.append(
{'system': {'all': True},
'user_id': user_id,
'role_id': assignment['id']}
)
elif group_id:
assignments = self.list_system_grants_for_group(group_id)
for assignment in assignments:
system_assignments.append(
{'system': {'all': True},
'group_id': group_id,
'role_id': assignment['id']}
)
else:
assignments = self.list_all_system_grants()
for assignment in assignments:
a = {}
if assignment['type'] == self._GROUP_SYSTEM:
a['group_id'] = assignment['actor_id']
elif assignment['type'] == self._USER_SYSTEM:
a['user_id'] = assignment['actor_id']
a['role_id'] = assignment['role_id']
a['system'] = {'all': True}
system_assignments.append(a)
        # Filter by role_id with a list comprehension; popping items from
        # the list while enumerating it would skip elements.
        if role_id:
            system_assignments = [
                assignment for assignment in system_assignments
                if assignment['role_id'] == role_id
            ]
assignments = []
for assignment in itertools.chain(
project_and_domain_assignments, system_assignments):
assignments.append(assignment)
return assignments
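    # Shape of the system assignments built above (hypothetical IDs):
    #
    #   {'system': {'all': True}, 'user_id': 'u1', 'role_id': 'r1'}
    #   {'system': {'all': True}, 'group_id': 'g1', 'role_id': 'r1'}
    #
    # i.e. the same layout as project/domain assignments, with the target
    # expressed as 'system' rather than 'project_id'/'domain_id'.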
def list_role_assignments(self, role_id=None, user_id=None, group_id=None,
system=None, domain_id=None, project_id=None,
include_subtree=False, inherited=None,
effective=None, include_names=False,
source_from_group_ids=None,
strip_domain_roles=True):
"""List role assignments, honoring effective mode and provided filters.
Returns a list of role assignments, where their attributes match the
provided filters (role_id, user_id, group_id, domain_id, project_id and
inherited). If include_subtree is True, then assignments on all
descendants of the project specified by project_id are also included.
The inherited filter defaults to None, meaning to get both
non-inherited and inherited role assignments.
If effective mode is specified, this means that rather than simply
return the assignments that match the filters, any group or
inheritance assignments will be expanded. Group assignments will
become assignments for all the users in that group, and inherited
assignments will be shown on the projects below the assignment point.
Think of effective mode as being the list of assignments that actually
affect a user, for example the roles that would be placed in a token.
        If include_names is set to true then the entities' names are
        returned in addition to their IDs.
source_from_group_ids is a list of group IDs and, if specified, then
only those assignments that are derived from membership of these groups
are considered, and any such assignments will not be expanded into
their user membership assignments. This is different to a group filter
of the resulting list, instead being a restriction on which assignments
should be considered before expansion of inheritance. This option is
only used internally (i.e. it is not exposed at the API level) and is
only supported in effective mode (since in regular mode there is no
difference between this and a group filter, other than it is a list of
groups).
In effective mode, any domain specific roles are usually stripped from
the returned assignments (since such roles are not placed in tokens).
This stripping can be disabled by specifying strip_domain_roles=False,
which is useful for internal calls like trusts which need to examine
the full set of roles.
"""
subtree_ids = None
if project_id and include_subtree:
subtree_ids = (
[x['id'] for x in
PROVIDERS.resource_api.list_projects_in_subtree(
project_id)])
if system != 'all':
system = None
if effective:
role_assignments = self._list_effective_role_assignments(
role_id, user_id, group_id, domain_id, project_id,
subtree_ids, inherited, source_from_group_ids,
strip_domain_roles)
else:
role_assignments = self._list_direct_role_assignments(
role_id, user_id, group_id, system, domain_id, project_id,
subtree_ids, inherited)
if include_names:
return self._get_names_from_role_assignments(role_assignments)
return role_assignments
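    # A minimal usage sketch of the public entry point (hypothetical IDs;
    # kept as comments so module behaviour is unchanged):
    #
    #   refs = PROVIDERS.assignment_api.list_role_assignments(
    #       user_id='user-1', project_id='project-1',
    #       include_subtree=True, effective=True, include_names=True)
    #
    # Each ref then carries role/user/project names in addition to IDs, plus
    # an optional 'indirect' dict describing how it was derived.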
def _get_names_from_role_assignments(self, role_assignments):
role_assign_list = []
for role_asgmt in role_assignments:
new_assign = copy.deepcopy(role_asgmt)
for key, value in role_asgmt.items():
if key == 'domain_id':
_domain = PROVIDERS.resource_api.get_domain(value)
new_assign['domain_name'] = _domain['name']
elif key == 'user_id':
try:
                        # Note(knikolla): Try to get the user; if the user
                        # wasn't found in the backend, fall back to empty
                        # values.
_user = PROVIDERS.identity_api.get_user(value)
except exception.UserNotFound:
msg = ('User %(user)s not found in the'
' backend but still has role assignments.')
LOG.warning(msg, {'user': value})
new_assign['user_name'] = ''
new_assign['user_domain_id'] = ''
new_assign['user_domain_name'] = ''
else:
new_assign['user_name'] = _user['name']
new_assign['user_domain_id'] = _user['domain_id']
new_assign['user_domain_name'] = (
PROVIDERS.resource_api.get_domain(
_user['domain_id'])['name'])
elif key == 'group_id':
try:
                        # Note(knikolla): Try to get the group; if the group
                        # wasn't found in the backend, fall back to empty
                        # values.
_group = PROVIDERS.identity_api.get_group(value)
except exception.GroupNotFound:
msg = ('Group %(group)s not found in the'
' backend but still has role assignments.')
LOG.warning(msg, {'group': value})
new_assign['group_name'] = ''
new_assign['group_domain_id'] = ''
new_assign['group_domain_name'] = ''
else:
new_assign['group_name'] = _group['name']
new_assign['group_domain_id'] = _group['domain_id']
new_assign['group_domain_name'] = (
PROVIDERS.resource_api.get_domain(
_group['domain_id'])['name'])
elif key == 'project_id':
_project = PROVIDERS.resource_api.get_project(value)
new_assign['project_name'] = _project['name']
new_assign['project_domain_id'] = _project['domain_id']
new_assign['project_domain_name'] = (
PROVIDERS.resource_api.get_domain(
_project['domain_id'])['name'])
elif key == 'role_id':
_role = PROVIDERS.role_api.get_role(value)
new_assign['role_name'] = _role['name']
if _role['domain_id'] is not None:
new_assign['role_domain_id'] = _role['domain_id']
new_assign['role_domain_name'] = (
PROVIDERS.resource_api.get_domain(
_role['domain_id'])['name'])
role_assign_list.append(new_assign)
return role_assign_list
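    # Example of the enrichment performed above (hypothetical values): an
    # input of {'user_id': 'u1', 'project_id': 'p1', 'role_id': 'r1'} gains
    # 'user_name', 'user_domain_id', 'user_domain_name', 'project_name',
    # 'project_domain_id', 'project_domain_name' and 'role_name' keys.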
def delete_group_assignments(self, group_id):
# FIXME(lbragstad): This should be refactored in the Rocky release so
# that we can pass the group_id to the system assignment backend like
# we do with the project and domain assignment backend. Holding off on
# this because it will require an interface change to the backend,
# making it harder to backport for Queens RC.
self.driver.delete_group_assignments(group_id)
system_assignments = self.list_system_grants_for_group(group_id)
for assignment in system_assignments:
self.delete_system_grant_for_group(group_id, assignment['id'])
def delete_user_assignments(self, user_id):
# FIXME(lbragstad): This should be refactored in the Rocky release so
# that we can pass the user_id to the system assignment backend like we
# do with the project and domain assignment backend. Holding off on
# this because it will require an interface change to the backend,
# making it harder to backport for Queens RC.
self.driver.delete_user_assignments(user_id)
system_assignments = self.list_system_grants_for_user(user_id)
for assignment in system_assignments:
self.delete_system_grant_for_user(user_id, assignment['id'])
def check_system_grant_for_user(self, user_id, role_id):
"""Check if a user has a specific role on the system.
:param user_id: the ID of the user in the assignment
:param role_id: the ID of the system role in the assignment
:raises keystone.exception.RoleAssignmentNotFound: if the user doesn't
have a role assignment matching the role_id on the system
"""
target_id = self._SYSTEM_SCOPE_TOKEN
inherited = False
return self.driver.check_system_grant(
role_id, user_id, target_id, inherited
)
def list_system_grants_for_user(self, user_id):
"""Return a list of roles the user has on the system.
:param user_id: the ID of the user
:returns: a list of role assignments the user has system-wide
"""
target_id = self._SYSTEM_SCOPE_TOKEN
assignment_type = self._USER_SYSTEM
grants = self.driver.list_system_grants(
user_id, target_id, assignment_type
)
grant_ids = []
for grant in grants:
grant_ids.append(grant['role_id'])
return PROVIDERS.role_api.list_roles_from_ids(grant_ids)
def create_system_grant_for_user(self, user_id, role_id):
"""Grant a user a role on the system.
:param user_id: the ID of the user
:param role_id: the ID of the role to grant on the system
"""
role = PROVIDERS.role_api.get_role(role_id)
if role.get('domain_id'):
raise exception.ValidationError(
'Role %(role_id)s is a domain-specific role. Unable to use '
'a domain-specific role in a system assignment.' % {
'role_id': role_id
}
)
target_id = self._SYSTEM_SCOPE_TOKEN
assignment_type = self._USER_SYSTEM
inherited = False
self.driver.create_system_grant(
role_id, user_id, target_id, assignment_type, inherited
)
def delete_system_grant_for_user(self, user_id, role_id):
"""Remove a system grant from a user.
:param user_id: the ID of the user
:param role_id: the ID of the role to remove from the user on the
system
:raises keystone.exception.RoleAssignmentNotFound: if the user doesn't
have a role assignment with role_id on the system
"""
target_id = self._SYSTEM_SCOPE_TOKEN
inherited = False
self.driver.delete_system_grant(role_id, user_id, target_id, inherited)
def check_system_grant_for_group(self, group_id, role_id):
"""Check if a group has a specific role on the system.
:param group_id: the ID of the group in the assignment
:param role_id: the ID of the system role in the assignment
:raises keystone.exception.RoleAssignmentNotFound: if the group doesn't
have a role assignment matching the role_id on the system
"""
target_id = self._SYSTEM_SCOPE_TOKEN
inherited = False
return self.driver.check_system_grant(
role_id, group_id, target_id, inherited
)
def list_system_grants_for_group(self, group_id):
"""Return a list of roles the group has on the system.
:param group_id: the ID of the group
:returns: a list of role assignments the group has system-wide
"""
target_id = self._SYSTEM_SCOPE_TOKEN
assignment_type = self._GROUP_SYSTEM
grants = self.driver.list_system_grants(
group_id, target_id, assignment_type
)
grant_ids = []
for grant in grants:
grant_ids.append(grant['role_id'])
return PROVIDERS.role_api.list_roles_from_ids(grant_ids)
def create_system_grant_for_group(self, group_id, role_id):
"""Grant a group a role on the system.
:param group_id: the ID of the group
:param role_id: the ID of the role to grant on the system
"""
role = PROVIDERS.role_api.get_role(role_id)
if role.get('domain_id'):
raise exception.ValidationError(
'Role %(role_id)s is a domain-specific role. Unable to use '
'a domain-specific role in a system assignment.' % {
'role_id': role_id
}
)
target_id = self._SYSTEM_SCOPE_TOKEN
assignment_type = self._GROUP_SYSTEM
inherited = False
self.driver.create_system_grant(
role_id, group_id, target_id, assignment_type, inherited
)
def delete_system_grant_for_group(self, group_id, role_id):
"""Remove a system grant from a group.
:param group_id: the ID of the group
:param role_id: the ID of the role to remove from the group on the
system
:raises keystone.exception.RoleAssignmentNotFound: if the group doesn't
have a role assignment with role_id on the system
"""
target_id = self._SYSTEM_SCOPE_TOKEN
inherited = False
self.driver.delete_system_grant(
role_id, group_id, target_id, inherited
)
def list_all_system_grants(self):
"""Return a list of all system grants."""
actor_id = None
target_id = self._SYSTEM_SCOPE_TOKEN
assignment_type = None
return self.driver.list_system_grants(
actor_id, target_id, assignment_type
)
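    # Typical flow for the system-grant helpers above (hypothetical IDs,
    # comments only):
    #
    #   create_system_grant_for_user('u1', 'r1')
    #   check_system_grant_for_user('u1', 'r1')
    #   list_system_grants_for_user('u1')      # -> [role dicts]
    #   delete_system_grant_for_user('u1', 'r1')
    #
    # All of them target the pseudo-target _SYSTEM_SCOPE_TOKEN rather than a
    # project or domain.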
class RoleManager(manager.Manager):
"""Default pivot point for the Role backend."""
driver_namespace = 'keystone.role'
_provides_api = 'role_api'
_ROLE = 'role'
def __init__(self):
# If there is a specific driver specified for role, then use it.
# Otherwise retrieve the driver type from the assignment driver.
role_driver = CONF.role.driver
if role_driver is None:
# Explicitly load the assignment manager object
assignment_driver = CONF.assignment.driver
assignment_manager_obj = manager.load_driver(
Manager.driver_namespace,
assignment_driver)
role_driver = assignment_manager_obj.default_role_driver()
super(RoleManager, self).__init__(role_driver)
@MEMOIZE
def get_role(self, role_id):
return self.driver.get_role(role_id)
def get_unique_role_by_name(self, role_name, hints=None):
if not hints:
hints = driver_hints.Hints()
hints.add_filter("name", role_name, case_sensitive=True)
found_roles = PROVIDERS.role_api.list_roles(hints)
if not found_roles:
raise exception.RoleNotFound(
_("Role %s is not defined") % role_name
)
elif len(found_roles) == 1:
return {'id': found_roles[0]['id']}
else:
raise exception.AmbiguityError(resource='role',
name=role_name)
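    # Sketch of the resolution above (hypothetical name): a unique match
    # returns {'id': ...}; no match raises RoleNotFound; several roles
    # sharing the name (e.g. a global and a domain-specific role both called
    # 'admin') raise AmbiguityError.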
def create_role(self, role_id, role, initiator=None):
ret = self.driver.create_role(role_id, role)
notifications.Audit.created(self._ROLE, role_id, initiator)
if MEMOIZE.should_cache(ret):
self.get_role.set(ret, self, role_id)
return ret
@manager.response_truncated
def list_roles(self, hints=None):
return self.driver.list_roles(hints or driver_hints.Hints())
def update_role(self, role_id, role, initiator=None):
original_role = self.driver.get_role(role_id)
if ('domain_id' in role and
role['domain_id'] != original_role['domain_id']):
raise exception.ValidationError(
message=_('Update of `domain_id` is not allowed.'))
ret = self.driver.update_role(role_id, role)
notifications.Audit.updated(self._ROLE, role_id, initiator)
self.get_role.invalidate(self, role_id)
return ret
def delete_role(self, role_id, initiator=None):
PROVIDERS.assignment_api.delete_role_assignments(role_id)
PROVIDERS.assignment_api._send_app_cred_notification_for_role_removal(
role_id
)
self.driver.delete_role(role_id)
notifications.Audit.deleted(self._ROLE, role_id, initiator)
self.get_role.invalidate(self, role_id)
reason = (
'Invalidating the token cache because role %(role_id)s has been '
'removed. Role assignments for users will be recalculated and '
'enforced accordingly the next time they authenticate or validate '
            'a token.' % {'role_id': role_id}
)
notifications.invalidate_token_cache_notification(reason)
COMPUTED_ASSIGNMENTS_REGION.invalidate()
# TODO(ayoung): Add notification
def create_implied_role(self, prior_role_id, implied_role_id):
implied_role = self.driver.get_role(implied_role_id)
prior_role = self.driver.get_role(prior_role_id)
if implied_role['name'] in CONF.assignment.prohibited_implied_role:
raise exception.InvalidImpliedRole(role_id=implied_role_id)
if prior_role['domain_id'] is None and implied_role['domain_id']:
msg = _('Global role cannot imply a domain-specific role')
raise exception.InvalidImpliedRole(msg,
role_id=implied_role_id)
response = self.driver.create_implied_role(
prior_role_id, implied_role_id)
COMPUTED_ASSIGNMENTS_REGION.invalidate()
return response
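    # Illustration of the validation above (hypothetical roles): a global
    # prior role may not imply a domain-specific role, and roles listed in
    # CONF.assignment.prohibited_implied_role may not be implied at all;
    # both cases raise InvalidImpliedRole.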
def delete_implied_role(self, prior_role_id, implied_role_id):
self.driver.delete_implied_role(prior_role_id, implied_role_id)
COMPUTED_ASSIGNMENTS_REGION.invalidate()
| en | 0.915638 | # Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. Main entry point into the Assignment service. # This is a general cache region for assignment administration (CRUD # operations). # This builds a discrete cache region dedicated to role assignments computed # for a given user + project/domain pair. Any write operation to add or remove # any role assignment should invalidate this entire cache region. Default pivot point for the Assignment backend. See :class:`keystone.common.manager.Manager` for more details on how this dynamically calls the backend. # TODO(morganfainberg): Implement a way to get only group_ids # instead of the more expensive to_dict() call for each record. # Use set() to process the list to remove any duplicates Delete all application credential for a specific role. :param role_id: role identifier :type role_id: string Get the roles associated with a user within given project. This includes roles directly assigned to the user on the project, as well as those by virtue of group membership or inheritance. :returns: a list of role ids. :raises keystone.exception.ProjectNotFound: If the project doesn't exist. # Use set() to process the list to remove any duplicates Get the roles associated with a trustor within given project. This includes roles directly assigned to the trustor on the project, as well as those by virtue of group membership or inheritance, but it doesn't include the domain roles. :returns: a list of role ids. :raises keystone.exception.ProjectNotFound: If the project doesn't exist. # Use set() to process the list to remove any duplicates Get the roles associated with a user within given domain. :returns: a list of role ids. :raises keystone.exception.DomainNotFound: If the domain doesn't exist. # Use set() to process the list to remove any duplicates Get a list of roles for this group on domain and/or project. # if no group ids were passed, there are no roles. Without this check, # all assignments for the project or domain will be fetched, # which is not what we want. # The parameters for this method must match the parameters for # create_grant so that the notifications.role_assignment decorator # will work. # TODO(henry-nash): We might want to consider list limiting this at some # point in the future. # FIXME(lbragstad): Without the use of caching, listing effective role # assignments is slow, especially with large data set (lots of users # with multiple role assignments). This should serve as a marker in # case we have the opportunity to come back and optimize this code so # that it can be performant without having a hard dependency on # caching. Please see https://bugs.launchpad.net/keystone/+bug/1700852 # for more details. # Use set() to process the list to remove any duplicates # TODO(henry-nash): We might want to consider list limiting this at some # point in the future. 
# Use set() to process the list to remove any duplicates # The parameters for this method must match the parameters for # delete_grant so that the notifications.role_assignment decorator # will work. # For domain specific roles, the domain of the project # and role must match # check if role exist before any processing # check if role exists on the user before revoke # check if role exists on the group before revoke # The methods _expand_indirect_assignment, _list_direct_role_assignments # and _list_effective_role_assignments below are only used on # list_role_assignments, but they are not in its scope as nested functions # since it would significantly increase McCabe complexity, that should be # kept as it is in order to detect unnecessarily complex code, which is not # this case. Return a list of expanded role assignments. This methods is called for each discovered assignment that either needs a group assignment expanded into individual user assignments, or needs an inherited assignment to be applied to its children. In all cases, if either user_id and/or project_id is specified, then we filter the result on those values. If project_id is specified and subtree_ids is None, then this indicates that we are only interested in that one project. If subtree_ids is not None, then this is an indicator that any inherited assignments need to be expanded down the tree. The actual subtree_ids don't need to be used as a filter here, since we already ensured only those assignments that could affect them were passed to this method. If expand_groups is True then we expand groups out to a list of assignments, one for each member of that group. Create a group assignment from the provided ref. Expand group role assignment. For any group role assignment on a target, it is replaced by a list of role assignments containing one for each user of that group on that target. An example of accepted ref is:: { 'group_id': group_id, 'project_id': project_id, 'role_id': role_id } Once expanded, it should be returned as a list of entities like the one below, one for each each user_id in the provided group_id. :: { 'user_id': user_id, 'project_id': project_id, 'role_id': role_id, 'indirect' : { 'group_id': group_id } } Returned list will be formatted by the Controller, which will deduce a role assignment came from group membership if it has both 'user_id' in the main body of the dict and 'group_id' in indirect subdict. # Note(prashkre): Try to get the users in a group, # if a group wasn't found in the backend, users are set # as empty list. Expand inherited role assignments. If expand_groups is True and this is a group role assignment on a target, replace it by a list of role assignments containing one for each user of that group, on every project under that target. If expand_groups is False, then return a group assignment on an inherited target. If this is a user role assignment on a specific target (i.e. project_id is specified, but subtree_ids is None) then simply format this as a single assignment (since we are effectively filtering on project_id). If however, project_id is None or subtree_ids is not None, then replace this one assignment with a list of role assignments for that user on every project under that target. 
An example of accepted ref is:: { 'group_id': group_id, 'project_id': parent_id, 'role_id': role_id, 'inherited_to_projects': 'projects' } Once expanded, it should be returned as a list of entities like the one below, one for each each user_id in the provided group_id and for each subproject_id in the project_id subtree. :: { 'user_id': user_id, 'project_id': subproject_id, 'role_id': role_id, 'indirect' : { 'group_id': group_id, 'project_id': parent_id } } Returned list will be formatted by the Controller, which will deduce a role assignment came from group membership if it has both 'user_id' in the main body of the dict and 'group_id' in the 'indirect' subdict, as well as it is possible to deduce if it has come from inheritance if it contains both a 'project_id' in the main body of the dict and 'parent_id' in the 'indirect' subdict. Create a project assignment from the provided ref. base_ref can either be a project or domain inherited assignment ref. # Define expanded project list to which to apply this assignment # Since ref is an inherited assignment and we are filtering by # project(s), we are only going to apply the assignment to the # relevant project(s) # If this is a domain inherited assignment, then we know # that all the project_ids will get this assignment. If # it's a project inherited assignment, and the assignment # point is an ancestor of project_id, then we know that # again all the project_ids will get the assignment. If, # however, the assignment point is within the subtree, # then only a partial tree will get the assignment. # A domain inherited assignment, so apply it to all projects # in this domain # It must be a project assignment, so apply it to its subtree # Expand role assignment to all group members on any # inherited target of any of the projects # Just place the group assignment on any inherited target # of any of the projects # Expand role assignment for all projects Expand out implied roles. The role_refs passed in have had all inheritance and group assignments expanded out. We now need to look at the role_id in each ref and see if it is a prior role for some implied roles. If it is, then we need to duplicate that ref, one for each implied role. We store the prior role in the indirect dict that is part of such a duplicated ref, so that a caller can determine where the assignment came from. # Create a ref for an implied role from the ref of a prior role, # setting the new role_id to be the implied role and the indirect # role_id to be the prior role # Avoid traversing a cycle # if we arrive here, we need to filer by role_id. Post process assignment list for domain roles. Domain roles are only designed to do the job of inferring other roles and since that has been done before this method is called, we need to remove any assignments that include a domain role. List role assignments in effective mode. When using effective mode, besides the direct assignments, the indirect ones that come from grouping or inheritance are retrieved and will then be expanded. The resulting list of assignments will be filtered by the provided parameters. If subtree_ids is not None, then we also want to include all subtree_ids in the filter as well. Since we are in effective mode, group can never act as a filter (since group assignments are expanded into user roles) and domain can only be filter if we want non-inherited assignments, since domains can't inherit assignments. 
The goal of this method is to only ask the driver for those assignments as could effect the result based on the parameter filters specified, hence avoiding retrieving a huge list. List role assignments for actor on target. List direct and indirect assignments for an actor, optionally for a given target (i.e. projects or domain). :param role_id: List for a specific role, can be None meaning all roles :param inherited: Indicates whether inherited assignments or only direct assignments are required. If None, then both are required. :param user_id: If not None, list only assignments that affect this user. :param group_ids: A list of groups required. Only one of user_id and group_ids can be specified :param project_id: If specified, only include those assignments that affect at least this project, with additionally any projects specified in subtree_ids :param subtree_ids: The list of projects in the subtree. If specified, also include those assignments that affect these projects. These projects are guaranteed to be in the same domain as the project specified in project_id. subtree_ids can only be specified if project_id has also been specified. :param domain_id: If specified, only include those assignments that affect this domain - by definition this will not include any inherited assignments :returns: List of assignments matching the criteria. Any inherited or group assignments that could affect the resulting response are included. # List direct project role assignments # Get non inherited assignments # Get inherited assignments # The project and any subtree are guaranteed to be owned by # the same domain, so since we are filtering by these # specific projects, then we can only get inherited # assignments from their common domain or from any of # their parents projects. # List inherited assignments from the project's domain # For inherited assignments from projects, since we know # they are from the same tree the only places these can # come from are from parents of the main project or # inherited assignments on the project or subtree itself. # List inherited assignments without filtering by target # If filtering by group or inherited domain assignment the list is # guaranteed to be empty # You can't do both - and since source_from_group_ids is only used # internally, this must be a coding error by the caller. # If filtering by domain, then only non-inherited assignments are # relevant, since domains don't inherit assignments # List user or explicit group assignments. # Due to the need to expand implied roles, this call will skip # filtering by role_id and instead return the whole set of roles. # Matching on the specified role is performed at the end. # And those from the user's groups, so long as we are not restricting # to a set of source groups (in which case we already got those # assignments in the direct listing above). # Expand grouping and inheritance on retrieved role assignments List role assignments without applying expansion. Returns a list of direct role assignments, where their attributes match the provided filters. If subtree_ids is not None, then we also want to include all subtree_ids in the filter as well. List role assignments, honoring effective mode and provided filters. Returns a list of role assignments, where their attributes match the provided filters (role_id, user_id, group_id, domain_id, project_id and inherited). If include_subtree is True, then assignments on all descendants of the project specified by project_id are also included. 
The inherited filter defaults to None, meaning to get both non-inherited and inherited role assignments. If effective mode is specified, this means that rather than simply return the assignments that match the filters, any group or inheritance assignments will be expanded. Group assignments will become assignments for all the users in that group, and inherited assignments will be shown on the projects below the assignment point. Think of effective mode as being the list of assignments that actually affect a user, for example the roles that would be placed in a token. If include_names is set to true the entities' names are returned in addition to their id's. source_from_group_ids is a list of group IDs and, if specified, then only those assignments that are derived from membership of these groups are considered, and any such assignments will not be expanded into their user membership assignments. This is different to a group filter of the resulting list, instead being a restriction on which assignments should be considered before expansion of inheritance. This option is only used internally (i.e. it is not exposed at the API level) and is only supported in effective mode (since in regular mode there is no difference between this and a group filter, other than it is a list of groups). In effective mode, any domain specific roles are usually stripped from the returned assignments (since such roles are not placed in tokens). This stripping can be disabled by specifying strip_domain_roles=False, which is useful for internal calls like trusts which need to examine the full set of roles. # Note(knikolla): Try to get the user, otherwise # if the user wasn't found in the backend # use empty values. # Note(knikolla): Try to get the group, otherwise # if the group wasn't found in the backend # use empty values. # FIXME(lbragstad): This should be refactored in the Rocky release so # that we can pass the group_id to the system assignment backend like # we do with the project and domain assignment backend. Holding off on # this because it will require an interface change to the backend, # making it harder to backport for Queens RC. # FIXME(lbragstad): This should be refactored in the Rocky release so # that we can pass the user_id to the system assignment backend like we # do with the project and domain assignment backend. Holding off on # this because it will require an interface change to the backend, # making it harder to backport for Queens RC. Check if a user has a specific role on the system. :param user_id: the ID of the user in the assignment :param role_id: the ID of the system role in the assignment :raises keystone.exception.RoleAssignmentNotFound: if the user doesn't have a role assignment matching the role_id on the system Return a list of roles the user has on the system. :param user_id: the ID of the user :returns: a list of role assignments the user has system-wide Grant a user a role on the system. :param user_id: the ID of the user :param role_id: the ID of the role to grant on the system Remove a system grant from a user. :param user_id: the ID of the user :param role_id: the ID of the role to remove from the user on the system :raises keystone.exception.RoleAssignmentNotFound: if the user doesn't have a role assignment with role_id on the system Check if a group has a specific role on the system. 
:param group_id: the ID of the group in the assignment :param role_id: the ID of the system role in the assignment :raises keystone.exception.RoleAssignmentNotFound: if the group doesn't have a role assignment matching the role_id on the system Return a list of roles the group has on the system. :param group_id: the ID of the group :returns: a list of role assignments the group has system-wide Grant a group a role on the system. :param group_id: the ID of the group :param role_id: the ID of the role to grant on the system Remove a system grant from a group. :param group_id: the ID of the group :param role_id: the ID of the role to remove from the group on the system :raises keystone.exception.RoleAssignmentNotFound: if the group doesn't have a role assignment with role_id on the system Return a list of all system grants. Default pivot point for the Role backend. # If there is a specific driver specified for role, then use it. # Otherwise retrieve the driver type from the assignment driver. # Explicitly load the assignment manager object # TODO(ayoung): Add notification | 1.866795 | 2 |
single-shot-pose/lib/linemod_dataset.py | take-cheeze/models | 112 | 10602 |
import numpy as np
import os
from chainercv.chainer_experimental.datasets.sliceable import GetterDataset
from chainercv.utils import read_image
linemod_object_diameters = {
'ape': 0.103,
'benchvise': 0.286908,
'cam': 0.173,
'can': 0.202,
'cat': 0.155,
'driller': 0.262,
'duck': 0.109,
'eggbox': 0.176364,
'glue': 0.176,
'holepuncher': 0.162,
'iron': 0.303153,
'lamp': 0.285155,
'phone': 0.213}
class LinemodDataset(GetterDataset):
def __init__(self, base_dir, obj_name='ape', split='train',
return_msk=False):
super(LinemodDataset, self).__init__()
split_path = os.path.join(
base_dir, 'LINEMOD', obj_name, '{}.txt'.format(split))
self.base_dir = base_dir
with open(split_path, 'r') as f:
self.img_paths = f.readlines()
self.add_getter(('img', 'point', 'label'), self._get_example)
if return_msk:
self.add_getter('msk', self._get_msk)
def __len__(self):
return len(self.img_paths)
def _get_example(self, i):
img_path = os.path.join(self.base_dir, self.img_paths[i].rstrip())
img = read_image(img_path)
anno_path = img_path.replace(
'images', 'labels').replace(
'JPEGImages', 'labels').replace(
'.jpg', '.txt').replace('.png', '.txt')
anno = np.zeros(50*21)
if os.path.getsize(anno_path):
_, H, W = img.shape
tmp = read_truths_args(anno_path, 8.0/W)
size = tmp.size
if size > 50*21:
anno = tmp[0:50*21]
elif size > 0:
anno[0:size] = tmp
anno = anno.reshape(-1, 21)
anno = anno[:truths_length(anno)]
point = anno[:, 1:19].reshape(-1, 9, 2).astype(np.float32)
point[:, :, 0] *= W
point[:, :, 1] *= H
label = anno[:, 0].astype(np.int32)
return img, point, label
def _get_msk(self, i):
img_path = os.path.join(self.base_dir, self.img_paths[i].rstrip())
mskpath = img_path.replace('JPEGImages', 'mask').replace(
'/00', '/').replace('.jpg', '.png')
msk = read_image(mskpath, color=False)[0]
return msk > 0
def truths_length(truths):
for i in range(50):
if truths[i][1] == 0:
return i
def read_truths(lab_path):
if os.path.getsize(lab_path):
truths = np.loadtxt(lab_path)
# to avoid single truth problem
truths = truths.reshape(truths.size//21, 21)
return truths
else:
return np.array([])
def read_truths_args(lab_path, min_box_scale):
truths = read_truths(lab_path)
new_truths = []
for i in range(truths.shape[0]):
new_truths.append(
[truths[i][0], truths[i][1], truths[i][2],
truths[i][3], truths[i][4], truths[i][5],
truths[i][6], truths[i][7], truths[i][8],
truths[i][9], truths[i][10], truths[i][11],
truths[i][12], truths[i][13], truths[i][14],
truths[i][15], truths[i][16], truths[i][17],
truths[i][18]])
return np.array(new_truths)
| import numpy as np
import os
from chainercv.chainer_experimental.datasets.sliceable import GetterDataset
from chainercv.utils import read_image
linemod_object_diameters = {
'ape': 0.103,
'benchvise': 0.286908,
'cam': 0.173,
'can': 0.202,
'cat': 0.155,
'driller': 0.262,
'duck': 0.109,
'eggbox': 0.176364,
'glue': 0.176,
'holepuncher': 0.162,
'iron': 0.303153,
'lamp': 0.285155,
'phone': 0.213}
class LinemodDataset(GetterDataset):
def __init__(self, base_dir, obj_name='ape', split='train',
return_msk=False):
super(LinemodDataset, self).__init__()
split_path = os.path.join(
base_dir, 'LINEMOD', obj_name, '{}.txt'.format(split))
self.base_dir = base_dir
with open(split_path, 'r') as f:
self.img_paths = f.readlines()
self.add_getter(('img', 'point', 'label'), self._get_example)
if return_msk:
self.add_getter('msk', self._get_msk)
def __len__(self):
return len(self.img_paths)
def _get_example(self, i):
img_path = os.path.join(self.base_dir, self.img_paths[i].rstrip())
img = read_image(img_path)
anno_path = img_path.replace(
'images', 'labels').replace(
'JPEGImages', 'labels').replace(
'.jpg', '.txt').replace('.png', '.txt')
anno = np.zeros(50*21)
if os.path.getsize(anno_path):
_, H, W = img.shape
tmp = read_truths_args(anno_path, 8.0/W)
size = tmp.size
if size > 50*21:
anno = tmp[0:50*21]
elif size > 0:
anno[0:size] = tmp
anno = anno.reshape(-1, 21)
anno = anno[:truths_length(anno)]
point = anno[:, 1:19].reshape(-1, 9, 2).astype(np.float32)
point[:, :, 0] *= W
point[:, :, 1] *= H
label = anno[:, 0].astype(np.int32)
return img, point, label
def _get_msk(self, i):
img_path = os.path.join(self.base_dir, self.img_paths[i].rstrip())
mskpath = img_path.replace('JPEGImages', 'mask').replace(
'/00', '/').replace('.jpg', '.png')
msk = read_image(mskpath, color=False)[0]
return msk > 0
def truths_length(truths):
for i in range(50):
if truths[i][1] == 0:
return i
def read_truths(lab_path):
if os.path.getsize(lab_path):
truths = np.loadtxt(lab_path)
# to avoid single truth problem
truths = truths.reshape(truths.size//21, 21)
return truths
else:
return np.array([])
def read_truths_args(lab_path, min_box_scale):
truths = read_truths(lab_path)
new_truths = []
for i in range(truths.shape[0]):
new_truths.append(
[truths[i][0], truths[i][1], truths[i][2],
truths[i][3], truths[i][4], truths[i][5],
truths[i][6], truths[i][7], truths[i][8],
truths[i][9], truths[i][10], truths[i][11],
truths[i][12], truths[i][13], truths[i][14],
truths[i][15], truths[i][16], truths[i][17],
truths[i][18]])
return np.array(new_truths) | en | 0.890393 | # to avoid single truth problem | 2.396301 | 2 |
DQM/BeamMonitor/test/44X_beam_dqm_sourceclient-live_cfg.py | nistefan/cmssw | 0 | 10603 | <filename>DQM/BeamMonitor/test/44X_beam_dqm_sourceclient-live_cfg.py
import FWCore.ParameterSet.Config as cms
process = cms.Process("BeamMonitor")
#----------------------------
# Common part for PP and H.I Running
#-----------------------------
process.load("DQM.Integration.test.inputsource_cfi")
#--------------------------
# HLT Filter
process.load("HLTrigger.special.HLTTriggerTypeFilter_cfi")
# 0=random, 1=physics, 2=calibration, 3=technical
process.hltTriggerTypeFilter.SelectedTriggerType = 1
#----------------------------
# DQM Live Environment
#-----------------------------
process.load("DQM.Integration.test.environment_cfi")
process.dqmEnv.subSystemFolder = 'BeamMonitor'
import DQMServices.Components.DQMEnvironment_cfi
process.dqmEnvPixelLess = DQMServices.Components.DQMEnvironment_cfi.dqmEnv.clone()
process.dqmEnvPixelLess.subSystemFolder = 'BeamMonitor_PixelLess'
#----------------------------
# BeamMonitor
#-----------------------------
process.load("DQM.BeamMonitor.BeamMonitor_cff")
process.load("DQM.BeamMonitor.BeamMonitorBx_cff")
process.load("DQM.BeamMonitor.BeamMonitor_PixelLess_cff")
process.load("DQM.BeamMonitor.BeamConditionsMonitor_cff")
#### SETUP TRACKING RECONSTRUCTION ####
process.load("Configuration.StandardSequences.GeometryRecoDB_cff")
process.load('Configuration.StandardSequences.MagneticField_AutoFromDBCurrent_cff')
process.load("DQM.Integration.test.FrontierCondition_GT_cfi")
process.load("Configuration.StandardSequences.RawToDigi_Data_cff")
# Change Beam Monitor variables
if process.dqmSaver.producer.value() == "Playback":
process.dqmBeamMonitor.BeamFitter.WriteAscii = False
process.dqmBeamMonitor.BeamFitter.AsciiFileName = '/nfshome0/yumiceva/BeamMonitorDQM/BeamFitResults.txt'
process.dqmBeamMonitor.BeamFitter.WriteDIPAscii = True
process.dqmBeamMonitor.BeamFitter.DIPFileName = '/nfshome0/dqmdev/BeamMonitorDQM/BeamFitResults.txt'
else:
process.dqmBeamMonitor.BeamFitter.WriteAscii = True
process.dqmBeamMonitor.BeamFitter.AsciiFileName = '/nfshome0/yumiceva/BeamMonitorDQM/BeamFitResults.txt'
process.dqmBeamMonitor.BeamFitter.WriteDIPAscii = True
process.dqmBeamMonitor.BeamFitter.DIPFileName = '/nfshome0/dqmpro/BeamMonitorDQM/BeamFitResults.txt'
#process.dqmBeamMonitor.BeamFitter.SaveFitResults = False
#process.dqmBeamMonitor.BeamFitter.OutputFileName = '/nfshome0/yumiceva/BeamMonitorDQM/BeamFitResults.root'
process.dqmBeamMonitorBx.BeamFitter.WriteAscii = True
process.dqmBeamMonitorBx.BeamFitter.AsciiFileName = '/nfshome0/yumiceva/BeamMonitorDQM/BeamFitResults_Bx.txt'
## TKStatus
process.dqmTKStatus = cms.EDAnalyzer("TKStatus",
BeamFitter = cms.PSet(
DIPFileName = process.dqmBeamMonitor.BeamFitter.DIPFileName
)
)
process.dqmcommon = cms.Sequence(process.dqmEnv
*process.dqmSaver)
process.monitor = cms.Sequence(process.dqmBeamMonitor)
#--------------------------
# Proton-Proton Stuff
#--------------------------
if (process.runType.getRunType() == process.runType.pp_run or process.runType.getRunType() == process.runType.cosmic_run):
print "Running pp"
process.EventStreamHttpReader.SelectEvents = cms.untracked.PSet(
SelectEvents = cms.vstring('HLT_L1*',
'HLT_Jet*',
'HLT_*Cosmic*',
'HLT_HT*',
'HLT_MinBias_*',
'HLT_Physics*',
'HLT_ZeroBias_v*')
)
process.load("Configuration.StandardSequences.Reconstruction_cff")
process.load("RecoTracker.IterativeTracking.iterativeTk_cff")
## Pixelless Tracking
process.load('RecoTracker/Configuration/RecoTrackerNotStandard_cff')
process.MeasurementTracker.pixelClusterProducer = cms.string("")
# Offline Beam Spot
process.load("RecoVertex.BeamSpotProducer.BeamSpot_cff")
## Offline PrimaryVertices
import RecoVertex.PrimaryVertexProducer.OfflinePrimaryVertices_cfi
process.offlinePrimaryVertices = RecoVertex.PrimaryVertexProducer.OfflinePrimaryVertices_cfi.offlinePrimaryVertices.clone()
process.dqmBeamMonitor.OnlineMode = True
process.dqmBeamMonitor.resetEveryNLumi = 5
process.dqmBeamMonitor.resetPVEveryNLumi = 5
process.dqmBeamMonitor.PVFitter.minNrVerticesForFit = 25
process.dqmBeamMonitor.BeamFitter.TrackCollection = cms.untracked.InputTag('generalTracks')
process.offlinePrimaryVertices.TrackLabel = cms.InputTag("generalTracks")
process.offlinePrimaryVertices.label=cms.string("")
process.offlinePrimaryVertices.minNdof=cms.double(0.0)
process.offlinePrimaryVertices.useBeamConstraint=cms.bool(False)
#TriggerName for selecting pv for DIP publication, NO wildcard needed here
    #it will pick all triggers which have these strings in their name
process.dqmBeamMonitor.jetTrigger = cms.untracked.vstring("HLT_ZeroBias_v",
"HLT_Jet300_v",
"HLT_QuadJet70_v")
process.dqmBeamMonitor.hltResults = cms.InputTag("TriggerResults","","HLT")
#fast general track reco
process.iterTracking =cms.Sequence(process.InitialStep
*process.LowPtTripletStep
*process.PixelPairStep
*process.DetachedTripletStep
*process.MixedTripletStep
*process.PixelLessStep
*process.TobTecStep
*process.generalTracks)
process.tracking_FirstStep = cms.Sequence(process.siPixelDigis
*process.siStripDigis
*process.trackerlocalreco
*process.offlineBeamSpot
*process.recopixelvertexing
*process.iterTracking)
process.p = cms.Path(process.scalersRawToDigi
*process.dqmTKStatus
*process.hltTriggerTypeFilter
*process.dqmcommon
*process.tracking_FirstStep
*process.offlinePrimaryVertices
*process.monitor)
#--------------------------------------------------
# Heavy Ion Stuff
#--------------------------------------------------
if (process.runType.getRunType() == process.runType.hi_run):
print "Running HI"
process.castorDigis.InputLabel = cms.InputTag("rawDataRepacker")
process.csctfDigis.producer = cms.InputTag("rawDataRepacker")
process.dttfDigis.DTTF_FED_Source = cms.InputTag("rawDataRepacker")
process.ecalDigis.InputLabel = cms.InputTag("rawDataRepacker")
process.ecalPreshowerDigis.sourceTag = cms.InputTag("rawDataRepacker")
process.gctDigis.inputLabel = cms.InputTag("rawDataRepacker")
process.gtDigis.DaqGtInputTag = cms.InputTag("rawDataRepacker")
process.gtEvmDigis.EvmGtInputTag = cms.InputTag("rawDataRepacker")
process.hcalDigis.InputLabel = cms.InputTag("rawDataRepacker")
process.muonCSCDigis.InputObjects = cms.InputTag("rawDataRepacker")
process.muonDTDigis.inputLabel = cms.InputTag("rawDataRepacker")
process.muonRPCDigis.InputLabel = cms.InputTag("rawDataRepacker")
process.scalersRawToDigi.scalersInputTag = cms.InputTag("rawDataRepacker")
#----------------------------
# Event Source
#-----------------------------
process.EventStreamHttpReader.SelectEvents = cms.untracked.PSet(
SelectEvents = cms.vstring(
'HLT_HI*'
)
)
process.dqmBeamMonitor.OnlineMode = True ## in MC the LS are not ordered??
process.dqmBeamMonitor.resetEveryNLumi = 10
process.dqmBeamMonitor.resetPVEveryNLumi = 10
process.dqmBeamMonitor.BeamFitter.MinimumTotalLayers = 3 ## using pixel triplets
process.dqmBeamMonitor.PVFitter.minNrVerticesForFit = 20
process.dqmBeamMonitor.jetTrigger = cms.untracked.vstring("HLT_HI")
process.dqmBeamMonitor.hltResults = cms.InputTag("TriggerResults","","HLT")
## Load Heavy Ion Sequence
process.load("Configuration.StandardSequences.ReconstructionHeavyIons_cff") ## HI sequences
# Select events based on the pixel cluster multiplicity
import HLTrigger.special.hltPixelActivityFilter_cfi
process.multFilter = HLTrigger.special.hltPixelActivityFilter_cfi.hltPixelActivityFilter.clone(
inputTag = cms.InputTag('siPixelClusters'),
minClusters = cms.uint32(150),
maxClusters = cms.uint32(50000)
)
process.filter_step = cms.Sequence( process.siPixelDigis
*process.siPixelClusters
#*process.multFilter
)
process.HIRecoForDQM = cms.Sequence( process.siPixelDigis
*process.siPixelClusters
*process.siPixelRecHits
*process.offlineBeamSpot
*process.hiPixelVertices
*process.hiPixel3PrimTracks
)
# use HI pixel tracking and vertexing
process.dqmBeamMonitor.BeamFitter.TrackCollection = cms.untracked.InputTag('hiPixel3PrimTracks')
process.dqmBeamMonitorBx.BeamFitter.TrackCollection = cms.untracked.InputTag('hiPixel3PrimTracks')
process.dqmBeamMonitor.primaryVertex = cms.untracked.InputTag('hiSelectedVertex')
process.dqmBeamMonitor.PVFitter.VertexCollection = cms.untracked.InputTag('hiSelectedVertex')
# make pixel vertexing less sensitive to incorrect beamspot
process.hiPixel3ProtoTracks.RegionFactoryPSet.RegionPSet.originRadius = 0.2
process.hiPixel3ProtoTracks.RegionFactoryPSet.RegionPSet.fixedError = 0.5
process.hiSelectedProtoTracks.maxD0Significance = 100
process.hiPixelAdaptiveVertex.TkFilterParameters.maxD0Significance = 100
process.hiPixelAdaptiveVertex.vertexCollections.useBeamConstraint = False
#not working due to wrong tag of reco
process.hiPixelAdaptiveVertex.vertexCollections.maxDistanceToBeam = 1.0
process.p = cms.Path(process.scalersRawToDigi
*process.dqmTKStatus
*process.hltTriggerTypeFilter
*process.filter_step
*process.HIRecoForDQM
*process.dqmcommon
*process.monitor)
| <filename>DQM/BeamMonitor/test/44X_beam_dqm_sourceclient-live_cfg.py
import FWCore.ParameterSet.Config as cms
process = cms.Process("BeamMonitor")
#----------------------------
# Common part for PP and H.I Running
#-----------------------------
process.load("DQM.Integration.test.inputsource_cfi")
#--------------------------
# HLT Filter
process.load("HLTrigger.special.HLTTriggerTypeFilter_cfi")
# 0=random, 1=physics, 2=calibration, 3=technical
process.hltTriggerTypeFilter.SelectedTriggerType = 1
#----------------------------
# DQM Live Environment
#-----------------------------
process.load("DQM.Integration.test.environment_cfi")
process.dqmEnv.subSystemFolder = 'BeamMonitor'
import DQMServices.Components.DQMEnvironment_cfi
process.dqmEnvPixelLess = DQMServices.Components.DQMEnvironment_cfi.dqmEnv.clone()
process.dqmEnvPixelLess.subSystemFolder = 'BeamMonitor_PixelLess'
#----------------------------
# BeamMonitor
#-----------------------------
process.load("DQM.BeamMonitor.BeamMonitor_cff")
process.load("DQM.BeamMonitor.BeamMonitorBx_cff")
process.load("DQM.BeamMonitor.BeamMonitor_PixelLess_cff")
process.load("DQM.BeamMonitor.BeamConditionsMonitor_cff")
#### SETUP TRACKING RECONSTRUCTION ####
process.load("Configuration.StandardSequences.GeometryRecoDB_cff")
process.load('Configuration.StandardSequences.MagneticField_AutoFromDBCurrent_cff')
process.load("DQM.Integration.test.FrontierCondition_GT_cfi")
process.load("Configuration.StandardSequences.RawToDigi_Data_cff")
# Change Beam Monitor variables
if process.dqmSaver.producer.value() == "Playback":
process.dqmBeamMonitor.BeamFitter.WriteAscii = False
process.dqmBeamMonitor.BeamFitter.AsciiFileName = '/nfshome0/yumiceva/BeamMonitorDQM/BeamFitResults.txt'
process.dqmBeamMonitor.BeamFitter.WriteDIPAscii = True
process.dqmBeamMonitor.BeamFitter.DIPFileName = '/nfshome0/dqmdev/BeamMonitorDQM/BeamFitResults.txt'
else:
process.dqmBeamMonitor.BeamFitter.WriteAscii = True
process.dqmBeamMonitor.BeamFitter.AsciiFileName = '/nfshome0/yumiceva/BeamMonitorDQM/BeamFitResults.txt'
process.dqmBeamMonitor.BeamFitter.WriteDIPAscii = True
process.dqmBeamMonitor.BeamFitter.DIPFileName = '/nfshome0/dqmpro/BeamMonitorDQM/BeamFitResults.txt'
#process.dqmBeamMonitor.BeamFitter.SaveFitResults = False
#process.dqmBeamMonitor.BeamFitter.OutputFileName = '/nfshome0/yumiceva/BeamMonitorDQM/BeamFitResults.root'
process.dqmBeamMonitorBx.BeamFitter.WriteAscii = True
process.dqmBeamMonitorBx.BeamFitter.AsciiFileName = '/nfshome0/yumiceva/BeamMonitorDQM/BeamFitResults_Bx.txt'
## TKStatus
process.dqmTKStatus = cms.EDAnalyzer("TKStatus",
BeamFitter = cms.PSet(
DIPFileName = process.dqmBeamMonitor.BeamFitter.DIPFileName
)
)
process.dqmcommon = cms.Sequence(process.dqmEnv
*process.dqmSaver)
process.monitor = cms.Sequence(process.dqmBeamMonitor)
#--------------------------
# Proton-Proton Stuff
#--------------------------
if (process.runType.getRunType() == process.runType.pp_run or process.runType.getRunType() == process.runType.cosmic_run):
print "Running pp"
process.EventStreamHttpReader.SelectEvents = cms.untracked.PSet(
SelectEvents = cms.vstring('HLT_L1*',
'HLT_Jet*',
'HLT_*Cosmic*',
'HLT_HT*',
'HLT_MinBias_*',
'HLT_Physics*',
'HLT_ZeroBias_v*')
)
process.load("Configuration.StandardSequences.Reconstruction_cff")
process.load("RecoTracker.IterativeTracking.iterativeTk_cff")
## Pixelless Tracking
process.load('RecoTracker/Configuration/RecoTrackerNotStandard_cff')
process.MeasurementTracker.pixelClusterProducer = cms.string("")
# Offline Beam Spot
process.load("RecoVertex.BeamSpotProducer.BeamSpot_cff")
## Offline PrimaryVertices
import RecoVertex.PrimaryVertexProducer.OfflinePrimaryVertices_cfi
process.offlinePrimaryVertices = RecoVertex.PrimaryVertexProducer.OfflinePrimaryVertices_cfi.offlinePrimaryVertices.clone()
process.dqmBeamMonitor.OnlineMode = True
process.dqmBeamMonitor.resetEveryNLumi = 5
process.dqmBeamMonitor.resetPVEveryNLumi = 5
process.dqmBeamMonitor.PVFitter.minNrVerticesForFit = 25
process.dqmBeamMonitor.BeamFitter.TrackCollection = cms.untracked.InputTag('generalTracks')
process.offlinePrimaryVertices.TrackLabel = cms.InputTag("generalTracks")
process.offlinePrimaryVertices.label=cms.string("")
process.offlinePrimaryVertices.minNdof=cms.double(0.0)
process.offlinePrimaryVertices.useBeamConstraint=cms.bool(False)
#TriggerName for selecting pv for DIP publication, NO wildcard needed here
    #it will pick all triggers which have these strings in their name
process.dqmBeamMonitor.jetTrigger = cms.untracked.vstring("HLT_ZeroBias_v",
"HLT_Jet300_v",
"HLT_QuadJet70_v")
process.dqmBeamMonitor.hltResults = cms.InputTag("TriggerResults","","HLT")
#fast general track reco
process.iterTracking =cms.Sequence(process.InitialStep
*process.LowPtTripletStep
*process.PixelPairStep
*process.DetachedTripletStep
*process.MixedTripletStep
*process.PixelLessStep
*process.TobTecStep
*process.generalTracks)
process.tracking_FirstStep = cms.Sequence(process.siPixelDigis
*process.siStripDigis
*process.trackerlocalreco
*process.offlineBeamSpot
*process.recopixelvertexing
*process.iterTracking)
process.p = cms.Path(process.scalersRawToDigi
*process.dqmTKStatus
*process.hltTriggerTypeFilter
*process.dqmcommon
*process.tracking_FirstStep
*process.offlinePrimaryVertices
*process.monitor)
#--------------------------------------------------
# Heavy Ion Stuff
#--------------------------------------------------
if (process.runType.getRunType() == process.runType.hi_run):
print "Running HI"
process.castorDigis.InputLabel = cms.InputTag("rawDataRepacker")
process.csctfDigis.producer = cms.InputTag("rawDataRepacker")
process.dttfDigis.DTTF_FED_Source = cms.InputTag("rawDataRepacker")
process.ecalDigis.InputLabel = cms.InputTag("rawDataRepacker")
process.ecalPreshowerDigis.sourceTag = cms.InputTag("rawDataRepacker")
process.gctDigis.inputLabel = cms.InputTag("rawDataRepacker")
process.gtDigis.DaqGtInputTag = cms.InputTag("rawDataRepacker")
process.gtEvmDigis.EvmGtInputTag = cms.InputTag("rawDataRepacker")
process.hcalDigis.InputLabel = cms.InputTag("rawDataRepacker")
process.muonCSCDigis.InputObjects = cms.InputTag("rawDataRepacker")
process.muonDTDigis.inputLabel = cms.InputTag("rawDataRepacker")
process.muonRPCDigis.InputLabel = cms.InputTag("rawDataRepacker")
process.scalersRawToDigi.scalersInputTag = cms.InputTag("rawDataRepacker")
#----------------------------
# Event Source
#-----------------------------
process.EventStreamHttpReader.SelectEvents = cms.untracked.PSet(
SelectEvents = cms.vstring(
'HLT_HI*'
)
)
process.dqmBeamMonitor.OnlineMode = True ## in MC the LS are not ordered??
process.dqmBeamMonitor.resetEveryNLumi = 10
process.dqmBeamMonitor.resetPVEveryNLumi = 10
process.dqmBeamMonitor.BeamFitter.MinimumTotalLayers = 3 ## using pixel triplets
process.dqmBeamMonitor.PVFitter.minNrVerticesForFit = 20
process.dqmBeamMonitor.jetTrigger = cms.untracked.vstring("HLT_HI")
process.dqmBeamMonitor.hltResults = cms.InputTag("TriggerResults","","HLT")
## Load Heavy Ion Sequence
process.load("Configuration.StandardSequences.ReconstructionHeavyIons_cff") ## HI sequences
# Select events based on the pixel cluster multiplicity
import HLTrigger.special.hltPixelActivityFilter_cfi
process.multFilter = HLTrigger.special.hltPixelActivityFilter_cfi.hltPixelActivityFilter.clone(
inputTag = cms.InputTag('siPixelClusters'),
minClusters = cms.uint32(150),
maxClusters = cms.uint32(50000)
)
process.filter_step = cms.Sequence( process.siPixelDigis
*process.siPixelClusters
#*process.multFilter
)
process.HIRecoForDQM = cms.Sequence( process.siPixelDigis
*process.siPixelClusters
*process.siPixelRecHits
*process.offlineBeamSpot
*process.hiPixelVertices
*process.hiPixel3PrimTracks
)
# use HI pixel tracking and vertexing
process.dqmBeamMonitor.BeamFitter.TrackCollection = cms.untracked.InputTag('hiPixel3PrimTracks')
process.dqmBeamMonitorBx.BeamFitter.TrackCollection = cms.untracked.InputTag('hiPixel3PrimTracks')
process.dqmBeamMonitor.primaryVertex = cms.untracked.InputTag('hiSelectedVertex')
process.dqmBeamMonitor.PVFitter.VertexCollection = cms.untracked.InputTag('hiSelectedVertex')
# make pixel vertexing less sensitive to incorrect beamspot
process.hiPixel3ProtoTracks.RegionFactoryPSet.RegionPSet.originRadius = 0.2
process.hiPixel3ProtoTracks.RegionFactoryPSet.RegionPSet.fixedError = 0.5
process.hiSelectedProtoTracks.maxD0Significance = 100
process.hiPixelAdaptiveVertex.TkFilterParameters.maxD0Significance = 100
process.hiPixelAdaptiveVertex.vertexCollections.useBeamConstraint = False
#not working due to wrong tag of reco
process.hiPixelAdaptiveVertex.vertexCollections.maxDistanceToBeam = 1.0
process.p = cms.Path(process.scalersRawToDigi
*process.dqmTKStatus
*process.hltTriggerTypeFilter
*process.filter_step
*process.HIRecoForDQM
*process.dqmcommon
*process.monitor)
| en | 0.35688 | #---------------------------- # Common part for PP and H.I Running #----------------------------- #-------------------------- # HLT Filter # 0=random, 1=physics, 2=calibration, 3=technical #---------------------------- # DQM Live Environment #----------------------------- #---------------------------- # BeamMonitor #----------------------------- #### SETUP TRACKING RECONSTRUCTION #### # Change Beam Monitor variables #process.dqmBeamMonitor.BeamFitter.SaveFitResults = False #process.dqmBeamMonitor.BeamFitter.OutputFileName = '/nfshome0/yumiceva/BeamMonitorDQM/BeamFitResults.root' ## TKStatus #-------------------------- # Proton-Proton Stuff #-------------------------- ## Pixelless Tracking # Offline Beam Spot ## Offline PrimaryVertices #TriggerName for selecting pv for DIP publication, NO wildcard needed here #it will pick all triggers which has these strings in theri name #fast general track reco #-------------------------------------------------- # Heavy Ion Stuff #-------------------------------------------------- #---------------------------- # Event Source #----------------------------- ## in MC the LS are not ordered?? ## using pixel triplets ## Load Heavy Ion Sequence ## HI sequences # Select events based on the pixel cluster multiplicity #*process.multFilter # use HI pixel tracking and vertexing # make pixel vertexing less sensitive to incorrect beamspot #not working due to wrong tag of reco | 1.583143 | 2 |
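The configuration above is launched with `cmsRun` and chooses its whole reconstruction chain from the run type (pp/cosmic versus heavy ion). A heavily stripped-down sketch of that switch follows; it is only meaningful inside a CMSSW Python environment, and the `SketchUnpacker` plugin name and the non-repacker RAW label are illustrative assumptions rather than modules taken from the file above.

# Sketch of the run-type switch used above; not runnable outside CMSSW.
import FWCore.ParameterSet.Config as cms

process = cms.Process("BeamMonitorSketch")

heavy_ion = False  # stands in for process.runType.getRunType() == process.runType.hi_run
raw_label = "rawDataRepacker" if heavy_ion else "rawDataCollector"  # second label assumed

# The real config rewires every unpacker to the repacked RAW collection for
# heavy-ion runs; here a single placeholder module receives the chosen label.
process.sketchDigis = cms.EDProducer(
    "SketchUnpacker",                       # placeholder plugin name
    InputLabel=cms.InputTag(raw_label),
)
process.p = cms.Path(process.sketchDigis)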
tests/routes/test_hackers.py | TorrentofShame/hackathon-2021-backend | 0 | 10604 | <filename>tests/routes/test_hackers.py<gh_stars>0
# flake8: noqa
import json
from src.models.hacker import Hacker
from tests.base import BaseTestCase
from datetime import datetime
class TestHackersBlueprint(BaseTestCase):
"""Tests for the Hackers Endpoints"""
"""create_hacker"""
def test_create_hacker(self):
now = datetime.now()
res = self.client.post(
"/api/hackers/",
data={"hacker": json.dumps(
{
"email": "<EMAIL>",
"date": now.isoformat(),
}
)},
content_type="multipart/form-data",
)
self.assertEqual(res.status_code, 201)
self.assertEqual(Hacker.objects.count(), 1)
def test_create_hacker_invalid_json(self):
res = self.client.post(
"/api/hackers/", data={"hacker": json.dumps({})}, content_type="multipart/form-data"
)
data = json.loads(res.data.decode())
self.assertEqual(res.status_code, 400)
self.assertEqual(data["name"], "Bad Request")
self.assertEqual(Hacker.objects.count(), 0)
def test_create_hacker_duplicate_user(self):
now = datetime.now()
Hacker.createOne(
email="<EMAIL>"
)
res = self.client.post(
"/api/hackers/",
data={"hacker": json.dumps(
{
"email": "<EMAIL>",
"date": now.isoformat(),
}
)},
content_type="multipart/form-data",
)
data = json.loads(res.data.decode())
self.assertEqual(res.status_code, 409)
self.assertIn(
"Sorry, that email already exists.", data["description"]
)
self.assertEqual(Hacker.objects.count(), 1)
def test_create_hacker_invalid_datatypes(self):
res = self.client.post(
"/api/hackers/",
data=json.dumps(
{"email": "notanemail"}
),
content_type="application/json",
)
data = json.loads(res.data.decode())
self.assertEqual(res.status_code, 400)
self.assertEqual(data["name"], "Bad Request")
self.assertEqual(Hacker.objects.count(), 0)
"""get_all_hackers"""
def test_get_all_hackers(self):
Hacker.createOne(
email="<EMAIL>"
)
Hacker.createOne(
email="<EMAIL>",
)
res = self.client.get("/api/hackers/get_all_hackers/")
data = json.loads(res.data.decode())
self.assertEqual(res.status_code, 201)
self.assertEqual(data["hackers"][0]["email"], "<EMAIL>")
self.assertEqual(data["hackers"][1]["email"], "<EMAIL>")
def test_get_all_hackers_not_found(self):
res = self.client.get("/api/hackers/get_all_hackers/")
data = json.loads(res.data.decode())
self.assertEqual(res.status_code, 404)
self.assertEqual(data["name"], "Not Found")
| <filename>tests/routes/test_hackers.py<gh_stars>0
# flake8: noqa
import json
from src.models.hacker import Hacker
from tests.base import BaseTestCase
from datetime import datetime
class TestHackersBlueprint(BaseTestCase):
"""Tests for the Hackers Endpoints"""
"""create_hacker"""
def test_create_hacker(self):
now = datetime.now()
res = self.client.post(
"/api/hackers/",
data={"hacker": json.dumps(
{
"email": "<EMAIL>",
"date": now.isoformat(),
}
)},
content_type="multipart/form-data",
)
self.assertEqual(res.status_code, 201)
self.assertEqual(Hacker.objects.count(), 1)
def test_create_hacker_invalid_json(self):
res = self.client.post(
"/api/hackers/", data={"hacker": json.dumps({})}, content_type="multipart/form-data"
)
data = json.loads(res.data.decode())
self.assertEqual(res.status_code, 400)
self.assertEqual(data["name"], "Bad Request")
self.assertEqual(Hacker.objects.count(), 0)
def test_create_hacker_duplicate_user(self):
now = datetime.now()
Hacker.createOne(
email="<EMAIL>"
)
res = self.client.post(
"/api/hackers/",
data={"hacker": json.dumps(
{
"email": "<EMAIL>",
"date": now.isoformat(),
}
)},
content_type="multipart/form-data",
)
data = json.loads(res.data.decode())
self.assertEqual(res.status_code, 409)
self.assertIn(
"Sorry, that email already exists.", data["description"]
)
self.assertEqual(Hacker.objects.count(), 1)
def test_create_hacker_invalid_datatypes(self):
res = self.client.post(
"/api/hackers/",
data=json.dumps(
{"email": "notanemail"}
),
content_type="application/json",
)
data = json.loads(res.data.decode())
self.assertEqual(res.status_code, 400)
self.assertEqual(data["name"], "Bad Request")
self.assertEqual(Hacker.objects.count(), 0)
"""get_all_hackers"""
def test_get_all_hackers(self):
Hacker.createOne(
email="<EMAIL>"
)
Hacker.createOne(
email="<EMAIL>",
)
res = self.client.get("/api/hackers/get_all_hackers/")
data = json.loads(res.data.decode())
self.assertEqual(res.status_code, 201)
self.assertEqual(data["hackers"][0]["email"], "<EMAIL>")
self.assertEqual(data["hackers"][1]["email"], "<EMAIL>")
def test_get_all_hackers_not_found(self):
res = self.client.get("/api/hackers/get_all_hackers/")
data = json.loads(res.data.decode())
self.assertEqual(res.status_code, 404)
self.assertEqual(data["name"], "Not Found")
| en | 0.384189 | # flake8: noqa Tests for the Hackers Endpoints create_hacker get_all_hackers | 2.516827 | 3 |
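The tests above pin down the wire format for creating a hacker: a multipart form whose single `hacker` field carries a JSON object with `email` and `date`. Below is a sketch of the same request issued with the `requests` library against a running instance; the base URL is a placeholder and the email is a dummy value.

# Illustrative client call mirroring test_create_hacker; adjust the URL to
# wherever the backend is actually served.
import json
from datetime import datetime

import requests

payload = {"email": "[email protected]", "date": datetime.now().isoformat()}
resp = requests.post(
    "http://localhost:5000/api/hackers/",           # placeholder host/port
    files={"hacker": (None, json.dumps(payload))},  # plain form field, no filename
)
print(resp.status_code)  # the tests expect 201 on success and 409 for a duplicate email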
open_cp/sources/chicago.py | sumau/PredictCode | 18 | 10605 | """
sources.chicago
===============
Reads a CSV file in the format (as of April 2017) of data available from:
- https://catalog.data.gov/dataset/crimes-one-year-prior-to-present-e171f
- https://catalog.data.gov/dataset/crimes-2001-to-present-398a4
The default data is loaded from a file "chicago.csv" which should be downloaded
from one of the above links. The format of the data, frustratingly, differs
between the snapshot of last year, and the total.
The data is partly anonymous in that the address within a block is obscured,
while the geocoding seems complicated (work in progress to understand)...
The crime type "HOMICIDE" is reported multiple times in the dataset.
"""
import csv as _csv
import os.path as _path
import datetime
import numpy as _np
from ..data import TimedPoints
_datadir = None
_default_filename = "chicago.csv"
_FEET_IN_METERS = 3937 / 1200
def set_data_directory(datadir):
"""Set the default location for search for the default input file."""
global _datadir
_datadir = datadir
def get_default_filename():
"""Returns the default filename, if available. Otherwise raises
AttributeError.
"""
global _datadir
if _datadir is None:
raise AttributeError("datadir not set; call `set_data_directory()`.")
return _path.join(_datadir, _default_filename)
def _date_from_csv(date_string):
return datetime.datetime.strptime(date_string, "%m/%d/%Y %I:%M:%S %p")
def date_from_iso(iso_string):
"""Convert a datetime string in ISO format into a :class:`datetime`
instance.
:param iso_string: Like "2017-10-23T05:12:39"
:return: A :class:`datetime` instance.
"""
return datetime.datetime.strptime(iso_string, "%Y-%m-%dT%H:%M:%S")
def _date_from_other(dt_str):
# Like 4/16/13 5:00
try:
date, time = dt_str.split()
month, day, year = date.split("/")
hour, minutes = time.split(":")
return datetime.datetime(year=int(year)+2000, month=int(month), day=int(day),
hour=int(hour), minute=int(minutes))
except Exception as ex:
raise Exception("Failed to parse {}, cause {}/{}".format(dt_str, type(ex), ex))
_FIELDS = {
"snapshot" : {
"_DESCRIPTION_FIELD" : ' PRIMARY DESCRIPTION',
"_X_FIELD" : 'X COORDINATE',
"_Y_FIELD" : 'Y COORDINATE',
"_TIME_FIELD" : 'DATE OF OCCURRENCE',
"_GEOJSON_LOOKUP" : {"case": 'CASE#',
"address": "BLOCK",
"location": ' LOCATION DESCRIPTION',
"crime": ' PRIMARY DESCRIPTION',
"type": ' SECONDARY DESCRIPTION',
"timestamp": 'DATE OF OCCURRENCE'},
"GEOJSON_COORDS" : ('LONGITUDE', 'LATITUDE'),
"DT_CONVERT" : _date_from_csv
},
"all" : {
"_DESCRIPTION_FIELD" : 'Primary Type',
"_X_FIELD" : 'X Coordinate',
"_Y_FIELD" : 'Y Coordinate',
"_TIME_FIELD" : 'Date',
"_GEOJSON_LOOKUP" : {"case": 'Case Number',
"address": "Block",
"location": 'Location Description',
"crime": 'Primary Type',
"type": 'Description',
"timestamp": 'Date'},
"GEOJSON_COORDS" : ('Longitude', 'Latitude'),
"DT_CONVERT" : _date_from_csv
},
"gen" : {
"_DESCRIPTION_FIELD" : 'CRIME',
"_X_FIELD" : 'X',
"_Y_FIELD" : 'Y',
"_TIME_FIELD" : 'TIMESTAMP',
"_GEOJSON_LOOKUP" : {"case": 'CASE',
"address": "BLOCK",
"location": 'LOCATION',
"crime": 'CRIME',
"type": 'SUB-TYPE',
"timestamp": 'TIMESTAMP'},
"GEOJSON_COORDS" : ('X', 'Y'),
"DT_CONVERT" : _date_from_csv
}
}
_FIELDS["all_other"] = dict(_FIELDS["all"])
_FIELDS["all_other"]["DT_CONVERT"] = _date_from_other
def _convert_header(header, dic):
lookup = dict()
for field in [dic["_DESCRIPTION_FIELD"], dic["_X_FIELD"], dic["_Y_FIELD"], dic["_TIME_FIELD"]]:
if not field in header:
raise Exception("No field '{}' found in header".format(field))
lookup[field] = header.index(field)
return lookup
def default_burglary_data():
"""Load the default data, if available, giving just "THEFT" data.
:return: An instance of :class:`open_cp.data.TimedPoints` or `None`.
"""
try:
return load(get_default_filename(), {"THEFT"})
except Exception:
return None
def _get_dic(type):
try:
return _FIELDS[type]
except KeyError:
raise ValueError("Don't understand type {}".format(type))
def _load_to_list(file, dic, primary_description_names):
reader = _csv.reader(file)
lookup = _convert_header(next(reader), dic)
dt_convert = dic["DT_CONVERT"]
data = []
for row in reader:
description = row[lookup[dic["_DESCRIPTION_FIELD"]]].strip()
if not description in primary_description_names:
continue
x = row[lookup[dic["_X_FIELD"]]].strip()
y = row[lookup[dic["_Y_FIELD"]]].strip()
t = row[lookup[dic["_TIME_FIELD"]]].strip()
if x != "" and y != "":
data.append((dt_convert(t), float(x), float(y)))
return data
def load(file, primary_description_names, to_meters=True, type="snapshot"):
"""Load data from a CSV file in the expected format.
:param file: Name of the CSV file load, or a file-like object.
:param primary_description_names: Set of names to search for in the
"primary description field". E.g. pass `{"THEFT"}` to return only the
"theft" crime type.
:param to_meters: Convert the coordinates to meters; True by default.
:param type: Either "snapshot" or "all" depending on whether the data
      has headers conforming to the "last year" or "2001 to present" data.
:return: An instance of :class:`open_cp.data.TimedPoints` or `None`.
"""
dic = _get_dic(type)
if isinstance(file, str):
with open(file) as file:
data = _load_to_list(file, dic, primary_description_names)
else:
data = _load_to_list(file, dic, primary_description_names)
data.sort(key = lambda triple : triple[0])
xcoords = _np.empty(len(data))
ycoords = _np.empty(len(data))
for i, (_, x, y) in enumerate(data):
xcoords[i], ycoords[i] = x, y
times = [t for t, _, _ in data]
if to_meters:
xcoords /= _FEET_IN_METERS
ycoords /= _FEET_IN_METERS
return TimedPoints.from_coords(times, xcoords, ycoords)
def _convert_header_for_geojson(header, dic):
try:
column_lookup = {}
for key, col_head in dic["_GEOJSON_LOOKUP"].items():
column_lookup[key] = header.index(col_head)
coord_lookup = [header.index(chead) for chead in dic["GEOJSON_COORDS"]]
return column_lookup, coord_lookup
except KeyError as ex:
raise ValueError("Header not in expected format: {} caused by {}/{}".format(
header, type(ex), ex))
def _generate_GeoJSON_Features(file, dic):
dt_convert = dic["DT_CONVERT"]
reader = _csv.reader(file)
column_lookup, coord_lookup = _convert_header_for_geojson(next(reader), dic)
for row in reader:
properties = {key : row[i] for key, i in column_lookup.items()}
properties["timestamp"] = dt_convert(properties["timestamp"]).isoformat()
if row[coord_lookup[0]] == "":
geometry = None
else:
coordinates = [float(row[i]) for i in coord_lookup]
geometry = {"type":"Point", "coordinates":coordinates}
yield {"geometry": geometry, "properties": properties,
"type": "Feature"}
def generate_GeoJSON_Features(file, type="snapshot"):
"""Generate a sequence of GeoJSON "features" from the CSV file.
See :func:`load_to_GeoJSON`.
:param file: Either a filename, or a file object.
"""
dic = _get_dic(type)
if isinstance(file, str):
with open(file) as f:
yield from _generate_GeoJSON_Features(f, dic)
else:
yield from _generate_GeoJSON_Features(file, dic)
def load_to_GeoJSON(filename, type="snapshot"):
"""Load the specified CSV file to a list of GeoJSON (see
http://geojson.org/) features. Events with no location data have `None`
as the geometry. Timestamps are converted to standard ISO string format.
The returned "properties" have these keys:
- "case" for the "CASE#" field
- "crime" for the "PRIMARY DESCRIPTION" field
- "type" for the "SECONDARY DESCRIPTION" field
- "location" for the "LOCATION DESCRIPTION" field
- "timestamp" for the "DATE OF OCCURRENCE" field
- "address" for the "BLOCK" field
:param filename: Filename of the CSV file to process
:param type: Either "snapshot" or "all" depending on whether the data
      has headers conforming to the "last year" or "2001 to present" data.
:return: List of Python dictionaries in GeoJSON format.
"""
return list(generate_GeoJSON_Features(filename, type))
try:
import geopandas as gpd
import shapely.geometry as _geometry
except:
gpd = None
_geometry = None
def convert_null_geometry_to_empty(frame):
"""Utility method. Convert any geometry in the geoDataFrame which is
"null" (`None` or empty) to a Point type geometry which is empty. The
returned geoDateFrame is suitable for projecting and other geometrical
transformations.
"""
def null_to_point(x):
if x is None or x.is_empty:
return _geometry.Point()
return x
newgeo = frame.geometry.map(null_to_point)
return frame.set_geometry(newgeo)
def convert_null_geometry_to_none(frame):
"""Utility method. Convert any geometry in the geoDataFrame which is
"null" (`None` or empty) to `None`. The returned geoDateFrame is suitable
for saving.
"""
def null_to_none(x):
if x is None or x.is_empty:
return None
return x
newgeo = frame.geometry.map(null_to_none)
return frame.set_geometry(newgeo)
def load_to_geoDataFrame(filename, datetime_as_string=True,
type="snapshot", empty_geometry="none"):
"""Return the same data as :func:`load_to_GeoJSON` but as a geoPandas
data-frame.
:param filename: Filename of the CSV file to process
:param datetime_as_string: Write the timestamp as an ISO formatted string.
Defaults to True which is best for saving the dataframe as e.g. a shape
file. Set to False to get timestamps as python objects, which is best
for using (geo)pandas to analyse the data.
:param type: Either "snapshot" or "all" depending on whether the data
      has headers conforming to the "last year" or "2001 to present" data.
:param empty_geometry: Either "none" to return `None` as the geometry of
crimes which have no location data in the CSV file (this is correct if
you wish to save the data-frame); or "empty" to return an empty `Point`
type (which is correct, for example, if you wish to re-project the
data-frame). Yes, GeoPandas appears to be annoying like this.
"""
geo_data = load_to_GeoJSON(filename, type=type)
if not datetime_as_string:
for feature in geo_data:
feature["properties"]["timestamp"] = _date_from_iso(feature["properties"]["timestamp"])
frame = gpd.GeoDataFrame.from_features(geo_data)
if empty_geometry == "none":
pass
elif empty_geometry == "empty":
frame = convert_null_geometry_to_empty(frame)
else:
raise ValueError("Unknown `empty_geometry` parameter `{}`".format(empty_geometry))
frame.crs = {"init":"epsg:4326"}
return frame
_sides = None
def _load_sides():
global _sides
if _sides is not None:
return
global _datadir
geojson = _path.join(_datadir, "Chicago_Areas.geojson")
frame = gpd.read_file(geojson)
side_mapping = {
"Far North" : [1,2,3,4,9,10,11,12,13,14,76,77],
"Northwest" : [15,16,17,18,19,20],
"North" : [5,6,7,21,22],
"West" : list(range(23, 32)),
"Central" : [8,32,33],
"South" : list(range(34,44)) + [60, 69],
"Southwest" : [56,57,58,59] + list(range(61,69)),
"Far Southwest" : list(range(70,76)),
"Far Southeast" : list(range(44,56))
}
frame["side"] = frame.area_numbe.map(lambda x : next(key
for key, item in side_mapping.items() if int(x) in item) )
_sides = frame.drop(["area", "area_num_1", "comarea", "comarea_id",
"perimeter", "shape_area", "shape_len"], axis=1)
_sides.crs = {"init": "epsg:4326"}
_sides = _sides.to_crs({"init": "epsg:2790"})
def get_side(name):
"""Return a geometry (a polygon, typically) of the outline of the shape
of the given "side" of Chicago, projected to {"init":"epsg:2790"}, which
is Illinois in metres.
Needs the file "Chicago_Areas.geojson" to be in the "datadir". This can
be downloaded from:
https://data.cityofchicago.org/Facilities-Geographic-Boundaries/Boundaries-Community-Areas-current-/cauq-8yn6
:param name: One of "Far North", "Northwest", "North", "West", "Central",
"South", "Southwest", "Far Southwest", "Far Southeast"
"""
_load_sides()
return _sides[_sides.side == name].unary_union
| """
sources.chicago
===============
Reads a CSV file in the format (as of April 2017) of data available from:
- https://catalog.data.gov/dataset/crimes-one-year-prior-to-present-e171f
- https://catalog.data.gov/dataset/crimes-2001-to-present-398a4
The default data is loaded from a file "chicago.csv" which should be downloaded
from one of the above links. The format of the data, frustratingly, differs
between the snapshot of last year, and the total.
The data is partly anonymous in that the address within a block is obscured,
while the geocoding seems complicated (work in progress to understand)...
The crime type "HOMICIDE" is reported multiple times in the dataset.
"""
import csv as _csv
import os.path as _path
import datetime
import numpy as _np
from ..data import TimedPoints
_datadir = None
_default_filename = "chicago.csv"
_FEET_IN_METERS = 3937 / 1200
def set_data_directory(datadir):
"""Set the default location for search for the default input file."""
global _datadir
_datadir = datadir
def get_default_filename():
"""Returns the default filename, if available. Otherwise raises
AttributeError.
"""
global _datadir
if _datadir is None:
raise AttributeError("datadir not set; call `set_data_directory()`.")
return _path.join(_datadir, _default_filename)
def _date_from_csv(date_string):
return datetime.datetime.strptime(date_string, "%m/%d/%Y %I:%M:%S %p")
def date_from_iso(iso_string):
"""Convert a datetime string in ISO format into a :class:`datetime`
instance.
:param iso_string: Like "2017-10-23T05:12:39"
:return: A :class:`datetime` instance.
"""
return datetime.datetime.strptime(iso_string, "%Y-%m-%dT%H:%M:%S")
def _date_from_other(dt_str):
# Like 4/16/13 5:00
try:
date, time = dt_str.split()
month, day, year = date.split("/")
hour, minutes = time.split(":")
return datetime.datetime(year=int(year)+2000, month=int(month), day=int(day),
hour=int(hour), minute=int(minutes))
except Exception as ex:
raise Exception("Failed to parse {}, cause {}/{}".format(dt_str, type(ex), ex))
_FIELDS = {
"snapshot" : {
"_DESCRIPTION_FIELD" : ' PRIMARY DESCRIPTION',
"_X_FIELD" : 'X COORDINATE',
"_Y_FIELD" : 'Y COORDINATE',
"_TIME_FIELD" : 'DATE OF OCCURRENCE',
"_GEOJSON_LOOKUP" : {"case": 'CASE#',
"address": "BLOCK",
"location": ' LOCATION DESCRIPTION',
"crime": ' PRIMARY DESCRIPTION',
"type": ' SECONDARY DESCRIPTION',
"timestamp": 'DATE OF OCCURRENCE'},
"GEOJSON_COORDS" : ('LONGITUDE', 'LATITUDE'),
"DT_CONVERT" : _date_from_csv
},
"all" : {
"_DESCRIPTION_FIELD" : 'Primary Type',
"_X_FIELD" : 'X Coordinate',
"_Y_FIELD" : 'Y Coordinate',
"_TIME_FIELD" : 'Date',
"_GEOJSON_LOOKUP" : {"case": 'Case Number',
"address": "Block",
"location": 'Location Description',
"crime": 'Primary Type',
"type": 'Description',
"timestamp": 'Date'},
"GEOJSON_COORDS" : ('Longitude', 'Latitude'),
"DT_CONVERT" : _date_from_csv
},
"gen" : {
"_DESCRIPTION_FIELD" : 'CRIME',
"_X_FIELD" : 'X',
"_Y_FIELD" : 'Y',
"_TIME_FIELD" : 'TIMESTAMP',
"_GEOJSON_LOOKUP" : {"case": 'CASE',
"address": "BLOCK",
"location": 'LOCATION',
"crime": 'CRIME',
"type": 'SUB-TYPE',
"timestamp": 'TIMESTAMP'},
"GEOJSON_COORDS" : ('X', 'Y'),
"DT_CONVERT" : _date_from_csv
}
}
_FIELDS["all_other"] = dict(_FIELDS["all"])
_FIELDS["all_other"]["DT_CONVERT"] = _date_from_other
def _convert_header(header, dic):
lookup = dict()
for field in [dic["_DESCRIPTION_FIELD"], dic["_X_FIELD"], dic["_Y_FIELD"], dic["_TIME_FIELD"]]:
if not field in header:
raise Exception("No field '{}' found in header".format(field))
lookup[field] = header.index(field)
return lookup
def default_burglary_data():
"""Load the default data, if available, giving just "THEFT" data.
:return: An instance of :class:`open_cp.data.TimedPoints` or `None`.
"""
try:
return load(get_default_filename(), {"THEFT"})
except Exception:
return None
def _get_dic(type):
try:
return _FIELDS[type]
except KeyError:
raise ValueError("Don't understand type {}".format(type))
def _load_to_list(file, dic, primary_description_names):
reader = _csv.reader(file)
lookup = _convert_header(next(reader), dic)
dt_convert = dic["DT_CONVERT"]
data = []
for row in reader:
description = row[lookup[dic["_DESCRIPTION_FIELD"]]].strip()
if not description in primary_description_names:
continue
x = row[lookup[dic["_X_FIELD"]]].strip()
y = row[lookup[dic["_Y_FIELD"]]].strip()
t = row[lookup[dic["_TIME_FIELD"]]].strip()
if x != "" and y != "":
data.append((dt_convert(t), float(x), float(y)))
return data
def load(file, primary_description_names, to_meters=True, type="snapshot"):
"""Load data from a CSV file in the expected format.
:param file: Name of the CSV file load, or a file-like object.
:param primary_description_names: Set of names to search for in the
"primary description field". E.g. pass `{"THEFT"}` to return only the
"theft" crime type.
:param to_meters: Convert the coordinates to meters; True by default.
:param type: Either "snapshot" or "all" depending on whether the data
      has headers conforming to the "last year" or "2001 to present" data.
:return: An instance of :class:`open_cp.data.TimedPoints` or `None`.
"""
dic = _get_dic(type)
if isinstance(file, str):
with open(file) as file:
data = _load_to_list(file, dic, primary_description_names)
else:
data = _load_to_list(file, dic, primary_description_names)
data.sort(key = lambda triple : triple[0])
xcoords = _np.empty(len(data))
ycoords = _np.empty(len(data))
for i, (_, x, y) in enumerate(data):
xcoords[i], ycoords[i] = x, y
times = [t for t, _, _ in data]
if to_meters:
xcoords /= _FEET_IN_METERS
ycoords /= _FEET_IN_METERS
return TimedPoints.from_coords(times, xcoords, ycoords)
def _convert_header_for_geojson(header, dic):
try:
column_lookup = {}
for key, col_head in dic["_GEOJSON_LOOKUP"].items():
column_lookup[key] = header.index(col_head)
coord_lookup = [header.index(chead) for chead in dic["GEOJSON_COORDS"]]
return column_lookup, coord_lookup
except KeyError as ex:
raise ValueError("Header not in expected format: {} caused by {}/{}".format(
header, type(ex), ex))
def _generate_GeoJSON_Features(file, dic):
dt_convert = dic["DT_CONVERT"]
reader = _csv.reader(file)
column_lookup, coord_lookup = _convert_header_for_geojson(next(reader), dic)
for row in reader:
properties = {key : row[i] for key, i in column_lookup.items()}
properties["timestamp"] = dt_convert(properties["timestamp"]).isoformat()
if row[coord_lookup[0]] == "":
geometry = None
else:
coordinates = [float(row[i]) for i in coord_lookup]
geometry = {"type":"Point", "coordinates":coordinates}
yield {"geometry": geometry, "properties": properties,
"type": "Feature"}
def generate_GeoJSON_Features(file, type="snapshot"):
"""Generate a sequence of GeoJSON "features" from the CSV file.
See :func:`load_to_GeoJSON`.
:param file: Either a filename, or a file object.
"""
dic = _get_dic(type)
if isinstance(file, str):
with open(file) as f:
yield from _generate_GeoJSON_Features(f, dic)
else:
yield from _generate_GeoJSON_Features(file, dic)
def load_to_GeoJSON(filename, type="snapshot"):
"""Load the specified CSV file to a list of GeoJSON (see
http://geojson.org/) features. Events with no location data have `None`
as the geometry. Timestamps are converted to standard ISO string format.
The returned "properties" have these keys:
- "case" for the "CASE#" field
- "crime" for the "PRIMARY DESCRIPTION" field
- "type" for the "SECONDARY DESCRIPTION" field
- "location" for the "LOCATION DESCRIPTION" field
- "timestamp" for the "DATE OF OCCURRENCE" field
- "address" for the "BLOCK" field
:param filename: Filename of the CSV file to process
:param type: Either "snapshot" or "all" depending on whether the data
      has headers conforming to the "last year" or "2001 to present" data.
:return: List of Python dictionaries in GeoJSON format.
"""
return list(generate_GeoJSON_Features(filename, type))
try:
import geopandas as gpd
import shapely.geometry as _geometry
except:
gpd = None
_geometry = None
def convert_null_geometry_to_empty(frame):
"""Utility method. Convert any geometry in the geoDataFrame which is
"null" (`None` or empty) to a Point type geometry which is empty. The
returned geoDateFrame is suitable for projecting and other geometrical
transformations.
"""
def null_to_point(x):
if x is None or x.is_empty:
return _geometry.Point()
return x
newgeo = frame.geometry.map(null_to_point)
return frame.set_geometry(newgeo)
def convert_null_geometry_to_none(frame):
"""Utility method. Convert any geometry in the geoDataFrame which is
"null" (`None` or empty) to `None`. The returned geoDateFrame is suitable
for saving.
"""
def null_to_none(x):
if x is None or x.is_empty:
return None
return x
newgeo = frame.geometry.map(null_to_none)
return frame.set_geometry(newgeo)
def load_to_geoDataFrame(filename, datetime_as_string=True,
type="snapshot", empty_geometry="none"):
"""Return the same data as :func:`load_to_GeoJSON` but as a geoPandas
data-frame.
:param filename: Filename of the CSV file to process
:param datetime_as_string: Write the timestamp as an ISO formatted string.
Defaults to True which is best for saving the dataframe as e.g. a shape
file. Set to False to get timestamps as python objects, which is best
for using (geo)pandas to analyse the data.
:param type: Either "snapshot" or "all" depending on whether the data
      has headers conforming to the "last year" or "2001 to present" data.
:param empty_geometry: Either "none" to return `None` as the geometry of
crimes which have no location data in the CSV file (this is correct if
you wish to save the data-frame); or "empty" to return an empty `Point`
type (which is correct, for example, if you wish to re-project the
data-frame). Yes, GeoPandas appears to be annoying like this.
"""
geo_data = load_to_GeoJSON(filename, type=type)
if not datetime_as_string:
for feature in geo_data:
feature["properties"]["timestamp"] = _date_from_iso(feature["properties"]["timestamp"])
frame = gpd.GeoDataFrame.from_features(geo_data)
if empty_geometry == "none":
pass
elif empty_geometry == "empty":
frame = convert_null_geometry_to_empty(frame)
else:
raise ValueError("Unknown `empty_geometry` parameter `{}`".format(empty_geometry))
frame.crs = {"init":"epsg:4326"}
return frame
_sides = None
def _load_sides():
global _sides
if _sides is not None:
return
global _datadir
geojson = _path.join(_datadir, "Chicago_Areas.geojson")
frame = gpd.read_file(geojson)
side_mapping = {
"Far North" : [1,2,3,4,9,10,11,12,13,14,76,77],
"Northwest" : [15,16,17,18,19,20],
"North" : [5,6,7,21,22],
"West" : list(range(23, 32)),
"Central" : [8,32,33],
"South" : list(range(34,44)) + [60, 69],
"Southwest" : [56,57,58,59] + list(range(61,69)),
"Far Southwest" : list(range(70,76)),
"Far Southeast" : list(range(44,56))
}
frame["side"] = frame.area_numbe.map(lambda x : next(key
for key, item in side_mapping.items() if int(x) in item) )
_sides = frame.drop(["area", "area_num_1", "comarea", "comarea_id",
"perimeter", "shape_area", "shape_len"], axis=1)
_sides.crs = {"init": "epsg:4326"}
_sides = _sides.to_crs({"init": "epsg:2790"})
def get_side(name):
"""Return a geometry (a polygon, typically) of the outline of the shape
of the given "side" of Chicago, projected to {"init":"epsg:2790"}, which
is Illinois in metres.
Needs the file "Chicago_Areas.geojson" to be in the "datadir". This can
be downloaded from:
https://data.cityofchicago.org/Facilities-Geographic-Boundaries/Boundaries-Community-Areas-current-/cauq-8yn6
:param name: One of "Far North", "Northwest", "North", "West", "Central",
"South", "Southwest", "Far Southwest", "Far Southeast"
"""
_load_sides()
return _sides[_sides.side == name].unary_union
| en | 0.817582 | sources.chicago =============== Reads a CSV file in the format (as of April 2017) of data available from: - https://catalog.data.gov/dataset/crimes-one-year-prior-to-present-e171f - https://catalog.data.gov/dataset/crimes-2001-to-present-398a4 The default data is loaded from a file "chicago.csv" which should be downloaded from one of the above links. The format of the data, frustratingly, differs between the snapshot of last year, and the total. The data is partly anonymous in that the address within a block is obscured, while the geocoding seems complicated (work in progress to understand)... The crime type "HOMICIDE" is reported multiple times in the dataset. Set the default location for search for the default input file. Returns the default filename, if available. Otherwise raises AttributeError. Convert a datetime string in ISO format into a :class:`datetime` instance. :param iso_string: Like "2017-10-23T05:12:39" :return: A :class:`datetime` instance. # Like 4/16/13 5:00 #', Load the default data, if available, giving just "THEFT" data. :return: An instance of :class:`open_cp.data.TimedPoints` or `None`. Load data from a CSV file in the expected format. :param file: Name of the CSV file load, or a file-like object. :param primary_description_names: Set of names to search for in the "primary description field". E.g. pass `{"THEFT"}` to return only the "theft" crime type. :param to_meters: Convert the coordinates to meters; True by default. :param type: Either "snapshot" or "all" depending on whether the data has headers conforming the the data "last year" or "2001 to present". :return: An instance of :class:`open_cp.data.TimedPoints` or `None`. Generate a sequence of GeoJSON "features" from the CSV file. See :func:`load_to_GeoJSON`. :param file: Either a filename, or a file object. Load the specified CSV file to a list of GeoJSON (see http://geojson.org/) features. Events with no location data have `None` as the geometry. Timestamps are converted to standard ISO string format. The returned "properties" have these keys: - "case" for the "CASE#" field - "crime" for the "PRIMARY DESCRIPTION" field - "type" for the "SECONDARY DESCRIPTION" field - "location" for the "LOCATION DESCRIPTION" field - "timestamp" for the "DATE OF OCCURRENCE" field - "address" for the "BLOCK" field :param filename: Filename of the CSV file to process :param type: Either "snapshot" or "all" depending on whether the data has headers conforming the the data "last year" or "2001 to present". :return: List of Python dictionaries in GeoJSON format. Utility method. Convert any geometry in the geoDataFrame which is "null" (`None` or empty) to a Point type geometry which is empty. The returned geoDateFrame is suitable for projecting and other geometrical transformations. Utility method. Convert any geometry in the geoDataFrame which is "null" (`None` or empty) to `None`. The returned geoDateFrame is suitable for saving. Return the same data as :func:`load_to_GeoJSON` but as a geoPandas data-frame. :param filename: Filename of the CSV file to process :param datetime_as_string: Write the timestamp as an ISO formatted string. Defaults to True which is best for saving the dataframe as e.g. a shape file. Set to False to get timestamps as python objects, which is best for using (geo)pandas to analyse the data. :param type: Either "snapshot" or "all" depending on whether the data has headers conforming the the data "last year" or "2001 to present". 
:param empty_geometry: Either "none" to return `None` as the geometry of crimes which have no location data in the CSV file (this is correct if you wish to save the data-frame); or "empty" to return an empty `Point` type (which is correct, for example, if you wish to re-project the data-frame). Yes, GeoPandas appears to be annoying like this. Return a geometry (a polygon, typically) of the outline of the shape of the given "side" of Chicago, projected to {"init":"epsg:2790"}, which is Illinois in metres. Needs the file "Chicago_Areas.geojson" to be in the "datadir". This can be downloaded from: https://data.cityofchicago.org/Facilities-Geographic-Boundaries/Boundaries-Community-Areas-current-/cauq-8yn6 :param name: One of "Far North", "Northwest", "North", "West", "Central", "South", "Southwest", "Far Southwest", "Far Southeast" | 3.247385 | 3 |
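A short driver for the module above, assuming the Chicago CSV snapshot has already been downloaded into a local data directory; the directory path and the chosen crime types are examples only.

# Usage sketch for open_cp.sources.chicago; the path below is a placeholder.
import open_cp.sources.chicago as chicago

chicago.set_data_directory("/path/to/data")  # directory that holds chicago.csv
points = chicago.load(chicago.get_default_filename(), {"THEFT", "BURGLARY"})

# The same file exposed as GeoJSON-style feature dictionaries:
features = chicago.load_to_GeoJSON(chicago.get_default_filename())
print(len(features), features[0]["properties"]["crime"])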
Codility/Lesson/0011.Sieve-of-Eratosthenes/CountSemiprimes/CountSemiprimes.py | kimi0230/LeetcodeGolang | 4 | 10606 | <reponame>kimi0230/LeetcodeGolang<filename>Codility/Lesson/0011.Sieve-of-Eratosthenes/CountSemiprimes/CountSemiprimes.py
# https://github.com/Anfany/Codility-Lessons-By-Python3/blob/master/L11_Sieve%20of%20Eratosthenes/11.2%20CountSemiprimes.md
def solution(N, P, Q):
"""
    Return the number of semiprimes not greater than N inside each interval formed by the elements of arrays P and Q; time complexity O(N * log(log(N)) + M)
    :param N: maximum value a semiprime may take
    :param P: array of interval lower bounds
    :param Q: array of interval upper bounds
    :return: the number of semiprimes obtained for each query
"""
    # A semiprime has only 3 or 4 divisors and must not be the cube of a prime,
    # e.g. divisor chains such as (1, 3, 9, 27) or (1, 5, 25, 125) are excluded.
    # First build a list up to N: the value itself where it is a semiprime, 0 where it is not.
semi_prime = []
k =0
for i in range(1, N + 1):
factor_count = 0
sign = 0
for j in range(1, int(i ** 0.5) + 1):
if i % j == 0:
factor_count += 1
f = i / j
if f != j:
if f == j ** 2:
sign = 1
semi_prime.append(0)
break
else:
factor_count += 1
if factor_count > 4:
sign = 1
semi_prime.append(0)
break
if sign != 1:
if factor_count >= 3:
semi_prime.append(i)
else:
semi_prime.append(0)
    index_dict = {}  # how many semiprimes occur up to and including the current value
    semi_dict = {}  # a value is added here only if it is a semiprime
count = 0
for index, value in enumerate(semi_prime):
if value != 0:
count += 1
index_dict[value] = count
semi_dict[value] = 0
else:
index_dict[index + 1] = count
# index_dict {1: 0, 2: 0, 3: 0, 4: 1, 5: 1, 6: 2, 7: 2, 8: 2, 9: 3, 10: 4, 11: 4, 12: 4, 13: 4, 14: 5, 15: 6, 16: 6, 17: 6, 18: 6, 19: 6, 20: 6, 21: 7, 22: 8, 23: 8, 24: 8, 25: 9, 26: 10}
#semi_dict {4: 0, 6: 0, 9: 0, 10: 0, 14: 0, 15: 0, 21: 0, 22: 0, 25: 0, 26: 0}
print("index_dict",index_dict)
print("semi_dict",semi_dict)
    result_list = []  # count how many semiprimes lie in each requested interval
for i, j in zip(P, Q):
if i in semi_dict:
result_list.append(index_dict[j] - index_dict[i] + 1)
else:
result_list.append(index_dict[j] - index_dict[i])
return result_list
if __name__ == '__main__':
solution(26,[1, 4, 16],[26, 10, 20]) | # https://github.com/Anfany/Codility-Lessons-By-Python3/blob/master/L11_Sieve%20of%20Eratosthenes/11.2%20CountSemiprimes.md
def solution(N, P, Q):
"""
    Return the number of semiprimes not greater than N inside each interval formed by the elements of arrays P and Q; time complexity O(N * log(log(N)) + M)
    :param N: maximum value a semiprime may take
    :param P: array of interval lower bounds
    :param Q: array of interval upper bounds
    :return: the number of semiprimes obtained for each query
"""
    # A semiprime has only 3 or 4 divisors and must not be the cube of a prime,
    # e.g. divisor chains such as (1, 3, 9, 27) or (1, 5, 25, 125) are excluded.
    # First build a list up to N: the value itself where it is a semiprime, 0 where it is not.
semi_prime = []
k =0
for i in range(1, N + 1):
factor_count = 0
sign = 0
for j in range(1, int(i ** 0.5) + 1):
if i % j == 0:
factor_count += 1
f = i / j
if f != j:
if f == j ** 2:
sign = 1
semi_prime.append(0)
break
else:
factor_count += 1
if factor_count > 4:
sign = 1
semi_prime.append(0)
break
if sign != 1:
if factor_count >= 3:
semi_prime.append(i)
else:
semi_prime.append(0)
    index_dict = {}  # how many semiprimes occur up to and including the current value
    semi_dict = {}  # a value is added here only if it is a semiprime
count = 0
for index, value in enumerate(semi_prime):
if value != 0:
count += 1
index_dict[value] = count
semi_dict[value] = 0
else:
index_dict[index + 1] = count
# index_dict {1: 0, 2: 0, 3: 0, 4: 1, 5: 1, 6: 2, 7: 2, 8: 2, 9: 3, 10: 4, 11: 4, 12: 4, 13: 4, 14: 5, 15: 6, 16: 6, 17: 6, 18: 6, 19: 6, 20: 6, 21: 7, 22: 8, 23: 8, 24: 8, 25: 9, 26: 10}
#semi_dict {4: 0, 6: 0, 9: 0, 10: 0, 14: 0, 15: 0, 21: 0, 22: 0, 25: 0, 26: 0}
print("index_dict",index_dict)
print("semi_dict",semi_dict)
result_list = [] # now count how many semiprimes fall inside each requested interval
for i, j in zip(P, Q):
if i in semi_dict:
result_list.append(index_dict[j] - index_dict[i] + 1)
else:
result_list.append(index_dict[j] - index_dict[i])
return result_list
if __name__ == '__main__':
solution(26,[1, 4, 16],[26, 10, 20]) | zh | 0.498726 | # https://github.com/Anfany/Codility-Lessons-By-Python3/blob/master/L11_Sieve%20of%20Eratosthenes/11.2%20CountSemiprimes.md 返回由数组P、Q的元素组成的区间内,不大于N的半素数的个数, 时间复杂度O(N * log(log(N)) + M) :param N: 半素数的最大值 :param P: 数组 :param Q: 数组 :return: 每次查询,得到的半素数的个数 # 半素数只有3或4个因子,并且不能是素数的立方,例如(1, 3, 9, 27)(1, 5, 25, 125)这种情况 # 首先计算出不大于N的半素数列表,是半素数的为其值,不是的为0 # 得出当前数值以及前面一共有几个半素数 # 如果是半素数,则添加到字典中 # index_dict {1: 0, 2: 0, 3: 0, 4: 1, 5: 1, 6: 2, 7: 2, 8: 2, 9: 3, 10: 4, 11: 4, 12: 4, 13: 4, 14: 5, 15: 6, 16: 6, 17: 6, 18: 6, 19: 6, 20: 6, 21: 7, 22: 8, 23: 8, 24: 8, 25: 9, 26: 10} #semi_dict {4: 0, 6: 0, 9: 0, 10: 0, 14: 0, 15: 0, 21: 0, 22: 0, 25: 0, 26: 0} # 开始计算,在指定区间内有几个半素数 | 3.365284 | 3 |
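# Quick check of the task above plus, for contrast, a sketch of the prefix-sum /
# smallest-prime-factor approach the lesson name (Sieve of Eratosthenes) suggests.
# This sketch is illustrative and not taken from the repository above.
def count_semiprimes_prefix(N, P, Q):
    spf = [0] * (N + 1)                     # smallest prime factor for every n <= N
    for i in range(2, N + 1):
        if spf[i] == 0:                     # i is prime
            for j in range(i, N + 1, i):
                if spf[j] == 0:
                    spf[j] = i
    prefix = [0] * (N + 1)                  # prefix[n] = number of semiprimes <= n
    for n in range(2, N + 1):
        rest = n // spf[n]
        is_semi = rest > 1 and spf[rest] == rest   # n = p * q with q prime
        prefix[n] = prefix[n - 1] + (1 if is_semi else 0)
    return [prefix[q] - prefix[p - 1] for p, q in zip(P, Q)]

assert count_semiprimes_prefix(26, [1, 4, 16], [26, 10, 20]) == [10, 4, 0]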
src/zope/formlib/errors.py | zopefoundation/zope.formlib | 4 | 10607 | <reponame>zopefoundation/zope.formlib<filename>src/zope/formlib/errors.py
##############################################################################
#
# Copyright (c) 2006 Zope Foundation and Contributors.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Error related things.
"""
try:
from html import escape
except ImportError: # pragma: NO COVER
from cgi import escape
from zope.component import adapter
from zope.interface import implementer
from zope.interface import Invalid
from zope.i18n import Message
from zope.i18n import translate
from zope.publisher.interfaces.browser import IBrowserRequest
from zope.publisher.browser import BrowserPage
from zope.formlib.interfaces import IWidgetInputErrorView
from zope.formlib.interfaces import IInvalidCSRFTokenError
@implementer(IWidgetInputErrorView)
@adapter(Invalid, IBrowserRequest)
class InvalidErrorView(object):
"""Display a validation error as a snippet of text."""
def __init__(self, context, request):
self.context = context
self.request = request
def snippet(self):
"""Convert a widget input error to an html snippet
>>> from zope.interface.exceptions import Invalid
>>> error = Invalid("You made an error!")
>>> InvalidErrorView(error, None).snippet()
u'<span class="error">You made an error!</span>'
"""
msg = self.context.args[0]
if isinstance(msg, Message):
msg = translate(msg, context=self.request)
return u'<span class="error">%s</span>' % escape(msg)
@adapter(IInvalidCSRFTokenError, IBrowserRequest)
class InvalidCSRFTokenErrorView(BrowserPage):
def update(self):
self.request.response.setStatus(403)
self.request.response.setHeader(
'Expires', 'Jan, 1 Jan 1970 00:00:00 GMT')
self.request.response.setHeader(
'Cache-Control', 'no-store, no-cache, must-revalidate')
self.request.response.setHeader(
'Pragma', 'no-cache')
def render(self):
msg = self.context.args[0]
if isinstance(msg, Message):
msg = translate(msg, context=self.request)
return escape(msg)
def __call__(self):
self.update()
return self.render()
| ##############################################################################
#
# Copyright (c) 2006 Zope Foundation and Contributors.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Error related things.
"""
try:
from html import escape
except ImportError: # pragma: NO COVER
from cgi import escape
from zope.component import adapter
from zope.interface import implementer
from zope.interface import Invalid
from zope.i18n import Message
from zope.i18n import translate
from zope.publisher.interfaces.browser import IBrowserRequest
from zope.publisher.browser import BrowserPage
from zope.formlib.interfaces import IWidgetInputErrorView
from zope.formlib.interfaces import IInvalidCSRFTokenError
@implementer(IWidgetInputErrorView)
@adapter(Invalid, IBrowserRequest)
class InvalidErrorView(object):
"""Display a validation error as a snippet of text."""
def __init__(self, context, request):
self.context = context
self.request = request
def snippet(self):
"""Convert a widget input error to an html snippet
>>> from zope.interface.exceptions import Invalid
>>> error = Invalid("You made an error!")
>>> InvalidErrorView(error, None).snippet()
u'<span class="error">You made an error!</span>'
"""
msg = self.context.args[0]
if isinstance(msg, Message):
msg = translate(msg, context=self.request)
return u'<span class="error">%s</span>' % escape(msg)
@adapter(IInvalidCSRFTokenError, IBrowserRequest)
class InvalidCSRFTokenErrorView(BrowserPage):
def update(self):
self.request.response.setStatus(403)
self.request.response.setHeader(
'Expires', 'Jan, 1 Jan 1970 00:00:00 GMT')
self.request.response.setHeader(
'Cache-Control', 'no-store, no-cache, must-revalidate')
self.request.response.setHeader(
'Pragma', 'no-cache')
def render(self):
msg = self.context.args[0]
if isinstance(msg, Message):
msg = translate(msg, context=self.request)
return escape(msg)
def __call__(self):
self.update()
return self.render() | en | 0.398117 | ############################################################################## # # Copyright (c) 2006 Zope Foundation and Contributors. # # This software is subject to the provisions of the Zope Public License, # Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. # THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS # FOR A PARTICULAR PURPOSE. # ############################################################################## Error related things. # pragma: NO COVER Display a validation error as a snippet of text. Convert a widget input error to an html snippet >>> from zope.interface.exceptions import Invalid >>> error = Invalid("You made an error!") >>> InvalidErrorView(error, None).snippet() u'<span class="error">You made an error!</span>' | 2.010225 | 2 |
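# Illustration only (not from the package above): exercising InvalidErrorView directly,
# without ZCML registration, to show that the validation message is HTML-escaped before
# being wrapped. The message text is invented for the demo.
from zope.interface import Invalid

def _demo_invalid_snippet():
    view = InvalidErrorView(Invalid("Value <must> be positive"), None)
    return view.snippet()   # '<span class="error">Value &lt;must&gt; be positive</span>'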
src/packagedcode/about.py | sthagen/nexB-scancode-toolkit | 0 | 10608 | #
# Copyright (c) nexB Inc. and others. All rights reserved.
# ScanCode is a trademark of nexB Inc.
# SPDX-License-Identifier: Apache-2.0
# See http://www.apache.org/licenses/LICENSE-2.0 for the license text.
# See https://github.com/nexB/scancode-toolkit for support or download.
# See https://aboutcode.org for more information about nexB OSS projects.
#
import io
import os
from pathlib import Path
import saneyaml
from packagedcode import models
from packageurl import PackageURL
# TODO: Override get_package_resource so it returns the Resource that the ABOUT file is describing
TRACE = os.environ.get('SCANCODE_DEBUG_PACKAGE', False)
def logger_debug(*args):
pass
if TRACE:
import logging
import sys
logger = logging.getLogger(__name__)
logging.basicConfig(stream=sys.stdout)
logger.setLevel(logging.DEBUG)
def logger_debug(*args):
return logger.debug(
' '.join(isinstance(a, str) and a or repr(a) for a in args)
)
class AboutFileHandler(models.DatafileHandler):
datasource_id = 'about_file'
default_package_type = 'about'
path_patterns = ('*.ABOUT',)
description = 'AboutCode ABOUT file'
documentation_url = 'https://aboutcode-toolkit.readthedocs.io/en/latest/specification.html'
@classmethod
def parse(cls, location):
"""
Yield one or more Package manifest objects given a file ``location`` pointing to a
package archive, manifest or similar.
"""
with io.open(location, encoding='utf-8') as loc:
package_data = saneyaml.load(loc.read())
# About files can contain any purl and also have a namespace
about_type = package_data.get('type')
about_ns = package_data.get('namespace')
purl_type = None
purl_ns = None
purl = package_data.get('purl')
if purl:
purl = PackageURL.from_string(purl)
if purl:
purl_type = purl.type
package_type = about_type or purl_type or cls.default_package_type
package_ns = about_ns or purl_ns
name = package_data.get('name')
version = package_data.get('version')
homepage_url = package_data.get('home_url') or package_data.get('homepage_url')
download_url = package_data.get('download_url')
copyright_statement = package_data.get('copyright')
license_expression = package_data.get('license_expression')
declared_license = license_expression
owner = package_data.get('owner')
if not isinstance(owner, str):
owner = repr(owner)
parties = [models.Party(type=models.party_person, name=owner, role='owner')]
# FIXME: also include notice_file and license_file(s) as file_references
file_references = []
about_resource = package_data.get('about_resource')
if about_resource:
file_references.append(models.FileReference(path=about_resource))
# FIXME: we should put the unprocessed attributes in extra data
yield models.PackageData(
datasource_id=cls.datasource_id,
type=package_type,
namespace=package_ns,
name=name,
version=version,
declared_license=declared_license,
license_expression=license_expression,
copyright=copyright_statement,
parties=parties,
homepage_url=homepage_url,
download_url=download_url,
file_references=file_references,
)
@classmethod
def assemble(cls, package_data, resource, codebase):
"""
Yield a Package. Note that ABOUT files do not carry dependencies.
"""
datafile_path = resource.path
# do we have enough to create a package?
if package_data.purl:
package = models.Package.from_package_data(
package_data=package_data,
datafile_path=datafile_path,
)
package_uid = package.package_uid
# NOTE: we do not attach files to the Package level. Instead we
# update `for_package` in the file
resource.for_packages.append(package_uid)
resource.save(codebase)
if not package.license_expression:
package.license_expression = cls.compute_normalized_license(package)
yield package
if resource.pid is not None and package_data.file_references:
parent_resource = resource.parent(codebase)
if parent_resource and package_data.file_references:
root_path = Path(parent_resource.path)
# FIXME: we should be able to get the path relatively to the
# ABOUT file resource a file ref extends from the root of
# the filesystem
file_references_by_path = {
str(root_path / ref.path): ref
for ref in package.file_references
}
for res in parent_resource.walk(codebase):
ref = file_references_by_path.get(res.path)
if not ref:
continue
# path is found and processed: remove it, so we can
# check if we found all of them
del file_references_by_path[res.path]
res.for_packages.append(package_uid)
res.save(codebase)
yield res
# if we have left over file references, add these to extra data
if file_references_by_path:
missing = sorted(file_references_by_path.values(), key=lambda r: r.path)
package.extra_data['missing_file_references'] = missing
else:
package.extra_data['missing_file_references'] = package_data.file_references[:]
# we yield this as we do not want this further processed
yield resource
| #
# Copyright (c) nexB Inc. and others. All rights reserved.
# ScanCode is a trademark of nexB Inc.
# SPDX-License-Identifier: Apache-2.0
# See http://www.apache.org/licenses/LICENSE-2.0 for the license text.
# See https://github.com/nexB/scancode-toolkit for support or download.
# See https://aboutcode.org for more information about nexB OSS projects.
#
import io
import os
from pathlib import Path
import saneyaml
from packagedcode import models
from packageurl import PackageURL
# TODO: Override get_package_resource so it returns the Resource that the ABOUT file is describing
TRACE = os.environ.get('SCANCODE_DEBUG_PACKAGE', False)
def logger_debug(*args):
pass
if TRACE:
import logging
import sys
logger = logging.getLogger(__name__)
logging.basicConfig(stream=sys.stdout)
logger.setLevel(logging.DEBUG)
def logger_debug(*args):
return logger.debug(
' '.join(isinstance(a, str) and a or repr(a) for a in args)
)
class AboutFileHandler(models.DatafileHandler):
datasource_id = 'about_file'
default_package_type = 'about'
path_patterns = ('*.ABOUT',)
description = 'AboutCode ABOUT file'
documentation_url = 'https://aboutcode-toolkit.readthedocs.io/en/latest/specification.html'
@classmethod
def parse(cls, location):
"""
Yield one or more Package manifest objects given a file ``location`` pointing to a
package archive, manifest or similar.
"""
with io.open(location, encoding='utf-8') as loc:
package_data = saneyaml.load(loc.read())
# About files can contain any purl and also have a namespace
about_type = package_data.get('type')
about_ns = package_data.get('namespace')
purl_type = None
purl_ns = None
purl = package_data.get('purl')
if purl:
purl = PackageURL.from_string(purl)
if purl:
purl_type = purl.type
package_type = about_type or purl_type or cls.default_package_type
package_ns = about_ns or purl_ns
name = package_data.get('name')
version = package_data.get('version')
homepage_url = package_data.get('home_url') or package_data.get('homepage_url')
download_url = package_data.get('download_url')
copyright_statement = package_data.get('copyright')
license_expression = package_data.get('license_expression')
declared_license = license_expression
owner = package_data.get('owner')
if not isinstance(owner, str):
owner = repr(owner)
parties = [models.Party(type=models.party_person, name=owner, role='owner')]
# FIXME: also include notice_file and license_file(s) as file_references
file_references = []
about_resource = package_data.get('about_resource')
if about_resource:
file_references.append(models.FileReference(path=about_resource))
# FIXME: we should put the unprocessed attributes in extra data
yield models.PackageData(
datasource_id=cls.datasource_id,
type=package_type,
namespace=package_ns,
name=name,
version=version,
declared_license=declared_license,
license_expression=license_expression,
copyright=copyright_statement,
parties=parties,
homepage_url=homepage_url,
download_url=download_url,
file_references=file_references,
)
@classmethod
def assemble(cls, package_data, resource, codebase):
"""
Yield a Package. Note that ABOUT files do not carry dependencies.
"""
datafile_path = resource.path
# do we have enough to create a package?
if package_data.purl:
package = models.Package.from_package_data(
package_data=package_data,
datafile_path=datafile_path,
)
package_uid = package.package_uid
# NOTE: we do not attach files to the Package level. Instead we
# update `for_package` in the file
resource.for_packages.append(package_uid)
resource.save(codebase)
if not package.license_expression:
package.license_expression = cls.compute_normalized_license(package)
yield package
if resource.pid is not None and package_data.file_references:
parent_resource = resource.parent(codebase)
if parent_resource and package_data.file_references:
root_path = Path(parent_resource.path)
# FIXME: we should be able to get the path relatively to the
# ABOUT file resource a file ref extends from the root of
# the filesystem
file_references_by_path = {
str(root_path / ref.path): ref
for ref in package.file_references
}
for res in parent_resource.walk(codebase):
ref = file_references_by_path.get(res.path)
if not ref:
continue
# path is found and processed: remove it, so we can
# check if we found all of them
del file_references_by_path[res.path]
res.for_packages.append(package_uid)
res.save(codebase)
yield res
# if we have left over file references, add these to extra data
if file_references_by_path:
missing = sorted(file_references_by_path.values(), key=lambda r: r.path)
package.extra_data['missing_file_references'] = missing
else:
package.extra_data['missing_file_references'] = package_data.file_references[:]
# we yield this as we do not want this further processed
yield resource
| en | 0.845177 | # # Copyright (c) nexB Inc. and others. All rights reserved. # ScanCode is a trademark of nexB Inc. # SPDX-License-Identifier: Apache-2.0 # See http://www.apache.org/licenses/LICENSE-2.0 for the license text. # See https://github.com/nexB/scancode-toolkit for support or download. # See https://aboutcode.org for more information about nexB OSS projects. # # TODO: Override get_package_resource so it returns the Resource that the ABOUT file is describing Yield one or more Package manifest objects given a file ``location`` pointing to a package archive, manifest or similar. # About files can contain any purl and also have a namespace # FIXME: also include notice_file and license_file(s) as file_references # FIXME: we should put the unprocessed attributes in extra data Yield a Package. Note that ABOUT files do not carry dependencies. # do we have enough to create a package? # NOTE: we do not attach files to the Package level. Instead we # update `for_package` in the file # FIXME: we should be able to get the path relatively to the # ABOUT file resource a file ref extends from the root of # the filesystem # path is found and processed: remove it, so we can # check if we found all of them # if we have left over file references, add these to extra data # we yield this as we do not want this further processed | 2.159453 | 2 |
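# Sketch only (not part of the handler above): a minimal .ABOUT document and a helper
# that feeds it through AboutFileHandler.parse. The field values are invented for
# illustration; the field names follow the specification linked in the class.
import os
import tempfile
import textwrap

def _demo_parse_about():
    text = textwrap.dedent("""\
        about_resource: mylib-1.0.tar.gz
        name: mylib
        version: 1.0
        license_expression: apache-2.0
        owner: Example Org
        homepage_url: https://example.org/mylib
    """)
    fd, path = tempfile.mkstemp(suffix=".ABOUT")
    with os.fdopen(fd, "w", encoding="utf-8") as handle:
        handle.write(text)
    try:
        return list(AboutFileHandler.parse(path))   # one PackageData for this file
    finally:
        os.remove(path)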
gazepattern/eyedetector/admin.py | AriRodriguezCruz/mcfgpr | 0 | 10609 | # -*- coding: utf-8 -*-
#django
from django.contrib import admin
from django.db import transaction
#python
import csv
from decimal import Decimal
#gazepattern
from .models import Experiment, ExperimentPoint, Image, ImageRectangle, ExperimentPointCSV, ExperimentFunction
@transaction.atomic
def procesar(modeladmin, request, queryset):
for query in queryset:
file = query.file
with open(file.path) as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
rows = [row for row in csv_reader if len(row)]
for row in rows:
experiment_id = int(row[0])
fixation_number = int(row[1])
x = Decimal(row[2])
y = Decimal(row[3])
experiment = Experiment.objects.get(pk=experiment_id)
experiment_point = ExperimentPoint()
experiment_point.experiment = experiment
experiment_point.fixation_number = fixation_number
experiment_point.x = x
experiment_point.y = y
experiment_point.save()
procesar.short_description = "Procesar CSV para generar experiments points"
class ExperimentPointCSVAdmin(admin.ModelAdmin):
list_display = ['id', 'file']
ordering = ['id']
actions = [procesar, ]
class ExperimentPointAdmin(admin.ModelAdmin):
list_display = ['id', 'experiment_id', 'fixation_number', 'x', 'y']
ordering = ['id']
search_fields = ["experiment__id"]
class ImageAdmin(admin.ModelAdmin):
list_display = ['id', 'name']
ordering = ['id']
class ExperimentAdmin(admin.ModelAdmin):
list_display = ['id', 'name', 'description']
ordering = ['id']
class ImageRectangleAdmin(admin.ModelAdmin):
list_display = ['id', 'image_id','name']
ordering = ['id']
search_fields = ['image__id']
class ExperimentFunctionAdmin(admin.ModelAdmin):
list_display = ['id', 'experiment_id', 'function']
ordering = ['id']
search_fields = ['experiment__id']
admin.site.register(ExperimentPointCSV, ExperimentPointCSVAdmin)
admin.site.register(ExperimentPoint, ExperimentPointAdmin)
admin.site.register(Image, ImageAdmin)
admin.site.register(Experiment, ExperimentAdmin)
admin.site.register(ImageRectangle, ImageRectangleAdmin)
admin.site.register(ExperimentFunction, ExperimentFunctionAdmin) | # -*- coding: utf-8 -*-
#django
from django.contrib import admin
from django.db import transaction
#python
import csv
from decimal import Decimal
#gazepattern
from .models import Experiment, ExperimentPoint, Image, ImageRectangle, ExperimentPointCSV, ExperimentFunction
@transaction.atomic
def procesar(modeladmin, request, queryset):
for query in queryset:
file = query.file
with open(file.path) as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
rows = [row for row in csv_reader if len(row)]
for row in rows:
experiment_id = int(row[0])
fixation_number = int(row[1])
x = Decimal(row[2])
y = Decimal(row[3])
experiment = Experiment.objects.get(pk=experiment_id)
experiment_point = ExperimentPoint()
experiment_point.experiment = experiment
experiment_point.fixation_number = fixation_number
experiment_point.x = x
experiment_point.y = y
experiment_point.save()
procesar.short_description = "Procesar CSV para generar experiments points"
class ExperimentPointCSVAdmin(admin.ModelAdmin):
list_display = ['id', 'file']
ordering = ['id']
actions = [procesar, ]
class ExperimentPointAdmin(admin.ModelAdmin):
list_display = ['id', 'experiment_id', 'fixation_number', 'x', 'y']
ordering = ['id']
search_fields = ["experiment__id"]
class ImageAdmin(admin.ModelAdmin):
list_display = ['id', 'name']
ordering = ['id']
class ExperimentAdmin(admin.ModelAdmin):
list_display = ['id', 'name', 'description']
ordering = ['id']
class ImageRectangleAdmin(admin.ModelAdmin):
list_display = ['id', 'image_id','name']
ordering = ['id']
search_fields = ['image__id']
class ExperimentFunctionAdmin(admin.ModelAdmin):
list_display = ['id', 'experiment_id', 'function']
ordering = ['id']
search_fields = ['experiment__id']
admin.site.register(ExperimentPointCSV, ExperimentPointCSVAdmin)
admin.site.register(ExperimentPoint, ExperimentPointAdmin)
admin.site.register(Image, ImageAdmin)
admin.site.register(Experiment, ExperimentAdmin)
admin.site.register(ImageRectangle, ImageRectangleAdmin)
admin.site.register(ExperimentFunction, ExperimentFunctionAdmin) | en | 0.399694 | # -*- coding: utf-8 -*- #django #python #gazepattern | 2.069042 | 2 |
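# Sketch only (not from the repository above): the `procesar` admin action expects each
# CSV row to carry experiment_id, fixation_number, x, y in that order, so a compatible
# file can be produced like this before uploading it as an ExperimentPointCSV.
import csv

def write_fixations_csv(path, experiment_id, fixations):
    # fixations: iterable of (fixation_number, x, y) tuples
    with open(path, "w", newline="") as handle:
        writer = csv.writer(handle)
        for number, x, y in fixations:
            writer.writerow([experiment_id, number, x, y])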
orders/tests/test_views.py | ms0680146/Order_System | 0 | 10610 | <reponame>ms0680146/Order_System<filename>orders/tests/test_views.py
from django.test import TestCase, Client
from django.urls import reverse
from orders.models import Order, OrderItem
from datetime import datetime
from django.utils.timezone import get_current_timezone
import pytz
class TestViews(TestCase):
def setUp(self):
self.client = Client()
def test_home_GET(self):
response = self.client.get(reverse('home'))
self.assertEquals(response.status_code, 200)
self.assertTemplateUsed(response, 'layout.html')
def test_piechart_GET(self):
response = self.client.get(reverse('piechart'))
self.assertEquals(response.status_code, 200)
self.assertTemplateUsed(response, 'pages/piechart.html')
def test_cohort_GET(self):
response = self.client.get(reverse('cohort'))
self.assertEquals(response.status_code, 200)
self.assertTemplateUsed(response, 'pages/cohort.html')
def test_barchart_GET(self):
response = self.client.get(reverse('barchart'))
self.assertEquals(response.status_code, 200)
self.assertTemplateUsed(response, 'pages/barchart.html')
def test_get_shipping_data_GET(self):
tz = get_current_timezone()
shipping_free = Order.objects.create(
order_id=1,
customer_id=10,
shipping=0,
created_at=tz.localize(datetime.now())
)
shipping_need = Order.objects.create(
order_id=2,
customer_id=14,
shipping=80,
created_at=tz.localize(datetime.now())
)
response = self.client.get(reverse('api-shipping-data'))
self.assertJSONEqual(response.content, {"labels": ["free shipping", "need shipping"], "counts": [1, 1]})
def test_get_top3_products_GET(self):
product1 = OrderItem.objects.create(
order_id=1,
product_name='product1',
qty=3
)
product2 = OrderItem.objects.create(
order_id=2,
product_name='product2',
qty=2
)
product2_1 = OrderItem.objects.create(
order_id=3,
product_name='product2',
qty=5
)
product3 = OrderItem.objects.create(
order_id=4,
product_name='product3',
qty=1
)
product4 = OrderItem.objects.create(
order_id=5,
product_name='product4',
qty=2
)
response = self.client.get(reverse('api-top3-products'))
self.assertJSONEqual(response.content, {"labels": ["product2", "product1", "product4"], "counts": [7, 3, 2]}) | from django.test import TestCase, Client
from django.urls import reverse
from orders.models import Order, OrderItem
from datetime import datetime
from django.utils.timezone import get_current_timezone
import pytz
class TestViews(TestCase):
def setUp(self):
self.client = Client()
def test_home_GET(self):
response = self.client.get(reverse('home'))
self.assertEquals(response.status_code, 200)
self.assertTemplateUsed(response, 'layout.html')
def test_piechart_GET(self):
response = self.client.get(reverse('piechart'))
self.assertEquals(response.status_code, 200)
self.assertTemplateUsed(response, 'pages/piechart.html')
def test_cohort_GET(self):
response = self.client.get(reverse('cohort'))
self.assertEquals(response.status_code, 200)
self.assertTemplateUsed(response, 'pages/cohort.html')
def test_barchart_GET(self):
response = self.client.get(reverse('barchart'))
self.assertEquals(response.status_code, 200)
self.assertTemplateUsed(response, 'pages/barchart.html')
def test_get_shipping_data_GET(self):
tz = get_current_timezone()
shipping_free = Order.objects.create(
order_id=1,
customer_id=10,
shipping=0,
created_at=tz.localize(datetime.now())
)
shipping_need = Order.objects.create(
order_id=2,
customer_id=14,
shipping=80,
created_at=tz.localize(datetime.now())
)
response = self.client.get(reverse('api-shipping-data'))
self.assertJSONEqual(response.content, {"labels": ["free shipping", "need shipping"], "counts": [1, 1]})
def test_get_top3_products_GET(self):
product1 = OrderItem.objects.create(
order_id=1,
product_name='product1',
qty=3
)
product2 = OrderItem.objects.create(
order_id=2,
product_name='product2',
qty=2
)
product2_1 = OrderItem.objects.create(
order_id=3,
product_name='product2',
qty=5
)
product3 = OrderItem.objects.create(
order_id=4,
product_name='product3',
qty=1
)
product4 = OrderItem.objects.create(
order_id=5,
product_name='product4',
qty=2
)
response = self.client.get(reverse('api-top3-products'))
self.assertJSONEqual(response.content, {"labels": ["product2", "product1", "product4"], "counts": [7, 3, 2]}) | none | 1 | 2.421433 | 2 |
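# The assertions above pin down the JSON contract of the endpoints; a view that satisfies
# test_get_top3_products_GET could aggregate roughly as sketched here. This is an
# illustrative guess, not the project's actual implementation.
from django.db.models import Sum
from django.http import JsonResponse
from orders.models import OrderItem

def top3_products_sketch(request):
    rows = (OrderItem.objects.values("product_name")
            .annotate(total=Sum("qty"))
            .order_by("-total")[:3])
    return JsonResponse({
        "labels": [row["product_name"] for row in rows],
        "counts": [row["total"] for row in rows],
    })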
|
codigo/hexagonal/app/adapter/light_bulb_repository.py | VulturARG/charla_01 | 0 | 10611 | <reponame>VulturARG/charla_01<filename>codigo/hexagonal/app/adapter/light_bulb_repository.py
from codigo.hexagonal.app.domain.switchable_repository import Switchable
class LightBulb(Switchable):
def turn_on(self) -> bool:
print("Connecting with the device...")
print("The light is on")
return True
def turn_off(self) -> bool:
print("The light is off")
print("Disconnecting with the device...")
return False
| from codigo.hexagonal.app.domain.switchable_repository import Switchable
class LightBulb(Switchable):
def turn_on(self) -> bool:
print("Connecting with the device...")
print("The light is on")
return True
def turn_off(self) -> bool:
print("The light is off")
print("Disconnecting with the device...")
return False | none | 1 | 3.024101 | 3 |
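# Illustration only (not from the repository above): in the hexagonal layout the
# application core depends on the Switchable port, so the LightBulb adapter (or a
# hypothetical replacement such as a relay driver) can be swapped in freely.
class PowerSwitchService:
    def __init__(self, device: Switchable):
        self._device = device

    def toggle(self, on: bool) -> bool:
        return self._device.turn_on() if on else self._device.turn_off()

# Example wiring: PowerSwitchService(LightBulb()).toggle(True)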
|
aula12/ex1.py | otaviobizulli/python-exercices | 0 | 10612 | from random import randint
menor = 100
linha = 0
maior = 0
m = []
for i in range(10):
m.append([])
for j in range(10):
m[i].append(randint(1,99))
for i in range(10):
for j in range(10):
print(f'{m[i][j]:2}',end=' ')
print()
for i in range(10):
for j in range(10):
if m[i][j] > maior:
maior = m[i][j]
linha = i
for i in range(10):
if m[linha][i] < menor:
menor = m[linha][i]
print(f'o minimax é {menor}, com o maior sendo {maior} na linha {linha+1}.')
| from random import randint
menor = 100
linha = 0
maior = 0
m = []
for i in range(10):
m.append([])
for j in range(10):
m[i].append(randint(1,99))
for i in range(10):
for j in range(10):
print(f'{m[i][j]:2}',end=' ')
print()
for i in range(10):
for j in range(10):
if m[i][j] > maior:
maior = m[i][j]
linha = i
for i in range(10):
if m[linha][i] < menor:
menor = m[linha][i]
print(f'o minimax é {menor}, com o maior sendo {maior} na linha {linha+1}.')
| none | 1 | 3.566015 | 4 |
|
src/token_classification/format.py | adriens63/BERT_fine_tuning_for_MLM_and_token_classification | 0 | 10613 | <reponame>adriens63/BERT_fine_tuning_for_MLM_and_token_classification
import os.path as osp
import argparse
import yaml
from src.token_classification.archs.data_formatter import *
# ********************* launch formating ***********************
# cmd to launch : python -m src.token_classification.format --config ./src/token_classification/config/config.yml
if __name__ == '__main__':
parser = argparse.ArgumentParser(description = 'formatting for labeling')
parser.add_argument('--config', type=str, required=True, help='path to yaml config')
args = parser.parse_args()
with open(args.config, 'r') as f:
config = yaml.safe_load(f)
asigning_variables(config)
print('.... Start formatting')
path = osp.join(config['path'], config['offres'])
yaml_path = osp.join(config['path'], config['yaml'])
formatter = Formatter(path, yaml_path)
formatter.generate_name()
formatter.load()
formatter.sort_desc()
formatter.format_to_jsonl_in_proportions(n_desc = config['n_sequences'])
print('done;')
print()
print('/!\ Be careful to change the owner of the file before pasting it in doccano with the following command : sudo chown <user> <file>')
| import os.path as osp
import argparse
import yaml
from src.token_classification.archs.data_formatter import *
# ********************* launch formatting ***********************
# cmd to launch : python -m src.token_classification.format --config ./src/token_classification/config/config.yml
if __name__ == '__main__':
parser = argparse.ArgumentParser(description = 'formatting for labeling')
parser.add_argument('--config', type=str, required=True, help='path to yaml config')
args = parser.parse_args()
with open(args.config, 'r') as f:
config = yaml.safe_load(f)
asigning_variables(config)
print('.... Start formatting')
path = osp.join(config['path'], config['offres'])
yaml_path = osp.join(config['path'], config['yaml'])
formatter = Formatter(path, yaml_path)
formatter.generate_name()
formatter.load()
formatter.sort_desc()
formatter.format_to_jsonl_in_proportions(n_desc = config['n_sequences'])
print('done;')
print()
print('/!\ Be careful to change the owner of the file before pasting it in doccano with the following command : sudo chown <user> <file>') | en | 0.199583 | # ********************* launch formating *********************** # cmd to launch : python -m src.token_classification.format --config ./src/token_classification/config/config.yml | 2.524344 | 3 |
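# Sketch only: the script above reads at least the keys `path`, `offres`, `yaml` and
# `n_sequences` from its YAML config (asigning_variables may consume more). The values
# below are invented placeholders; dumping the dict gives a file usable via --config.
import yaml

demo_config = {
    "path": "/data/token_classification",
    "offres": "offres.csv",
    "yaml": "labels.yml",
    "n_sequences": 500,
}

with open("demo_config.yml", "w") as handle:
    yaml.safe_dump(demo_config, handle)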
add.py | cleolepart/timedomain | 0 | 10614 | <filename>add.py<gh_stars>0
from __future__ import absolute_import, division, print_function
import os, sys, time
import numpy as np
import scipy.sparse
import scipy.linalg
import scipy.sparse.linalg
from astropy.table import Table, Column
import multiprocessing
from desiutil.log import get_logger
from desispec.interpolation import resample_flux
from desispec.spectra import Spectra
from desispec.resolution import Resolution
from desispec.fiberbitmasking import get_all_fiberbitmask_with_amp, get_all_nonamp_fiberbitmask_val, get_justamps_fiberbitmask
from desispec.specscore import compute_coadd_scores
from desispec.coaddition import coadd_fibermap
def add(spectra, cosmics_nsig=0.) :
"""
Co-add the spectra for each target and each camera. The input spectra object is modified in place.
Args:
spectra: desispec.spectra.Spectra object
Options:
cosmics_nsig: float, nsigma clipping threshold for cosmics rays
"""
log = get_logger()
targets = np.unique(spectra.fibermap["TARGETID"])
ntarget=targets.size
log.debug("number of targets= {}".format(ntarget))
for b in spectra.bands :
log.debug("coadding band '{}'".format(b))
nwave=spectra.wave[b].size
tflux=np.zeros((ntarget,nwave),dtype=spectra.flux[b].dtype)
tivar=np.zeros((ntarget,nwave),dtype=spectra.ivar[b].dtype)
if spectra.mask is not None :
tmask=np.zeros((ntarget,nwave),dtype=spectra.mask[b].dtype)
else :
tmask=None
trdata=np.zeros((ntarget,spectra.resolution_data[b].shape[1],nwave),dtype=spectra.resolution_data[b].dtype)
fiberstatus_bits = get_all_fiberbitmask_with_amp(b)
good_fiberstatus = ( (spectra.fibermap["FIBERSTATUS"] & fiberstatus_bits) == 0 )
for i,tid in enumerate(targets) :
jj=np.where( (spectra.fibermap["TARGETID"]==tid) & good_fiberstatus )[0]
#- if all spectra were flagged as bad (FIBERSTATUS != 0), continue
#- to next target, leaving tflux and tivar=0 for this target
if len(jj) == 0:
continue
if cosmics_nsig is not None and cosmics_nsig > 0 and len(jj)>2 :
# interpolate over bad measurements
# to be able to compute gradient next
# to a bad pixel and identify outlier
# many cosmics residuals are on edge
# of cosmic ray trace, and so can be
# next to a masked flux bin
grad=[]
gradvar=[]
for j in jj :
if spectra.mask is not None :
ttivar = spectra.ivar[b][j]*(spectra.mask[b][j]==0)
else :
ttivar = spectra.ivar[b][j]
good = (ttivar>0)
bad = ~good
if np.sum(good)==0 :
continue
nbad = np.sum(bad)
ttflux = spectra.flux[b][j].copy()
if nbad>0 :
ttflux[bad] = np.interp(spectra.wave[b][bad],spectra.wave[b][good],ttflux[good])
ttivar = spectra.ivar[b][j].copy()
if nbad>0 :
ttivar[bad] = np.interp(spectra.wave[b][bad],spectra.wave[b][good],ttivar[good])
ttvar = 1./(ttivar+(ttivar==0))
ttflux[1:] = ttflux[1:]-ttflux[:-1]
ttvar[1:] = ttvar[1:]+ttvar[:-1]
ttflux[0] = 0
grad.append(ttflux)
gradvar.append(ttvar)
#tivar_unmasked= np.sum(spectra.ivar[b][jj],axis=0)
tivar_unmasked = 1 / np.sum(1/spectra.ivar[b][jj],axis=0)
if spectra.mask is not None :
ivarjj=spectra.ivar[b][jj]*(spectra.mask[b][jj]==0)
else :
ivarjj=spectra.ivar[b][jj]
if cosmics_nsig is not None and cosmics_nsig > 0 and len(jj)>2 :
grad=np.array(grad)
gradvar=np.array(gradvar)
gradivar=(gradvar>0)/np.array(gradvar+(gradvar==0))
nspec=grad.shape[0]
sgradivar=np.sum(gradivar)
if sgradivar>0 :
meangrad=np.sum(gradivar*grad,axis=0)/sgradivar
deltagrad=grad-meangrad
chi2=np.sum(gradivar*deltagrad**2,axis=0)/(nspec-1)
bad = (chi2>cosmics_nsig**2)
nbad = np.sum(bad)
if nbad>0 :
log.info("masking {} values for targetid={}".format(nbad,tid))
badindex=np.where(bad)[0]
for bi in badindex :
k=np.argmax(gradivar[:,bi]*deltagrad[:,bi]**2)
ivarjj[k,bi]=0.
log.debug("masking spec {} wave={}".format(k,spectra.wave[b][bi]))
#tivar[i]=np.sum(ivarjj,axis=0)
tivar[i]= 1 / np.sum(1/ivarjj,axis=0)
tflux[i]=np.sum(spectra.flux[b][jj],axis=0)
for r in range(spectra.resolution_data[b].shape[1]) :
trdata[i,r]=np.sum((spectra.resolution_data[b][jj,r]),axis=0) # not sure applying mask is wise here
bad=(tivar[i]==0)
if np.sum(bad)>0 :
tivar[i][bad] = 1 / np.sum(1/spectra.ivar[b][jj][:,bad],axis=0) # if all masked, keep original ivar
tflux[i][bad] = np.sum(spectra.flux[b][jj][:,bad],axis=0)
ok=(tivar[i]>0)
#if np.sum(ok)>0 :
#tflux[i][ok] /= tivar[i][ok]
ok=(tivar_unmasked>0)
if np.sum(ok)>0 :
trdata[i][:,ok] /= tivar_unmasked[ok]
if spectra.mask is not None :
tmask[i] = np.bitwise_or.reduce(spectra.mask[b][jj],axis=0)
spectra.flux[b] = tflux
spectra.ivar[b] = tivar
if spectra.mask is not None :
spectra.mask[b] = tmask
spectra.resolution_data[b] = trdata
if spectra.scores is not None:
orig_scores = Table(spectra.scores.copy())
orig_scores['TARGETID'] = spectra.fibermap['TARGETID']
else:
orig_scores = None
spectra.fibermap=coadd_fibermap(spectra.fibermap)
spectra.scores=None
compute_coadd_scores(spectra, orig_scores, update_coadd=True)
| <filename>add.py<gh_stars>0
from __future__ import absolute_import, division, print_function
import os, sys, time
import numpy as np
import scipy.sparse
import scipy.linalg
import scipy.sparse.linalg
from astropy.table import Table, Column
import multiprocessing
from desiutil.log import get_logger
from desispec.interpolation import resample_flux
from desispec.spectra import Spectra
from desispec.resolution import Resolution
from desispec.fiberbitmasking import get_all_fiberbitmask_with_amp, get_all_nonamp_fiberbitmask_val, get_justamps_fiberbitmask
from desispec.specscore import compute_coadd_scores
from desispec.coaddition import coadd_fibermap
def add(spectra, cosmics_nsig=0.) :
"""
Co-add the spectra for each target and each camera. The input spectra object is modified in place.
Args:
spectra: desispec.spectra.Spectra object
Options:
cosmics_nsig: float, nsigma clipping threshold for cosmics rays
"""
log = get_logger()
targets = np.unique(spectra.fibermap["TARGETID"])
ntarget=targets.size
log.debug("number of targets= {}".format(ntarget))
for b in spectra.bands :
log.debug("coadding band '{}'".format(b))
nwave=spectra.wave[b].size
tflux=np.zeros((ntarget,nwave),dtype=spectra.flux[b].dtype)
tivar=np.zeros((ntarget,nwave),dtype=spectra.ivar[b].dtype)
if spectra.mask is not None :
tmask=np.zeros((ntarget,nwave),dtype=spectra.mask[b].dtype)
else :
tmask=None
trdata=np.zeros((ntarget,spectra.resolution_data[b].shape[1],nwave),dtype=spectra.resolution_data[b].dtype)
fiberstatus_bits = get_all_fiberbitmask_with_amp(b)
good_fiberstatus = ( (spectra.fibermap["FIBERSTATUS"] & fiberstatus_bits) == 0 )
for i,tid in enumerate(targets) :
jj=np.where( (spectra.fibermap["TARGETID"]==tid) & good_fiberstatus )[0]
#- if all spectra were flagged as bad (FIBERSTATUS != 0), continue
#- to next target, leaving tflux and tivar=0 for this target
if len(jj) == 0:
continue
if cosmics_nsig is not None and cosmics_nsig > 0 and len(jj)>2 :
# interpolate over bad measurements
# to be able to compute gradient next
# to a bad pixel and identify outlier
# many cosmics residuals are on edge
# of cosmic ray trace, and so can be
# next to a masked flux bin
grad=[]
gradvar=[]
for j in jj :
if spectra.mask is not None :
ttivar = spectra.ivar[b][j]*(spectra.mask[b][j]==0)
else :
ttivar = spectra.ivar[b][j]
good = (ttivar>0)
bad = ~good
if np.sum(good)==0 :
continue
nbad = np.sum(bad)
ttflux = spectra.flux[b][j].copy()
if nbad>0 :
ttflux[bad] = np.interp(spectra.wave[b][bad],spectra.wave[b][good],ttflux[good])
ttivar = spectra.ivar[b][j].copy()
if nbad>0 :
ttivar[bad] = np.interp(spectra.wave[b][bad],spectra.wave[b][good],ttivar[good])
ttvar = 1./(ttivar+(ttivar==0))
ttflux[1:] = ttflux[1:]-ttflux[:-1]
ttvar[1:] = ttvar[1:]+ttvar[:-1]
ttflux[0] = 0
grad.append(ttflux)
gradvar.append(ttvar)
#tivar_unmasked= np.sum(spectra.ivar[b][jj],axis=0)
tivar_unmasked = 1 / np.sum(1/spectra.ivar[b][jj],axis=0)
if spectra.mask is not None :
ivarjj=spectra.ivar[b][jj]*(spectra.mask[b][jj]==0)
else :
ivarjj=spectra.ivar[b][jj]
if cosmics_nsig is not None and cosmics_nsig > 0 and len(jj)>2 :
grad=np.array(grad)
gradvar=np.array(gradvar)
gradivar=(gradvar>0)/np.array(gradvar+(gradvar==0))
nspec=grad.shape[0]
sgradivar=np.sum(gradivar)
if sgradivar>0 :
meangrad=np.sum(gradivar*grad,axis=0)/sgradivar
deltagrad=grad-meangrad
chi2=np.sum(gradivar*deltagrad**2,axis=0)/(nspec-1)
bad = (chi2>cosmics_nsig**2)
nbad = np.sum(bad)
if nbad>0 :
log.info("masking {} values for targetid={}".format(nbad,tid))
badindex=np.where(bad)[0]
for bi in badindex :
k=np.argmax(gradivar[:,bi]*deltagrad[:,bi]**2)
ivarjj[k,bi]=0.
log.debug("masking spec {} wave={}".format(k,spectra.wave[b][bi]))
#tivar[i]=np.sum(ivarjj,axis=0)
tivar[i]= 1 / np.sum(1/ivarjj,axis=0)
tflux[i]=np.sum(spectra.flux[b][jj],axis=0)
for r in range(spectra.resolution_data[b].shape[1]) :
trdata[i,r]=np.sum((spectra.resolution_data[b][jj,r]),axis=0) # not sure applying mask is wise here
bad=(tivar[i]==0)
if np.sum(bad)>0 :
tivar[i][bad] = 1 / np.sum(1/spectra.ivar[b][jj][:,bad],axis=0) # if all masked, keep original ivar
tflux[i][bad] = np.sum(spectra.flux[b][jj][:,bad],axis=0)
ok=(tivar[i]>0)
#if np.sum(ok)>0 :
#tflux[i][ok] /= tivar[i][ok]
ok=(tivar_unmasked>0)
if np.sum(ok)>0 :
trdata[i][:,ok] /= tivar_unmasked[ok]
if spectra.mask is not None :
tmask[i] = np.bitwise_or.reduce(spectra.mask[b][jj],axis=0)
spectra.flux[b] = tflux
spectra.ivar[b] = tivar
if spectra.mask is not None :
spectra.mask[b] = tmask
spectra.resolution_data[b] = trdata
if spectra.scores is not None:
orig_scores = Table(spectra.scores.copy())
orig_scores['TARGETID'] = spectra.fibermap['TARGETID']
else:
orig_scores = None
spectra.fibermap=coadd_fibermap(spectra.fibermap)
spectra.scores=None
compute_coadd_scores(spectra, orig_scores, update_coadd=True)
| en | 0.768633 | Coaddition the spectra for each target and each camera. The input spectra is modified. Args: spectra: desispec.spectra.Spectra object Options: cosmics_nsig: float, nsigma clipping threshold for cosmics rays #- if all spectra were flagged as bad (FIBERSTATUS != 0), contine #- to next target, leaving tflux and tivar=0 for this target # interpolate over bad measurements # to be able to compute gradient next # to a bad pixel and identify outlier # many cosmics residuals are on edge # of cosmic ray trace, and so can be # next to a masked flux bin #tivar_unmasked= np.sum(spectra.ivar[b][jj],axis=0) #tivar[i]=np.sum(ivarjj,axis=0) # not sure applying mask is wise here # if all masked, keep original ivar #if np.sum(ok)>0 : #tflux[i][ok] /= tivar[i][ok] | 1.963416 | 2 |
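# Usage sketch (an illustration, not part of the script above): read a spectra file with
# desispec, co-add it in place with the `add` function defined here, and write the result
# back out. The file names are placeholders.
from desispec.io import read_spectra, write_spectra

def coadd_file(inpath, outpath, cosmics_nsig=4.0):
    spectra = read_spectra(inpath)
    add(spectra, cosmics_nsig=cosmics_nsig)   # modifies `spectra` in place
    write_spectra(outpath, spectra)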
pontoon/pretranslation/tests/test_pretranslate.py | timvisee/pontoon | 0 | 10615 | from mock import patch
import pytest
from pontoon.base.models import User
from pontoon.pretranslation.pretranslate import get_translations
from pontoon.test.factories import (
EntityFactory,
TranslationMemoryFactory,
)
@patch("pontoon.pretranslation.pretranslate.get_google_translate_data")
@pytest.mark.django_db
def test_get_translations(gt_mock, locale_b, resource_a, google_translate_locale):
entities = [
EntityFactory(resource=resource_a, string=x, order=i)
for i, x in enumerate(["abaa", "abac", "aaab", "abab"])
]
entities[1].string_plural = entities[1].string
entities[3].string_plural = entities[3].string
entities[1].save()
entities[3].save()
google_translate_locale.cldr_plurals = "1, 2"
google_translate_locale.save()
for entity in entities[0:2]:
TranslationMemoryFactory.create(
entity=entity, source=entity.string, target=entity.string, locale=locale_b,
)
TranslationMemoryFactory.create(
entity=entity,
source=entity.string,
target=entity.string,
locale=google_translate_locale,
)
# Mock the return value of get_google_translate_data
gt_mock.return_value = {
"status": True,
"translation": "gt_translation",
}
tm_user = User.objects.get(email="<EMAIL>")
gt_user = User.objects.get(email="<EMAIL>")
# 100% match exists in translation memory.
response_a = get_translations(entities[0], locale_b)
response_b = get_translations(entities[0], google_translate_locale)
assert response_a == [(entities[0].string, None, tm_user)]
assert response_b == [(entities[0].string, None, tm_user)]
# 100% match does not exists and locale.google_translate_code is None.
response = get_translations(entities[2], locale_b)
assert response == []
# 100% match does not exists and locale.google_translate_code is not None.
response = get_translations(entities[2], google_translate_locale)
assert response == [("gt_translation", None, gt_user)]
# Entity.string_plural is not None.
response_a = get_translations(entities[1], google_translate_locale)
response_b = get_translations(entities[3], google_translate_locale)
assert response_a == [
(entities[1].string, 0, tm_user),
(entities[1].string, 1, tm_user),
]
assert response_b == [
("gt_translation", 0, gt_user),
("gt_translation", 1, gt_user),
]
| from mock import patch
import pytest
from pontoon.base.models import User
from pontoon.pretranslation.pretranslate import get_translations
from pontoon.test.factories import (
EntityFactory,
TranslationMemoryFactory,
)
@patch("pontoon.pretranslation.pretranslate.get_google_translate_data")
@pytest.mark.django_db
def test_get_translations(gt_mock, locale_b, resource_a, google_translate_locale):
entities = [
EntityFactory(resource=resource_a, string=x, order=i)
for i, x in enumerate(["abaa", "abac", "aaab", "abab"])
]
entities[1].string_plural = entities[1].string
entities[3].string_plural = entities[3].string
entities[1].save()
entities[3].save()
google_translate_locale.cldr_plurals = "1, 2"
google_translate_locale.save()
for entity in entities[0:2]:
TranslationMemoryFactory.create(
entity=entity, source=entity.string, target=entity.string, locale=locale_b,
)
TranslationMemoryFactory.create(
entity=entity,
source=entity.string,
target=entity.string,
locale=google_translate_locale,
)
# Mock the return value of get_google_translate_data
gt_mock.return_value = {
"status": True,
"translation": "gt_translation",
}
tm_user = User.objects.get(email="<EMAIL>")
gt_user = User.objects.get(email="<EMAIL>")
# 100% match exists in translation memory.
response_a = get_translations(entities[0], locale_b)
response_b = get_translations(entities[0], google_translate_locale)
assert response_a == [(entities[0].string, None, tm_user)]
assert response_b == [(entities[0].string, None, tm_user)]
# 100% match does not exists and locale.google_translate_code is None.
response = get_translations(entities[2], locale_b)
assert response == []
# 100% match does not exists and locale.google_translate_code is not None.
response = get_translations(entities[2], google_translate_locale)
assert response == [("gt_translation", None, gt_user)]
# Entity.string_plural is not None.
response_a = get_translations(entities[1], google_translate_locale)
response_b = get_translations(entities[3], google_translate_locale)
assert response_a == [
(entities[1].string, 0, tm_user),
(entities[1].string, 1, tm_user),
]
assert response_b == [
("gt_translation", 0, gt_user),
("gt_translation", 1, gt_user),
]
| en | 0.63322 | # Mock the return value of get_google_translate_data # 100% match exists in translation memory. # 100% match does not exists and locale.google_translate_code is None. # 100% match does not exists and locale.google_translate_code is not None. # Entity.string_plural is not None. | 2.12164 | 2 |
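# Equivalent stubbing written with `patch` as a context manager (illustrative; the
# decorator form above already covers the behaviour). Handy when only part of a test
# needs the Google Translate call replaced.
def _get_translations_with_gt_stubbed(entity, locale):
    with patch(
        "pontoon.pretranslation.pretranslate.get_google_translate_data"
    ) as gt_mock:
        gt_mock.return_value = {"status": True, "translation": "gt_translation"}
        return get_translations(entity, locale)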
cubes/common.py | digitalsatori/cubes | 1,020 | 10616 | # -*- encoding: utf-8 -*-
"""Utility functions for computing combinations of dimensions and hierarchy
levels"""
from __future__ import absolute_import
import re
import os.path
import json
from collections import OrderedDict
from .errors import ModelInconsistencyError, ArgumentError, ConfigurationError
from . import compat
__all__ = [
"IgnoringDictionary",
"MissingPackage",
"localize_common",
"localize_attributes",
"get_localizable_attributes",
"decamelize",
"to_identifier",
"assert_instance",
"assert_all_instances",
"read_json_file",
"sorted_dependencies",
]
class IgnoringDictionary(OrderedDict):
"""Simple dictionary extension that will ignore any keys of which values
are empty (None/False)"""
def __setitem__(self, key, value):
if value is not None:
super(IgnoringDictionary, self).__setitem__(key, value)
def set(self, key, value):
"""Sets `value` for `key` even if value is null."""
super(IgnoringDictionary, self).__setitem__(key, value)
def __repr__(self):
items = []
for key, value in self.items():
item = '%s: %s' % (repr(key), repr(value))
items.append(item)
return "{%s}" % ", ".join(items)
def assert_instance(obj, class_, label):
"""Raises ArgumentError when `obj` is not instance of `cls`"""
if not isinstance(obj, class_):
raise ModelInconsistencyError("%s should be sublcass of %s, "
"provided: %s" % (label,
class_.__name__,
type(obj).__name__))
def assert_all_instances(list_, class_, label="object"):
"""Raises ArgumentError when objects in `list_` are not instances of
`cls`"""
for obj in list_ or []:
assert_instance(obj, class_, label="object")
class MissingPackageError(Exception):
"""Exception raised when encountered a missing package."""
pass
class MissingPackage(object):
"""Bogus class to handle missing optional packages - packages that are not
necessarily required for Cubes, but are needed for certain features."""
def __init__(self, package, feature = None, source = None, comment = None):
self.package = package
self.feature = feature
self.source = source
self.comment = comment
def __call__(self, *args, **kwargs):
self._fail()
def __getattr__(self, name):
self._fail()
def _fail(self):
if self.feature:
use = " to be able to use: %s" % self.feature
else:
use = ""
if self.source:
source = " from %s" % self.source
else:
source = ""
if self.comment:
comment = ". %s" % self.comment
else:
comment = ""
raise MissingPackageError("Optional package '%s' is not installed. "
"Please install the package%s%s%s" %
(self.package, source, use, comment))
def optional_import(name, feature=None, source=None, comment=None):
"""Optionally import package `name`. If package does not exist, import a
placeholder object, that raises an exception with more detailed
description about the missing package."""
try:
return __import__(name)
except ImportError:
return MissingPackage(name, feature, source, comment)
def expand_dictionary(record, separator='.'):
"""Return expanded dictionary: treat keys are paths separated by
`separator`, create sub-dictionaries as necessary"""
result = {}
for key, value in record.items():
current = result
path = key.split(separator)
for part in path[:-1]:
if part not in current:
current[part] = {}
current = current[part]
current[path[-1]] = value
return result
def localize_common(obj, trans):
"""Localize common attributes: label and description"""
if "label" in trans:
obj.label = trans["label"]
if "description" in trans:
obj.description = trans["description"]
def localize_attributes(attribs, translations):
"""Localize list of attributes. `translations` should be a dictionary with
keys as attribute names, values are dictionaries with localizable
attribute metadata, such as ``label`` or ``description``."""
for (name, atrans) in translations.items():
attrib = attribs[name]
localize_common(attrib, atrans)
def get_localizable_attributes(obj):
"""Returns a dictionary with localizable attributes of `obj`."""
# FIXME: use some kind of class attribute to get list of localizable attributes
locale = {}
try:
if obj.label:
locale["label"] = obj.label
except:
pass
try:
if obj.description:
locale["description"] = obj.description
except:
pass
return locale
def decamelize(name):
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1 \2', name)
return re.sub('([a-z0-9])([A-Z])', r'\1 \2', s1)
def to_identifier(name):
return re.sub(r' ', r'_', name).lower()
def to_label(name, capitalize=True):
"""Converts `name` into label by replacing underscores by spaces. If
`capitalize` is ``True`` (default) then the first letter of the label is
capitalized."""
label = name.replace("_", " ")
if capitalize:
label = label.capitalize()
return label
def coalesce_option_value(value, value_type, label=None):
"""Convert string into an object value of `value_type`. The type might be:
`string` (no conversion), `integer`, `float`, `list` – comma separated
list of strings.
"""
value_type = value_type.lower()
try:
if value_type in ('string', 'str'):
return_value = str(value)
elif value_type == 'list':
if isinstance(value, compat.string_type):
return_value = value.split(",")
else:
return_value = list(value)
elif value_type == "float":
return_value = float(value)
elif value_type in ["integer", "int"]:
return_value = int(value)
elif value_type in ["bool", "boolean"]:
if not value:
return_value = False
elif isinstance(value, compat.string_type):
return_value = value.lower() in ["1", "true", "yes", "on"]
else:
return_value = bool(value)
else:
raise ArgumentError("Unknown option value type %s" % value_type)
except ValueError:
if label:
label = "parameter %s " % label
else:
label = ""
raise ArgumentError("Unable to convert %svalue '%s' into type %s" %
(label, value, value_type))
return return_value
def coalesce_options(options, types):
"""Coalesce `options` dictionary according to types dictionary. Keys in
`types` refer to keys in `options`, values of `types` are value types:
string, list, float, integer or bool."""
out = {}
for key, value in options.items():
if key in types:
out[key] = coalesce_option_value(value, types[key], key)
else:
out[key] = value
return out
def read_json_file(path, kind=None):
"""Read a JSON from `path`. This is convenience function that provides
more descriptive exception handling."""
kind = "%s " % str(kind) if kind else ""
if not os.path.exists(path):
raise ConfigurationError("Can not find %sfile '%s'"
% (kind, path))
try:
f = compat.open_unicode(path)
except IOError:
raise ConfigurationError("Can not open %sfile '%s'"
% (kind, path))
try:
content = json.load(f)
except ValueError as e:
raise SyntaxError("Syntax error in %sfile %s: %s"
% (kind, path, str(e)))
finally:
f.close()
return content
def sorted_dependencies(graph):
"""Return keys from `deps` ordered by dependency (topological sort).
`deps` is a dictionary where keys are strings and values are list of
strings where keys is assumed to be dependant on values.
Example::
A ---> B -+--> C
|
+--> D --> E
Will be: ``{"A": ["B"], "B": ["C", "D"], "C": [], "D": ["E"], "E": []}``
"""
graph = dict((key, set(value)) for key, value in graph.items())
# L ← Empty list that will contain the sorted elements
L = []
# S ← Set of all nodes with no dependencies (incoming edges)
S = set(parent for parent, req in graph.items() if not req)
while S:
# remove a node n from S
n = S.pop()
# insert n into L
L.append(n)
# for each node m with an edge e from n to m do
# (n that depends on m)
parents = [parent for parent, req in graph.items() if n in req]
for parent in parents:
graph[parent].remove(n)
# remove edge e from the graph
# if m has no other incoming edges then insert m into S
if not graph[parent]:
S.add(parent)
# if graph has edges then -> error
nonempty = [k for k, v in graph.items() if v]
if nonempty:
raise ArgumentError("Cyclic dependency of: %s"
% ", ".join(nonempty))
return L
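# Worked example (illustrative) for sorted_dependencies: note that every node, including
# leaves such as "C" and "E", must appear as a key, otherwise the final leftover-edge
# check reports a spurious cyclic dependency.
_graph = {"A": ["B"], "B": ["C", "D"], "C": [], "D": ["E"], "E": []}
_order = sorted_dependencies(_graph)
# Every element appears after the things it depends on, e.g. ['C', 'E', 'D', 'B', 'A']
# (ties may come out in any order).
assert _order.index("A") > _order.index("B") > _order.index("D") > _order.index("E")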
wavenet_iaf.py | Ella77/ClariNet | 126 | 10617 | import torch
import torch.nn as nn
import torch.nn.functional as F
from modules import Conv, ResBlock
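# Descriptive note (added): Wavenet_Flow below is a single non-autoregressive
# WaveNet that emits a (mu, log-scale) pair per time step, and Wavenet_Student
# chains several such flows as an inverse autoregressive flow, turning the
# noise input z into a waveform conditioned on the upsampled features c.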
class Wavenet_Student(nn.Module):
def __init__(self, num_blocks_student=[1, 1, 1, 1, 1, 1], num_layers=10,
front_channels=32, residual_channels=64, gate_channels=128, skip_channels=64,
kernel_size=3, cin_channels=80, causal=True):
super(Wavenet_Student, self).__init__()
self.num_blocks = num_blocks_student
self.num_flow = len(self.num_blocks)
self.num_layers = num_layers
self.iafs = nn.ModuleList()
for i in range(self.num_flow):
self.iafs.append(Wavenet_Flow(out_channels=2,
num_blocks=self.num_blocks[i], num_layers=self.num_layers,
front_channels=front_channels, residual_channels=residual_channels,
gate_channels=gate_channels, skip_channels=skip_channels,
kernel_size=kernel_size, cin_channels=cin_channels, causal=causal))
def forward(self, z, c):
return self.iaf(z, c)
def iaf(self, z, c_up):
mu_tot, logs_tot = 0., 0.
for i, iaf in enumerate(self.iafs):
mu_logs = iaf(z, c_up)
mu = mu_logs[:, 0:1, :-1]
logs = mu_logs[:, 1:, :-1]
mu_tot = mu_tot * torch.exp(logs) + mu
logs_tot = logs_tot + logs
z = z[:, :, 1:] * torch.exp(logs) + mu
z = F.pad(z, pad=(1, 0), mode='constant', value=0)
return z, mu_tot, logs_tot
def receptive_field(self):
receptive_field = 1
for iaf in self.iafs:
receptive_field += iaf.receptive_field_size() - 1
return receptive_field
def generate(self, z, c_up):
x, _, _ = self.iaf(z, c_up)
return x
def remove_weight_norm(self):
for iaf in self.iafs:
iaf.remove_weight_norm()
class Wavenet_Flow(nn.Module):
def __init__(self, out_channels=1, num_blocks=1, num_layers=10,
front_channels=32, residual_channels=64, gate_channels=32, skip_channels=None,
kernel_size=3, cin_channels=80, causal=True):
        super(Wavenet_Flow, self).__init__()
self.causal = causal
self.num_blocks = num_blocks
self.num_layers = num_layers
self.front_channels = front_channels
self.out_channels = out_channels
self.gate_channels = gate_channels
self.residual_channels = residual_channels
self.skip_channels = skip_channels
self.cin_channels = cin_channels
self.kernel_size = kernel_size
self.front_conv = nn.Sequential(
Conv(1, self.residual_channels, self.front_channels, causal=self.causal),
nn.ReLU()
)
self.res_blocks = nn.ModuleList()
self.res_blocks_fast = nn.ModuleList()
for b in range(self.num_blocks):
for n in range(self.num_layers):
self.res_blocks.append(ResBlock(self.residual_channels, self.gate_channels, self.skip_channels,
self.kernel_size, dilation=2**n,
cin_channels=self.cin_channels, local_conditioning=True,
causal=self.causal, mode='SAME'))
self.final_conv = nn.Sequential(
nn.ReLU(),
Conv(self.skip_channels, self.skip_channels, 1, causal=self.causal),
nn.ReLU(),
Conv(self.skip_channels, self.out_channels, 1, causal=self.causal)
)
def forward(self, x, c):
return self.wavenet(x, c)
def wavenet(self, tensor, c=None):
h = self.front_conv(tensor)
skip = 0
for i, f in enumerate(self.res_blocks):
h, s = f(h, c)
skip += s
out = self.final_conv(skip)
return out
def receptive_field_size(self):
num_dir = 1 if self.causal else 2
dilations = [2 ** (i % self.num_layers) for i in range(self.num_layers * self.num_blocks)]
return num_dir * (self.kernel_size - 1) * sum(dilations) + 1 + (self.front_channels - 1)
def remove_weight_norm(self):
for f in self.res_blocks:
f.remove_weight_norm()
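# Illustrative usage sketch (added; not part of the original file). It assumes
# the Conv/ResBlock implementations imported above keep the time dimension
# unchanged ('SAME' padding), as the rest of this file expects.
if __name__ == "__main__":
    student = Wavenet_Student(num_blocks_student=[1, 1], num_layers=4)
    z = torch.randn(1, 1, 4000)       # (batch, channels, time) noise prior
    c_up = torch.randn(1, 80, 4000)   # conditioning, already upsampled to time
    x, mu_tot, logs_tot = student(z, c_up)
    print(x.shape, mu_tot.shape, logs_tot.shape)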
tests/__init__.py | ybelleguic/openbrokerapi | 36 | 10618 | try:
from gevent import monkey
monkey.patch_all()
except ImportError:
# fine if no gevent is available
pass
import base64
import logging
from unittest.mock import Mock
from flask.app import Flask
from flask_testing import TestCase
from openbrokerapi.api import BrokerCredentials
from openbrokerapi.log_util import basic_config
class BrokerTestCase(TestCase):
auth_header = 'Basic ' + base64.b64encode(b":").decode("ascii")
def create_app(self):
from openbrokerapi.api import get_blueprint
app = Flask(__name__)
self.broker = Mock()
app.register_blueprint(
get_blueprint(self.broker,
BrokerCredentials("", ""),
basic_config(level=logging.WARN)
)
)
return app
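# Illustrative sketch (added; not part of the original file) of a test built on
# the base class above. The /v2/catalog route, the X-Broker-Api-Version header
# and the response handling are assumptions about openbrokerapi, not something
# taken from this repository.
class ExampleCatalogTest(BrokerTestCase):
    def test_catalog_endpoint_answers_with_mocked_broker(self):
        self.broker.catalog.return_value = []
        response = self.client.get(
            '/v2/catalog',
            headers={
                'X-Broker-Api-Version': '2.13',
                'Authorization': self.auth_header,
            })
        self.assertEqual(200, response.status_code)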
ansible/lib/ansible/modules/extras/network/f5/bigip_gtm_wide_ip.py | kiv-box/kafka | 0 | 10619 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2015, <NAME>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: bigip_gtm_wide_ip
short_description: "Manages F5 BIG-IP GTM wide ip"
description:
- "Manages F5 BIG-IP GTM wide ip"
version_added: "2.0"
author:
- <NAME> (@perzizzle)
- <NAME> (@caphrim007)
notes:
- "Requires BIG-IP software version >= 11.4"
- "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)"
- "Best run as a local_action in your playbook"
- "Tested with manager and above account privilege level"
requirements:
- bigsuds
options:
lb_method:
description:
- LB method of wide ip
required: true
choices: ['return_to_dns', 'null', 'round_robin',
'ratio', 'topology', 'static_persist', 'global_availability',
'vs_capacity', 'least_conn', 'lowest_rtt', 'lowest_hops',
'packet_rate', 'cpu', 'hit_ratio', 'qos', 'bps',
'drop_packet', 'explicit_ip', 'connection_rate', 'vs_score']
wide_ip:
description:
- Wide IP name
required: true
extends_documentation_fragment: f5
'''
EXAMPLES = '''
- name: Set lb method
local_action: >
bigip_gtm_wide_ip
server=192.0.2.1
user=admin
password=<PASSWORD>
lb_method=round_robin
wide_ip=my-wide-ip.example.com
'''
try:
import bigsuds
except ImportError:
bigsuds_found = False
else:
bigsuds_found = True
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils.f5 import bigip_api, f5_argument_spec
def get_wide_ip_lb_method(api, wide_ip):
lb_method = api.GlobalLB.WideIP.get_lb_method(wide_ips=[wide_ip])[0]
lb_method = lb_method.strip().replace('LB_METHOD_', '').lower()
return lb_method
def get_wide_ip_pools(api, wide_ip):
try:
return api.GlobalLB.WideIP.get_wideip_pool([wide_ip])
except Exception:
e = get_exception()
print(e)
def wide_ip_exists(api, wide_ip):
# hack to determine if wide_ip exists
result = False
try:
api.GlobalLB.WideIP.get_object_status(wide_ips=[wide_ip])
result = True
except bigsuds.OperationFailed:
e = get_exception()
if "was not found" in str(e):
result = False
else:
# genuine exception
raise
return result
def set_wide_ip_lb_method(api, wide_ip, lb_method):
lb_method = "LB_METHOD_%s" % lb_method.strip().upper()
api.GlobalLB.WideIP.set_lb_method(wide_ips=[wide_ip], lb_methods=[lb_method])
def main():
argument_spec = f5_argument_spec()
lb_method_choices = ['return_to_dns', 'null', 'round_robin',
'ratio', 'topology', 'static_persist', 'global_availability',
'vs_capacity', 'least_conn', 'lowest_rtt', 'lowest_hops',
'packet_rate', 'cpu', 'hit_ratio', 'qos', 'bps',
'drop_packet', 'explicit_ip', 'connection_rate', 'vs_score']
meta_args = dict(
lb_method = dict(type='str', required=True, choices=lb_method_choices),
wide_ip = dict(type='str', required=True)
)
argument_spec.update(meta_args)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True
)
if not bigsuds_found:
module.fail_json(msg="the python bigsuds module is required")
server = module.params['server']
server_port = module.params['server_port']
user = module.params['user']
password = module.params['password']
wide_ip = module.params['wide_ip']
lb_method = module.params['lb_method']
validate_certs = module.params['validate_certs']
result = {'changed': False} # default
try:
api = bigip_api(server, user, password, validate_certs, port=server_port)
if not wide_ip_exists(api, wide_ip):
module.fail_json(msg="wide ip %s does not exist" % wide_ip)
if lb_method is not None and lb_method != get_wide_ip_lb_method(api, wide_ip):
if not module.check_mode:
set_wide_ip_lb_method(api, wide_ip, lb_method)
result = {'changed': True}
else:
result = {'changed': True}
except Exception:
e = get_exception()
module.fail_json(msg="received exception: %s" % e)
module.exit_json(**result)
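# Illustrative sketch (added; not part of the original module): the direct
# bigsuds flow that main() wraps, using only the helpers defined above. The
# server address, credentials and wide IP name are placeholders.
def _example_direct_usage():
    api = bigip_api('192.0.2.1', 'admin', 'secret', True, port=443)
    if wide_ip_exists(api, 'my-wide-ip.example.com'):
        if get_wide_ip_lb_method(api, 'my-wide-ip.example.com') != 'round_robin':
            set_wide_ip_lb_method(api, 'my-wide-ip.example.com', 'round_robin')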
if __name__ == '__main__':
main()
sql/src/test/resources/joins/create_sample_table.py | MichelaSalvemini/Modelli_project | 677 | 10620 | #! /usr/bin/env python
from __future__ import print_function
import pandas as pd
import numpy as np
import argparse
def generate_csv(start_index, fname):
cols = [
str('A' + str(i)) for i in range(start_index, NUM_COLS + start_index)
]
data = []
for i in range(NUM_ROWS):
        # Materialize each row as a list of random values; appending the raw
        # generator would leave the row unevaluated when building the DataFrame.
        vals = [np.random.choice(NUM_DISTINCT_VALS) for j in range(NUM_COLS)]
        data.append(vals)
df = pd.DataFrame(data=data, columns=cols)
df.to_csv(fname, index=False, header=True)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Generate sample tables to test joins.')
parser.add_argument('--num-rows', '-r', type=int, default=100)
parser.add_argument('--num-cols', '-c', type=int, required=True)
parser.add_argument('--num-distinct-vals', '-d', type=int, required=True)
parser.add_argument('--num-cols-overlap', '-o', type=int, default=1)
args = parser.parse_args()
NUM_ROWS = args.num_rows
NUM_COLS = args.num_cols
NUM_DISTINCT_VALS = args.num_distinct_vals
num_overlap = args.num_cols_overlap
if num_overlap > NUM_COLS:
print('--num-cols-overlap cannot be greater than --num-cols')
import sys
sys.exit(1)
generate_csv(0, 'table_a.csv')
generate_csv(NUM_COLS - num_overlap, 'table_b.csv')
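# Example invocation (added; not part of the original script):
#
#     python create_sample_table.py -c 5 -d 10 -o 2 -r 100
#
# table_a.csv then gets columns A0..A4 and table_b.csv gets A3..A7, so the two
# tables share A3 and A4 as join keys.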
Advanced/1- Introduction/5- Index_words.py | AlirezaMojtabavi/Python_Practice | 0 | 10621 |
indexWords = list()
def PreviousWord(_list, _word):
if _list[_list.index(_word)-1] :
return _list[_list.index(_word)-1]
else:
return
phrase = str(input())
phraseList = phrase.split(" ")
length = len(phraseList)
# str.strip() returns a new string, so store the stripped words back into the
# list instead of discarding them.
phraseList = [item.strip() for item in phraseList]
if phrase != "" :
for i in range(1, length-1) :
lengthOfWord = len(phraseList[i])
if phraseList[i][0].isupper() :
if PreviousWord(phraseList, phraseList[i])[-1] != "." :
if phraseList[i][-1]=="." or phraseList[i][-1]=="," :
indexWords.append(i + 1)
indexWords.append(phraseList[i][: lengthOfWord-1])
elif phraseList[i][-1]== "]" and phraseList[i][-2]== "'" :
indexWords.append(i + 1)
indexWords.append(phraseList[i][: lengthOfWord-2])
else :
indexWords.append(i + 1)
indexWords.append(phraseList[i])
else:
print("None")
lengthOfIndexWord = len(indexWords)
if lengthOfIndexWord == 0 :
print("None")
else:
for i in range(0, lengthOfIndexWord//2):
        print("%i:%s" %(indexWords[2*i],indexWords[(2*i)+1]))
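# Example run (added; illustrative only):
#   input : He went to New York, and stayed with Alice.
#   output: 4:New
#           5:York
# Positions are 1-based word positions; a trailing '.' or ',' is stripped from
# the reported word, and capitalized words that directly follow a full stop, as
# well as the first and last words, are skipped.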
appengine/findit/handlers/test/completed_build_pubsub_ingestor_test.py | xswz8015/infra | 0 | 10622 |
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import base64
import json
import mock
import webapp2
from google.appengine.api import taskqueue
from go.chromium.org.luci.buildbucket.proto.build_pb2 import Build
from testing_utils.testing import AppengineTestCase
from common.findit_http_client import FinditHttpClient
from common.waterfall import buildbucket_client
from handlers import completed_build_pubsub_ingestor
from model.isolated_target import IsolatedTarget
class CompletedBuildPubsubIngestorTest(AppengineTestCase):
app_module = webapp2.WSGIApplication([
('/index-isolated-builds',
completed_build_pubsub_ingestor.CompletedBuildPubsubIngestor),
],
debug=True)
@mock.patch.object(completed_build_pubsub_ingestor,
'_HandlePossibleFailuresInBuild')
@mock.patch.object(completed_build_pubsub_ingestor,
'_HandlePossibleCodeCoverageBuild')
@mock.patch.object(buildbucket_client, 'GetV2Build')
@mock.patch.object(FinditHttpClient, 'Post')
def testSucessfulPushCIBuild(self, mock_post, mock_get_build, *_):
mock_build = Build()
mock_build.id = 8945610992972640896
mock_build.status = 12
mock_build.input.properties['builder_group'] = 'chromium.linux'
mock_build.output.properties['buildername'] = 'Linux Builder'
mock_build.output.properties.get_or_create_struct(
'swarm_hashes_ref/heads/mockmaster(at){#123}'
)['mock_target'] = 'mock_hash'
gitiles_commit = mock_build.input.gitiles_commit
gitiles_commit.host = 'gitiles.host'
gitiles_commit.project = 'gitiles/project'
gitiles_commit.ref = 'refs/heads/mockmaster'
mock_build.builder.project = 'mock_luci_project'
mock_build.builder.bucket = 'mock_bucket'
mock_build.builder.builder = 'Linux Builder'
mock_headers = {'X-Prpc-Grpc-Code': '0'}
binary_data = mock_build.SerializeToString()
mock_post.return_value = (200, binary_data, mock_headers)
mock_get_build.return_value = mock_build
request_body = json.dumps({
'message': {
'attributes': {
'build_id': str(mock_build.id),
},
'data':
base64.b64encode(
json.dumps({
'build': {
'project': 'chromium',
'bucket': 'luci.chromium.ci',
'status': 'COMPLETED',
'parameters_json': '{"builder_name": "builder"}',
}
})),
},
})
response = self.test_app.post(
'/index-isolated-builds?format=json', params=request_body)
self.assertEqual(200, response.status_int)
self.assertEqual(
123,
IsolatedTarget.get_by_id(
'8945610992972640896/mock_target').commit_position)
self.assertEqual(
8945610992972640896,
IsolatedTarget.get_by_id('8945610992972640896/mock_target').build_id)
self.assertEqual(1, len(json.loads(response.body)['created_rows']))
@mock.patch.object(completed_build_pubsub_ingestor,
'_HandlePossibleFailuresInBuild')
@mock.patch.object(completed_build_pubsub_ingestor,
'_HandlePossibleCodeCoverageBuild')
@mock.patch.object(FinditHttpClient, 'Post')
def testPushNoBuild(self, mock_post, *_):
mock_headers = {'X-Prpc-Grpc-Code': '5'}
mock_post.return_value = (404, 'Build not found', mock_headers)
request_body = json.dumps({
'message': {
'attributes': {
'build_id': '123456',
},
'data':
base64.b64encode(
json.dumps({
'build': {
'project': 'chromium',
'bucket': 'luci.chromium.ci',
'status': 'COMPLETED',
'result': 'SUCCESS',
'parameters_json': '{"builder_name": "builder"}',
}
})),
},
})
response = self.test_app.post(
'/index-isolated-builds?format=json', params=request_body, status=200)
self.assertEqual(200, response.status_int)
@mock.patch.object(completed_build_pubsub_ingestor,
'_HandlePossibleFailuresInBuild')
@mock.patch.object(completed_build_pubsub_ingestor,
'_HandlePossibleCodeCoverageBuild')
@mock.patch.object(FinditHttpClient, 'Post')
def testPushPendingBuild(self, mock_post, *_):
request_body = json.dumps({
'message': {
'attributes': {
'build_id': '123456',
},
'data':
base64.b64encode(
json.dumps({
'build': {
'project': 'chromium',
'bucket': 'luci.chromium.ci',
'status': 'PENDING',
'parameters_json': '{"builder_name": "builder"}',
}
})),
},
})
response = self.test_app.post(
'/index-isolated-builds?format=json', params=request_body)
self.assertFalse(mock_post.called)
self.assertEqual(200, response.status_int)
@mock.patch.object(completed_build_pubsub_ingestor,
'_HandlePossibleFailuresInBuild')
@mock.patch.object(completed_build_pubsub_ingestor,
'_HandlePossibleCodeCoverageBuild')
@mock.patch.object(FinditHttpClient, 'Post')
def testSucessfulPushBadFormat(self, mock_post, *_):
request_body = json.dumps({
'message': {},
})
response = self.test_app.post(
'/index-isolated-builds?format=json', params=request_body)
self.assertFalse(mock_post.called)
self.assertEqual(200, response.status_int)
@mock.patch.object(completed_build_pubsub_ingestor,
'_HandlePossibleFailuresInBuild')
@mock.patch.object(completed_build_pubsub_ingestor,
'_HandlePossibleCodeCoverageBuild')
@mock.patch.object(buildbucket_client, 'GetV2Build')
@mock.patch.object(FinditHttpClient, 'Post')
def testNonIsolateBuild(self, mock_post, mock_get_build, *_):
# This build does not isolate any targets.
mock_build = Build()
mock_build.id = 8945610992972640896
mock_build.status = 12
mock_build.input.properties['builder_group'] = 'chromium.linux'
mock_build.output.properties['buildername'] = 'Linux Tester'
gitiles_commit = mock_build.input.gitiles_commit
gitiles_commit.host = 'gitiles.host'
gitiles_commit.project = 'gitiles/project'
gitiles_commit.ref = 'refs/heads/mockmaster'
mock_build.builder.project = 'mock_luci_project'
mock_build.builder.bucket = 'mock_bucket'
mock_build.builder.builder = 'Linux Tester'
mock_headers = {'X-Prpc-Grpc-Code': '0'}
binary_data = mock_build.SerializeToString()
mock_post.return_value = (200, binary_data, mock_headers)
mock_get_build.return_value = mock_build
request_body = json.dumps({
'message': {
'attributes': {
'build_id': str(mock_build.id),
},
'data':
base64.b64encode(
json.dumps({
'build': {
'project': 'chromium',
'bucket': 'luci.chromium.ci',
'status': 'COMPLETED',
'parameters_json': '{"builder_name": "builder"}',
}
})),
},
})
response = self.test_app.post(
'/index-isolated-builds?format=json', params=request_body)
self.assertEqual(200, response.status_int)
self.assertNotIn('created_rows', response.body)
@mock.patch.object(completed_build_pubsub_ingestor,
'_HandlePossibleFailuresInBuild')
@mock.patch.object(completed_build_pubsub_ingestor,
'_HandlePossibleCodeCoverageBuild')
@mock.patch.object(buildbucket_client, 'GetV2Build')
@mock.patch.object(FinditHttpClient, 'Post')
def testNoMasternameBuild(self, mock_post, mock_get_build, *_):
mock_build = Build()
mock_build.id = 8945610992972640896
mock_build.status = 12
mock_build.output.properties['buildername'] = 'Linux Builder'
mock_build.output.properties.get_or_create_struct(
'swarm_hashes_ref/heads/mockmaster(at){#123}'
)['mock_target'] = 'mock_hash'
gitiles_commit = mock_build.input.gitiles_commit
gitiles_commit.host = 'gitiles.host'
gitiles_commit.project = 'gitiles/project'
gitiles_commit.ref = 'refs/heads/mockmaster'
mock_build.builder.project = 'mock_luci_project'
mock_build.builder.bucket = 'mock_bucket'
mock_build.builder.builder = 'Linux Builder'
mock_headers = {'X-Prpc-Grpc-Code': '0'}
binary_data = mock_build.SerializeToString()
mock_post.return_value = (200, binary_data, mock_headers)
mock_get_build.return_value = mock_build
request_body = json.dumps({
'message': {
'attributes': {
'build_id': str(mock_build.id),
},
'data':
base64.b64encode(
json.dumps({
'build': {
'project': 'chromium',
'bucket': 'luci.chromium.ci',
'status': 'COMPLETED',
'parameters_json': '{"builder_name": "builder"}',
}
})),
},
})
response = self.test_app.post(
'/index-isolated-builds?format=json', params=request_body)
self.assertEqual(200, response.status_int)
self.assertNotIn('created_rows', response.body)
@mock.patch.object(completed_build_pubsub_ingestor,
'_HandlePossibleFailuresInBuild')
@mock.patch.object(completed_build_pubsub_ingestor,
'_HandlePossibleCodeCoverageBuild')
@mock.patch.object(buildbucket_client, 'GetV2Build')
@mock.patch.object(FinditHttpClient, 'Post')
def testSucessfulPushTryJob(self, mock_post, mock_get_build, *_):
mock_build = Build()
mock_build.id = 8945610992972640896
mock_build.status = 12
mock_build.input.properties['builder_group'] = 'luci.chromium.findit'
mock_build.input.properties['target_builder_group'] = 'chromium.linux'
mock_build.output.properties['buildername'] = ('findit_variable')
mock_build.output.properties['target_buildername'] = (
'linux_chromium_compile_dbg_ng')
mock_build.output.properties.get_or_create_struct(
'swarm_hashes_ref/heads/mockmaster(at){#123}_with_patch'
)['mock_target'] = 'mock_hash'
mock_build.output.properties.get_or_create_struct(
'swarm_hashes_ref/heads/mockmaster(at){#123}_without_patch'
)['mock_target'] = 'mock_hash_without'
mock_build.output.properties['repository'] = (
'https://test.googlesource.com/team/project.git')
mock_build.output.properties['gitiles_ref'] = 'refs/heads/mockmaster'
mock_change = mock_build.input.gerrit_changes.add()
mock_change.host = 'mock.gerrit.host'
mock_change.change = 12345
mock_change.patchset = 1
mock_build.builder.project = 'mock_luci_project'
mock_build.builder.bucket = 'mock_bucket'
mock_build.builder.builder = 'findit_variable'
mock_headers = {'X-Prpc-Grpc-Code': '0'}
binary_data = mock_build.SerializeToString()
mock_post.return_value = (200, binary_data, mock_headers)
mock_get_build.return_value = mock_build
request_body = json.dumps({
'message': {
'attributes': {
'build_id': str(mock_build.id),
},
'data':
base64.b64encode(
json.dumps({
'build': {
'project': 'chromium',
'bucket': 'luci.chromium.ci',
'status': 'COMPLETED',
'parameters_json': '{"builder_name": "builder"}',
}
})),
},
})
response = self.test_app.post(
'/index-isolated-builds?format=json', params=request_body)
self.assertEqual(200, response.status_int)
self.assertEqual(
123,
IsolatedTarget.get_by_id(
'8945610992972640896/mock_target').commit_position)
self.assertEqual(2, len(json.loads(response.body)['created_rows']))
# Ensure target values were used.
entry = IsolatedTarget.get_by_id('8945610992972640896/mock_target')
self.assertEqual('chromium.linux', entry.master_name)
self.assertEqual('linux_chromium_compile_dbg_ng', entry.builder_name)
@mock.patch.object(completed_build_pubsub_ingestor,
'_HandlePossibleFailuresInBuild')
@mock.patch.object(completed_build_pubsub_ingestor,
'_HandlePossibleCodeCoverageBuild')
@mock.patch.object(FinditHttpClient, 'Post')
def testPushIgnoreV2Push(self, mock_post, *_):
request_body = json.dumps({
'message': {
'attributes': {
'build_id': '123456',
'version': 'v2',
},
'data':
base64.b64encode(
json.dumps({
'build': {
'project': 'chromium',
'bucket': 'luci.chromium.ci',
'status': 'COMPLETED',
'parameters_json': '{"builder_name": "builder"}',
}
})),
},
})
response = self.test_app.post(
'/index-isolated-builds?format=json', params=request_body)
self.assertFalse(mock_post.called)
self.assertEqual(200, response.status_int)
sdk/python/pulumi_azure_native/containerservice/v20191027preview/open_shift_managed_cluster.py | sebtelko/pulumi-azure-native | 0 | 10623 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['OpenShiftManagedClusterArgs', 'OpenShiftManagedCluster']
@pulumi.input_type
class OpenShiftManagedClusterArgs:
def __init__(__self__, *,
open_shift_version: pulumi.Input[str],
resource_group_name: pulumi.Input[str],
agent_pool_profiles: Optional[pulumi.Input[Sequence[pulumi.Input['OpenShiftManagedClusterAgentPoolProfileArgs']]]] = None,
auth_profile: Optional[pulumi.Input['OpenShiftManagedClusterAuthProfileArgs']] = None,
location: Optional[pulumi.Input[str]] = None,
master_pool_profile: Optional[pulumi.Input['OpenShiftManagedClusterMasterPoolProfileArgs']] = None,
monitor_profile: Optional[pulumi.Input['OpenShiftManagedClusterMonitorProfileArgs']] = None,
network_profile: Optional[pulumi.Input['NetworkProfileArgs']] = None,
plan: Optional[pulumi.Input['PurchasePlanArgs']] = None,
refresh_cluster: Optional[pulumi.Input[bool]] = None,
resource_name: Optional[pulumi.Input[str]] = None,
router_profiles: Optional[pulumi.Input[Sequence[pulumi.Input['OpenShiftRouterProfileArgs']]]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
The set of arguments for constructing a OpenShiftManagedCluster resource.
:param pulumi.Input[str] open_shift_version: Version of OpenShift specified when creating the cluster.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[Sequence[pulumi.Input['OpenShiftManagedClusterAgentPoolProfileArgs']]] agent_pool_profiles: Configuration of OpenShift cluster VMs.
:param pulumi.Input['OpenShiftManagedClusterAuthProfileArgs'] auth_profile: Configures OpenShift authentication.
:param pulumi.Input[str] location: Resource location
:param pulumi.Input['OpenShiftManagedClusterMasterPoolProfileArgs'] master_pool_profile: Configuration for OpenShift master VMs.
:param pulumi.Input['OpenShiftManagedClusterMonitorProfileArgs'] monitor_profile: Configures Log Analytics integration.
:param pulumi.Input['NetworkProfileArgs'] network_profile: Configuration for OpenShift networking.
:param pulumi.Input['PurchasePlanArgs'] plan: Define the resource plan as required by ARM for billing purposes
:param pulumi.Input[bool] refresh_cluster: Allows node rotation
:param pulumi.Input[str] resource_name: The name of the OpenShift managed cluster resource.
:param pulumi.Input[Sequence[pulumi.Input['OpenShiftRouterProfileArgs']]] router_profiles: Configuration for OpenShift router(s).
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags
"""
pulumi.set(__self__, "open_shift_version", open_shift_version)
pulumi.set(__self__, "resource_group_name", resource_group_name)
if agent_pool_profiles is not None:
pulumi.set(__self__, "agent_pool_profiles", agent_pool_profiles)
if auth_profile is not None:
pulumi.set(__self__, "auth_profile", auth_profile)
if location is not None:
pulumi.set(__self__, "location", location)
if master_pool_profile is not None:
pulumi.set(__self__, "master_pool_profile", master_pool_profile)
if monitor_profile is not None:
pulumi.set(__self__, "monitor_profile", monitor_profile)
if network_profile is not None:
pulumi.set(__self__, "network_profile", network_profile)
if plan is not None:
pulumi.set(__self__, "plan", plan)
if refresh_cluster is not None:
pulumi.set(__self__, "refresh_cluster", refresh_cluster)
if resource_name is not None:
pulumi.set(__self__, "resource_name", resource_name)
if router_profiles is not None:
pulumi.set(__self__, "router_profiles", router_profiles)
if tags is not None:
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter(name="openShiftVersion")
def open_shift_version(self) -> pulumi.Input[str]:
"""
Version of OpenShift specified when creating the cluster.
"""
return pulumi.get(self, "open_shift_version")
@open_shift_version.setter
def open_shift_version(self, value: pulumi.Input[str]):
pulumi.set(self, "open_shift_version", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="agentPoolProfiles")
def agent_pool_profiles(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['OpenShiftManagedClusterAgentPoolProfileArgs']]]]:
"""
Configuration of OpenShift cluster VMs.
"""
return pulumi.get(self, "agent_pool_profiles")
@agent_pool_profiles.setter
def agent_pool_profiles(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['OpenShiftManagedClusterAgentPoolProfileArgs']]]]):
pulumi.set(self, "agent_pool_profiles", value)
@property
@pulumi.getter(name="authProfile")
def auth_profile(self) -> Optional[pulumi.Input['OpenShiftManagedClusterAuthProfileArgs']]:
"""
Configures OpenShift authentication.
"""
return pulumi.get(self, "auth_profile")
@auth_profile.setter
def auth_profile(self, value: Optional[pulumi.Input['OpenShiftManagedClusterAuthProfileArgs']]):
pulumi.set(self, "auth_profile", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Resource location
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter(name="masterPoolProfile")
def master_pool_profile(self) -> Optional[pulumi.Input['OpenShiftManagedClusterMasterPoolProfileArgs']]:
"""
Configuration for OpenShift master VMs.
"""
return pulumi.get(self, "master_pool_profile")
@master_pool_profile.setter
def master_pool_profile(self, value: Optional[pulumi.Input['OpenShiftManagedClusterMasterPoolProfileArgs']]):
pulumi.set(self, "master_pool_profile", value)
@property
@pulumi.getter(name="monitorProfile")
def monitor_profile(self) -> Optional[pulumi.Input['OpenShiftManagedClusterMonitorProfileArgs']]:
"""
Configures Log Analytics integration.
"""
return pulumi.get(self, "monitor_profile")
@monitor_profile.setter
def monitor_profile(self, value: Optional[pulumi.Input['OpenShiftManagedClusterMonitorProfileArgs']]):
pulumi.set(self, "monitor_profile", value)
@property
@pulumi.getter(name="networkProfile")
def network_profile(self) -> Optional[pulumi.Input['NetworkProfileArgs']]:
"""
Configuration for OpenShift networking.
"""
return pulumi.get(self, "network_profile")
@network_profile.setter
def network_profile(self, value: Optional[pulumi.Input['NetworkProfileArgs']]):
pulumi.set(self, "network_profile", value)
@property
@pulumi.getter
def plan(self) -> Optional[pulumi.Input['PurchasePlanArgs']]:
"""
Define the resource plan as required by ARM for billing purposes
"""
return pulumi.get(self, "plan")
@plan.setter
def plan(self, value: Optional[pulumi.Input['PurchasePlanArgs']]):
pulumi.set(self, "plan", value)
@property
@pulumi.getter(name="refreshCluster")
def refresh_cluster(self) -> Optional[pulumi.Input[bool]]:
"""
Allows node rotation
"""
return pulumi.get(self, "refresh_cluster")
@refresh_cluster.setter
def refresh_cluster(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "refresh_cluster", value)
@property
@pulumi.getter(name="resourceName")
def resource_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the OpenShift managed cluster resource.
"""
return pulumi.get(self, "resource_name")
@resource_name.setter
def resource_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_name", value)
@property
@pulumi.getter(name="routerProfiles")
def router_profiles(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['OpenShiftRouterProfileArgs']]]]:
"""
Configuration for OpenShift router(s).
"""
return pulumi.get(self, "router_profiles")
@router_profiles.setter
def router_profiles(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['OpenShiftRouterProfileArgs']]]]):
pulumi.set(self, "router_profiles", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Resource tags
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
class OpenShiftManagedCluster(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
agent_pool_profiles: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['OpenShiftManagedClusterAgentPoolProfileArgs']]]]] = None,
auth_profile: Optional[pulumi.Input[pulumi.InputType['OpenShiftManagedClusterAuthProfileArgs']]] = None,
location: Optional[pulumi.Input[str]] = None,
master_pool_profile: Optional[pulumi.Input[pulumi.InputType['OpenShiftManagedClusterMasterPoolProfileArgs']]] = None,
monitor_profile: Optional[pulumi.Input[pulumi.InputType['OpenShiftManagedClusterMonitorProfileArgs']]] = None,
network_profile: Optional[pulumi.Input[pulumi.InputType['NetworkProfileArgs']]] = None,
open_shift_version: Optional[pulumi.Input[str]] = None,
plan: Optional[pulumi.Input[pulumi.InputType['PurchasePlanArgs']]] = None,
refresh_cluster: Optional[pulumi.Input[bool]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
resource_name_: Optional[pulumi.Input[str]] = None,
router_profiles: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['OpenShiftRouterProfileArgs']]]]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
"""
OpenShift Managed cluster.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['OpenShiftManagedClusterAgentPoolProfileArgs']]]] agent_pool_profiles: Configuration of OpenShift cluster VMs.
:param pulumi.Input[pulumi.InputType['OpenShiftManagedClusterAuthProfileArgs']] auth_profile: Configures OpenShift authentication.
:param pulumi.Input[str] location: Resource location
:param pulumi.Input[pulumi.InputType['OpenShiftManagedClusterMasterPoolProfileArgs']] master_pool_profile: Configuration for OpenShift master VMs.
:param pulumi.Input[pulumi.InputType['OpenShiftManagedClusterMonitorProfileArgs']] monitor_profile: Configures Log Analytics integration.
:param pulumi.Input[pulumi.InputType['NetworkProfileArgs']] network_profile: Configuration for OpenShift networking.
:param pulumi.Input[str] open_shift_version: Version of OpenShift specified when creating the cluster.
:param pulumi.Input[pulumi.InputType['PurchasePlanArgs']] plan: Define the resource plan as required by ARM for billing purposes
:param pulumi.Input[bool] refresh_cluster: Allows node rotation
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[str] resource_name_: The name of the OpenShift managed cluster resource.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['OpenShiftRouterProfileArgs']]]] router_profiles: Configuration for OpenShift router(s).
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: OpenShiftManagedClusterArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
OpenShift Managed cluster.
:param str resource_name: The name of the resource.
:param OpenShiftManagedClusterArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(OpenShiftManagedClusterArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
agent_pool_profiles: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['OpenShiftManagedClusterAgentPoolProfileArgs']]]]] = None,
auth_profile: Optional[pulumi.Input[pulumi.InputType['OpenShiftManagedClusterAuthProfileArgs']]] = None,
location: Optional[pulumi.Input[str]] = None,
master_pool_profile: Optional[pulumi.Input[pulumi.InputType['OpenShiftManagedClusterMasterPoolProfileArgs']]] = None,
monitor_profile: Optional[pulumi.Input[pulumi.InputType['OpenShiftManagedClusterMonitorProfileArgs']]] = None,
network_profile: Optional[pulumi.Input[pulumi.InputType['NetworkProfileArgs']]] = None,
open_shift_version: Optional[pulumi.Input[str]] = None,
plan: Optional[pulumi.Input[pulumi.InputType['PurchasePlanArgs']]] = None,
refresh_cluster: Optional[pulumi.Input[bool]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
resource_name_: Optional[pulumi.Input[str]] = None,
router_profiles: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['OpenShiftRouterProfileArgs']]]]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = OpenShiftManagedClusterArgs.__new__(OpenShiftManagedClusterArgs)
__props__.__dict__["agent_pool_profiles"] = agent_pool_profiles
__props__.__dict__["auth_profile"] = auth_profile
__props__.__dict__["location"] = location
__props__.__dict__["master_pool_profile"] = master_pool_profile
__props__.__dict__["monitor_profile"] = monitor_profile
__props__.__dict__["network_profile"] = network_profile
if open_shift_version is None and not opts.urn:
raise TypeError("Missing required property 'open_shift_version'")
__props__.__dict__["open_shift_version"] = open_shift_version
__props__.__dict__["plan"] = plan
__props__.__dict__["refresh_cluster"] = refresh_cluster
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["resource_name"] = resource_name_
__props__.__dict__["router_profiles"] = router_profiles
__props__.__dict__["tags"] = tags
__props__.__dict__["cluster_version"] = None
__props__.__dict__["fqdn"] = None
__props__.__dict__["name"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["public_hostname"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:containerservice/v20191027preview:OpenShiftManagedCluster"), pulumi.Alias(type_="azure-native:containerservice:OpenShiftManagedCluster"), pulumi.Alias(type_="azure-nextgen:containerservice:OpenShiftManagedCluster"), pulumi.Alias(type_="azure-native:containerservice/v20180930preview:OpenShiftManagedCluster"), pulumi.Alias(type_="azure-nextgen:containerservice/v20180930preview:OpenShiftManagedCluster"), pulumi.Alias(type_="azure-native:containerservice/v20190430:OpenShiftManagedCluster"), pulumi.Alias(type_="azure-nextgen:containerservice/v20190430:OpenShiftManagedCluster"), pulumi.Alias(type_="azure-native:containerservice/v20190930preview:OpenShiftManagedCluster"), pulumi.Alias(type_="azure-nextgen:containerservice/v20190930preview:OpenShiftManagedCluster")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(OpenShiftManagedCluster, __self__).__init__(
'azure-native:containerservice/v20191027preview:OpenShiftManagedCluster',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'OpenShiftManagedCluster':
"""
Get an existing OpenShiftManagedCluster resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = OpenShiftManagedClusterArgs.__new__(OpenShiftManagedClusterArgs)
__props__.__dict__["agent_pool_profiles"] = None
__props__.__dict__["auth_profile"] = None
__props__.__dict__["cluster_version"] = None
__props__.__dict__["fqdn"] = None
__props__.__dict__["location"] = None
__props__.__dict__["master_pool_profile"] = None
__props__.__dict__["monitor_profile"] = None
__props__.__dict__["name"] = None
__props__.__dict__["network_profile"] = None
__props__.__dict__["open_shift_version"] = None
__props__.__dict__["plan"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["public_hostname"] = None
__props__.__dict__["refresh_cluster"] = None
__props__.__dict__["router_profiles"] = None
__props__.__dict__["tags"] = None
__props__.__dict__["type"] = None
return OpenShiftManagedCluster(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="agentPoolProfiles")
def agent_pool_profiles(self) -> pulumi.Output[Optional[Sequence['outputs.OpenShiftManagedClusterAgentPoolProfileResponse']]]:
"""
Configuration of OpenShift cluster VMs.
"""
return pulumi.get(self, "agent_pool_profiles")
@property
@pulumi.getter(name="authProfile")
def auth_profile(self) -> pulumi.Output[Optional['outputs.OpenShiftManagedClusterAuthProfileResponse']]:
"""
Configures OpenShift authentication.
"""
return pulumi.get(self, "auth_profile")
@property
@pulumi.getter(name="clusterVersion")
def cluster_version(self) -> pulumi.Output[str]:
"""
Version of OpenShift specified when creating the cluster.
"""
return pulumi.get(self, "cluster_version")
@property
@pulumi.getter
def fqdn(self) -> pulumi.Output[str]:
"""
Service generated FQDN for OpenShift API server loadbalancer internal hostname.
"""
return pulumi.get(self, "fqdn")
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
"""
Resource location
"""
return pulumi.get(self, "location")
@property
@pulumi.getter(name="masterPoolProfile")
def master_pool_profile(self) -> pulumi.Output[Optional['outputs.OpenShiftManagedClusterMasterPoolProfileResponse']]:
"""
Configuration for OpenShift master VMs.
"""
return pulumi.get(self, "master_pool_profile")
@property
@pulumi.getter(name="monitorProfile")
def monitor_profile(self) -> pulumi.Output[Optional['outputs.OpenShiftManagedClusterMonitorProfileResponse']]:
"""
Configures Log Analytics integration.
"""
return pulumi.get(self, "monitor_profile")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="networkProfile")
def network_profile(self) -> pulumi.Output[Optional['outputs.NetworkProfileResponse']]:
"""
Configuration for OpenShift networking.
"""
return pulumi.get(self, "network_profile")
@property
@pulumi.getter(name="openShiftVersion")
def open_shift_version(self) -> pulumi.Output[str]:
"""
Version of OpenShift specified when creating the cluster.
"""
return pulumi.get(self, "open_shift_version")
@property
@pulumi.getter
def plan(self) -> pulumi.Output[Optional['outputs.PurchasePlanResponse']]:
"""
Define the resource plan as required by ARM for billing purposes
"""
return pulumi.get(self, "plan")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
The current deployment or provisioning state, which only appears in the response.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="publicHostname")
def public_hostname(self) -> pulumi.Output[str]:
"""
Service generated FQDN or private IP for OpenShift API server.
"""
return pulumi.get(self, "public_hostname")
@property
@pulumi.getter(name="refreshCluster")
def refresh_cluster(self) -> pulumi.Output[Optional[bool]]:
"""
Allows node rotation
"""
return pulumi.get(self, "refresh_cluster")
@property
@pulumi.getter(name="routerProfiles")
def router_profiles(self) -> pulumi.Output[Optional[Sequence['outputs.OpenShiftRouterProfileResponse']]]:
"""
Configuration for OpenShift router(s).
"""
return pulumi.get(self, "router_profiles")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Resource tags
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type
"""
return pulumi.get(self, "type")
django_drf_server/quiz/migrations/0017_remove_quiz_questions.py | pammalPrasanna/quizie | 0 | 10624 |
# Generated by Django 3.2.4 on 2021-06-17 02:01
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('quiz', '0016_auto_20210617_0724'),
]
operations = [
migrations.RemoveField(
model_name='quiz',
name='questions',
),
]
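# Illustrative usage (assuming a standard Django project layout): this
# generated migration is applied together with the rest of the quiz app's
# migrations, e.g.
#   python manage.py migrate quiz
# and the database can be rolled back to the state that still had the field
# with
#   python manage.py migrate quiz 0016_auto_20210617_0724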
main.py | khan-git/webRecipies | 0 | 10625 |
# -*- coding: iso-8859-1 -*-
import os
import shutil
import datetime
import sqlite3
from flask import Flask, request, session, render_template, g, redirect, url_for, abort, flash, make_response
from random import randint
import json
import urllib2
from werkzeug.utils import secure_filename
UPLOAD_FOLDER = '/tmp'
ALLOWED_EXTENSIONS = set(['txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif'])
DBBACKUPPATH = os.path.abspath('db_backup')
if os.path.exists(DBBACKUPPATH) == False:
os.mkdir(DBBACKUPPATH)
app = Flask(__name__)
#app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
def allowed_file(filename):
    return '.' in filename and \
           filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS
app.config.from_object(__name__)
# Load default config and override config from an environment variable
app.config.update(dict(
DATABASE=os.path.join(app.root_path, 'recipes.db'),
SECRET_KEY='development key',
USERNAME='admin',
PASSWORD='<PASSWORD>',
UPLOAD_FOLDER='/tmp'
))
app.config['UPLOAD_FOLDER'] = '/tmp'
app.config.from_envvar('FLASKR_SETTINGS', silent=True)
def connect_db():
"""Connects to the specific database."""
if os.path.exists(app.config['DATABASE']) == False:
cmd = 'sqlite3 recipes.db < database.sql'
os.system(cmd)
rv = sqlite3.connect(app.config['DATABASE'])
rv.row_factory = sqlite3.Row
return rv
def get_db():
"""Opens a new database connection if there is none yet for the
current application context.
"""
if not hasattr(g, 'sqlite_db'):
g.sqlite_db = connect_db()
return g.sqlite_db
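# Illustrative sketch (never called): outside of a request, get_db() needs an
# application context. The count query below is an assumed example.
def _example_db_usage():
    with app.app_context():
        db = get_db()
        return db.execute('SELECT count(*) FROM recipes').fetchone()[0]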
@app.teardown_appcontext
def close_db(error):
"""Closes the database again at the end of the request."""
if hasattr(g, 'sqlite_db'):
g.sqlite_db.close()
def init_db():
db = get_db()
with app.open_resource('database.sql', mode='r') as f:
db.cursor().executescript(f.read())
db.commit()
def queryDbFetchOne(query):
"""Query database, return one result"""
db = get_db()
cur = db.cursor()
cur.execute(query)
return cur.fetchone()
def queryDbFetchAll(query):
"""Query database, return one result"""
db = get_db()
cur = db.cursor()
cur.execute(query)
return cur.fetchall()
def getRecipe(recipeKey):
"""Get recipe data"""
return queryDbFetchOne('SELECT * FROM recipes WHERE key="%s"'%recipeKey)
def getIngredients(recipeKey):
"""Get all ingredients for a recipe"""
return queryDbFetchAll('SELECT * FROM recipeAmount WHERE recipeKey="%s"'%recipeKey)
def getNextKey():
"""Get next number for key"""
currentHighKey = queryDbFetchOne('SELECT key FROM recipes ORDER BY key DESC')
if currentHighKey is None:
print "IS none %s"%currentHighKey
currentHighKey = 0
else:
currentHighKey = int(currentHighKey[0])
return currentHighKey +1
def insertIntoDb(table, names, values):
"""Insert into database"""
if len(values) != len(names):
return None
query = 'INSERT INTO %s (%s) VALUES(%s)'%(table, ', '.join(names), ', '.join(values))
rowId = None
try:
db = get_db()
cur = db.cursor()
cur = get_db().cursor()
cur.execute(query)
db.commit()
rowId = cur.lastrowid
except:
db.rollback()
finally:
return rowId
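# Illustrative sketch (never called): insertIntoDb() expects values already
# formatted as SQL literals (text pre-quoted, numbers as plain strings),
# mirroring how prepdb() and saveRecipe() call it further down. The key,
# title and portion count below are assumed example values.
def _example_insert_usage():
    return insertIntoDb('recipes',
                        ['key', 'title', 'portions'],
                        ['999', '"Example recipe"', '4'])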
def doRawQuery(query):
"""Do a raw query"""
rowId = None
try:
db = get_db()
cur = db.cursor()
cur = get_db().cursor()
cur.execute(query)
db.commit()
rowId = cur.lastrowid
except:
db.rollback()
finally:
return rowId
def updateDb(table, names, values, where):
"""Update row in table"""
if len(values) != len(names):
return None
query = 'UPDATE %s SET '%(table)
qPairs = []
for name, value in zip(names,values):
qPairs.append('%s=%s'%(name,value))
query += ', '.join(x for x in qPairs)
query += ' %s'%where
rowId = None
try:
db = get_db()
cur = db.cursor()
cur = get_db().cursor()
cur.execute(query)
db.commit()
rowId = cur.lastrowid
except:
db.rollback()
finally:
return rowId
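# Illustrative sketch (never called): updateDb() takes the complete WHERE
# clause as a string, just as saveRecipe() does. Key 999 is an assumed value.
def _example_update_usage():
    return updateDb('recipes', ['portions'], ['6'], 'WHERE key=999')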
@app.route('/prepdb')
def prepdb():
"""Prepare database from json file"""
f = open('recipes.json','r')
buff = f.read()
recipes = json.loads(buff)
for item in recipes:
recipeKey = getNextKey()
rowId = insertIntoDb('recipes', ['key', 'title','instructions', 'portions'],
[recipeKey, '"%s"'%item['title'], '"%s"'%item['instructions'], item['portions']])
for ingredient in item['ingredients']:
keys = ingredient.keys()
keys.insert(0, 'recipeKey')
values = ingredient.values()
values.insert(0, recipeKey)
rId = insertIntoDb('recipeAmount', keys, values)
for group in item['recipeTag']:
insertIntoDb('recipeTag', ['recipeKey', 'group'], [recipeKey, '"%s"'%group])
if 'fridge' in item:
insertIntoDb('fridge', ['recipeKey', 'portions'], [recipeKey, item['fridge']])
print " Fridge %d"%item['fridge']
else:
print "No fridge"
return index()
@app.cli.command('initdb')
def initdb_command():
"""Initializes the database."""
init_db()
print 'Initialized the database.'
@app.route('/help')
def help():
values = {'pageId': 'help',
'popupMenuId': 'popupMenuId%d'%randint(1, 1048)
}
return render_template('help.html', **values)
@app.route('/')
def index():
values = {'pageId': 'index',
'popupMenuId': 'popupMenuId%d'%randint(1, 1048)
}
return render_template('index.html', **values)
# return redirect('login', code=304)
@app.route('/login', methods=['GET','POST'])
def login():
error = None
if request.method == 'POST':
if request.form['username'] != 'admin' or request.form['password'] != '<PASSWORD>':
error = 'Invalid Credentials. Please try again.'
else:
            return redirect(url_for('favourite'))
values = {'pageId': 'index',
'popupMenuId': 'popupMenuId%d'%randint(1, 1048),
'error': error
}
return render_template('login.html', **values)
@app.route('/editRecipe', methods=['GET'])
def editRecipe():
return newRecipe(request.args['recipeKey'])
@app.route('/deleteRecipe', methods=['GET'])
def deleteRecipe():
    # TODO: remove the recipe and its ingredient rows once implemented
    if 'recipeKey' in request.args:
        pass
    return redirect(url_for('index'))
def deleteAmount(recipeKey):
    query = 'DELETE FROM recipeAmount WHERE recipeKey=%s'%recipeKey
    rowId = None
    try:
db = get_db()
cur = db.cursor()
cur = get_db().cursor()
cur.execute(query)
db.commit()
rowId = cur.lastrowid
except:
db.rollback()
msg = "error in delete operation"
print msg
finally:
return rowId
@app.route('/newRecipe')
def newRecipe(recipeKey=None):
if recipeKey is not None:
recipe = getRecipe(recipeKey)
ingredients = getIngredients(recipeKey)
else:
recipe = None
ingredients = None
entries = queryDbFetchAll('SELECT name FROM ingredients ')
measurements = queryDbFetchAll('SELECT short FROM measurements ')
values = {'ingredientsList': entries,
'measurements':measurements,
'recipe':recipe,
'ingredients':ingredients,
'pageId': 'newRecipe',
'popupMenuId': 'popupMenuId%d'%randint(1, 1048)
}
return render_template('newRecipe.html', **values)
@app.route('/error')
def errorHtml():
values = {'pageId': 'error',
'popupMenuId': 'popupMenuId%d'%randint(1, 1048)
}
return render_template('error.html', **values)
@app.route('/saveRecipe', methods=['POST'])
def saveRecipe():
# TODO add last update time
title = request.form['title']
names = ['title']
values = ['"%s"'%title]
if 'instructions' in request.form:
names.append('instructions')
values.append('"%s"'%request.form['instructions'])
if 'portions' in request.form:
names.append('portions')
values.append(request.form['portions'])
if 'recipeKey' in request.form:
recipeKey = request.form['recipeKey']
updateDb('recipes', names, values, 'WHERE key=%s'%recipeKey)
else:
recipeKey = getNextKey()
names.insert(0, 'key')
values.insert(0, '%d'%recipeKey)
if insertIntoDb('recipes', names, values) is None:
return json.dumps({'redirect':'false', 'result': 'Error creating recipe'})
amount = request.form.getlist('amount')
measurement = request.form.getlist('measurement')
ingredients = request.form.getlist('ingredient')
deleteAmount(recipeKey)
for a,m,i in zip(amount, measurement, ingredients):
names = ['recipeKey', 'ingredient', 'amount', 'measurement']
values = [str(recipeKey), '"%s"'%i, str(a), '"%s"'%m]
if insertIntoDb('recipeAmount', names, values) is None:
return json.dumps({'redirect':'false', 'result': 'Error creating recipe'})
return json.dumps({'redirect':True, 'url': '/show/recipe?recipe=%s'%recipeKey})
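# For reference, derived from the handler above: the POST body carries
# 'title', optional 'instructions', 'portions' and 'recipeKey', plus the
# parallel lists 'amount', 'measurement' and 'ingredient', one entry per
# ingredient row of the recipe form.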
@app.route('/show/recipe', methods=['GET'])
def showRecipe():
recipeKey = request.args.get('recipe')
recipe = getRecipe(recipeKey)
return displayRecipe(recipe)
def displayRecipe(recipe):
values = {'key':recipe['key'],
'title': recipe['title'],
'instructions': recipe['instructions'],
'portions': recipe['portions'],
'ingredients': getIngredients(recipe['key']),
'pageId': 'displayRecipe',
'popupMenuId': 'popupMenuId%d'%randint(1, 1048)
}
return render_template('displayRecipe_template.html', **values)
@app.route('/randomRecipe', methods=['GET'])
def randomRecipe():
recipes = queryDbFetchAll('SELECT * FROM recipes ORDER BY RANDOM() LIMIT 4')
return render_template('listRecipes.html', header='Förslag:', lastRecipes=recipes)
@app.route('/menuSuggestion', methods=['GET'])
def menuSuggestion():
recipes = queryDbFetchAll('SELECT * FROM recipes ORDER BY RANDOM() LIMIT 4')
if 'update' in request.args:
return render_template('onlyList.html', lastRecipes=recipes)
values = {'pagetitle':'Receptakuten',
'title': 'Förslag:',
'lastRecipes': recipes,
'refresh': 'true',
'pageId': 'menuSuggestion',
'popupMenuId': 'popupMenuId%d'%randint(1, 1048)
}
return render_template('listRecipes.html', **values)
@app.route('/ajax/search', methods=['GET'])
def searchAjax():
if request.method == 'GET':
patterns = request.args.getlist('searchPatterns[]')
query = ''
for p in patterns:
if len(query) > 0:
query = '%s or '%query
query += 'title LIKE "%%%s%%" or instructions LIKE "%%%s%%"'%(p, p)
query = 'SELECT key, title FROM recipes WHERE %s LIMIT 10'%query
results = queryDbFetchAll(query)
t = []
for p in results:
h = {}
for k in p.keys():
h[k] = p[k]
t.append(h)
return json.dumps(t)
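# For reference: the endpoint reads repeated 'searchPatterns[]' query
# parameters (jQuery-style array params), so an assumed request such as
#   /ajax/search?searchPatterns[]=pasta&searchPatterns[]=tomat
# returns at most ten {key, title} objects as JSON.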
@app.route('/ajax/searchIngredient', methods=['GET'])
def searchIngredient():
if request.method == 'GET':
patterns = request.args.getlist('searchPatterns[]')
print patterns
query = ''
for p in patterns:
if len(query) > 0:
query = '%s or '%query
query += 'ingredient LIKE "%%%s%%"'%(p)
query = 'SELECT DISTINCT ingredient FROM recipeAmount WHERE %s'%query
print query
results = queryDbFetchAll(query)
t = []
for p in results:
h = {}
for k in p.keys():
h[k] = p[k]
t.append(h)
return json.dumps(t)
@app.route('/search')
def search():
values = {'pageId': 'search',
'popupMenuId': 'popupMenuId%d'%randint(1, 1048)
}
return render_template('search.html', **values)
def getFridgeJSON():
fridgeContent = queryDbFetchAll('SELECT key, title, fridge.portions AS portions FROM recipes INNER JOIN fridge ON recipes.key = fridge.recipeKey')
fridgeJson = []
for row in fridgeContent:
rowJson = {}
for key in row.keys():
rowJson[key] = row[key]
fridgeJson.append(rowJson)
return json.dumps(fridgeJson)
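# For reference: the JSON built above is a list of objects shaped like
#   [{"key": 12, "title": "Lasagne", "portions": 4}, ...]
# (values here are assumptions; the keys come from the JOIN query).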
@app.route('/fromTheFridge')
def fromTheFridge():
values = {'pageId': 'fromTheFridge',
'popupMenuId': 'popupMenuId%d'%randint(1, 1048)
}
return render_template('whatsinthefridge.html', **values)
# Update fridge content
@app.route('/ajax/updateFridge', methods=['GET','POST'])
def updateFridge():
if request.method == 'POST':
recipesJson = request.form.getlist('recipes')
recipes = json.loads(recipesJson[0])
keys = []
for item in recipes:
keys.append(item['key'])
queryUpdate = 'UPDATE fridge SET portions=%d WHERE recipeKey=%d'%(item['portions'], item['key'])
queryInsert = 'INSERT INTO fridge (recipeKey, portions) SELECT %d,%d WHERE(Select Changes() = 0)'%(item['key'], item['portions'])
doRawQuery(queryUpdate)
doRawQuery(queryInsert)
currentKeys = queryDbFetchAll('SELECT recipeKey FROM fridge ORDER BY recipeKey')
for key in currentKeys:
if key['recipeKey'] not in keys:
deleteQuery = 'DELETE FROM fridge WHERE recipeKey=%s'%key['recipeKey']
doRawQuery(deleteQuery)
return getFridgeJSON()
@app.route('/groceryList')
def groceryList():
recipes = queryDbFetchAll('SELECT key, title, portions FROM recipes ORDER BY title')
ingredients = {}
for recipe in recipes:
ingredients[recipe['key']] = getIngredients(recipe['key'])
values = {'pageId': 'groceryList',
'recipes': recipes,
'ingredients': ingredients,
'popupMenuId': 'popupMenuId%d'%randint(1, 1048)
}
return render_template('groceryList.html', **values)
@app.route('/favourite')
def favourite():
"""Show favourite recipes"""
values = {'pageId': 'favouritePage',
'popupMenuId': 'popupMenuId%d'%randint(1, 1048)
}
return render_template('favourite.html', **values)
@app.route('/ajax/getRecipesJson', methods=['GET','POST'])
def getRecipesJson():
if request.method == 'POST':
recipeKeys = request.form.getlist('recipe')
query = 'SELECT * FROM recipes where '
qyeryKeys = []
for recipes in recipeKeys:
jsonKeys = json.loads(recipes)
for key in jsonKeys:
qyeryKeys.append('key=%s'%key['recipeKey'])
query += ' OR '.join(qyeryKeys)
recipeList = queryDbFetchAll(query)
jsonReply = []
for rowRecipe in recipeList:
tmpJson = {}
for key in rowRecipe.keys():
tmpJson[key] = rowRecipe[key]
ingredientsJson = []
for row in getIngredients(rowRecipe['key']):
tmpIngredient = {}
for key in row.keys():
if key == 'recipeKey':
continue
tmpIngredient[key] = row[key]
ingredientsJson.append(tmpIngredient)
tmpJson['ingredients'] = ingredientsJson
jsonReply.append(tmpJson)
return json.dumps(jsonReply)
recipes = queryDbFetchAll('SELECT key, title FROM recipes')
rows = []
for i in recipes:
rows.append(dict(i))
return json.dumps(rows)
@app.route('/manifest.json')
def manifestJSON():
return url_for('static', filename='manifest.json')
@app.route('/manifest.appcache')
def manifest():
res = make_response(render_template('manifest.appcache'), 200)
res.headers["Content-Type"] = "text/cache-manifest"
return res
@app.route('/admin/restore', methods = ['POST'])
def dorestore():
versionF = os.path.abspath(os.path.join(DBBACKUPPATH, request.form.get('version')))
if os.path.exists(versionF):
now = datetime.datetime.now().strftime('%Y%m%d_%H%M%S')
name = '%s_bfrestore.sql'%now
dobackup(name)
tables = queryDbFetchAll('SELECT name FROM sqlite_master WHERE type = "table"')
for tab in tables:
doRawQuery('DROP TABLE %s'%tab['name'])
cmd = 'sqlite3 recipes.db < %s'%versionF
os.system(cmd)
return getstatus()
@app.route('/admin/backup')
def adminbackup():
now = datetime.datetime.now().strftime('%Y%m%d_%H%M%S')
dobackup(now+'.sql')
return getstatus()
def dobackup(name):
dbF = open(os.path.join(DBBACKUPPATH, name), 'w')
con = get_db()
dbF.write('\n'.join(con.iterdump()).encode('utf8'))
dbF.close()
@app.route('/admin/status')
def getstatus():
status = {}
status['num_of_recipes'] = queryDbFetchOne('SELECT count(*) as rows FROM recipes')['rows']
status['num_of_fridge'] = queryDbFetchOne('SELECT count(*) as rows FROM fridge')['rows']
status['num_of_ingredients'] = queryDbFetchOne('SELECT count(*) as rows FROM (SELECT DISTINCT ingredient FROM recipeAmount)')['rows']
status['backups'] = sorted(os.listdir(DBBACKUPPATH), reverse=True)
return json.dumps(status, sort_keys=True, indent=4, separators=(',', ': '))
@app.route('/admin')
def adminpage():
values = {'pageId': 'adminPage',
'popupMenuId': 'popupMenuId%d'%randint(1, 1048)
}
return render_template('admin.html', **values)
if __name__ == "__main__":
# import logging
    # file_handler = RotatingFileHandler('/tmp/receptakuten.log', backupCount=5)
# file_handler.setLevel(logging.WARNING)
# app.logger.addHandler(file_handler)
app.run(host="0.0.0.0", debug=True)
# app.run(debug=True)
| # -*- coding: iso-8859-1 -*-
import os
import shutil
import datetime
import sqlite3
from flask import Flask, request, session, render_template, g, redirect, url_for, abort, flash, make_response
from random import randint
import json
import urllib2
from json.decoder import JSONObject
from werkzeug.utils import secure_filename
UPLOAD_FOLDER = '/tmp'
ALLOWED_EXTENSIONS = set(['txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif'])
DBBACKUPPATH = os.path.abspath('db_backup')
if os.path.exists(DBBACKUPPATH) == False:
os.mkdir(DBBACKUPPATH)
app = Flask(__name__)
#app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS
app.config.from_object(__name__)
# Load default config and override config from an environment variable
app.config.update(dict(
DATABASE=os.path.join(app.root_path, 'recipes.db'),
SECRET_KEY='development key',
USERNAME='admin',
PASSWORD='<PASSWORD>',
UPLOAD_FOLDER='/tmp'
))
app.config['UPLOAD_FOLDER'] = '/tmp'
app.config.from_envvar('FLASKR_SETTINGS', silent=True)
def connect_db():
"""Connects to the specific database."""
if os.path.exists(app.config['DATABASE']) == False:
cmd = 'sqlite3 recipes.db < database.sql'
os.system(cmd)
rv = sqlite3.connect(app.config['DATABASE'])
rv.row_factory = sqlite3.Row
return rv
def get_db():
"""Opens a new database connection if there is none yet for the
current application context.
"""
if not hasattr(g, 'sqlite_db'):
g.sqlite_db = connect_db()
return g.sqlite_db
@app.teardown_appcontext
def close_db(error):
"""Closes the database again at the end of the request."""
if hasattr(g, 'sqlite_db'):
g.sqlite_db.close()
def init_db():
db = get_db()
with app.open_resource('database.sql', mode='r') as f:
db.cursor().executescript(f.read())
db.commit()
def queryDbFetchOne(query):
"""Query database, return one result"""
db = get_db()
cur = db.cursor()
cur.execute(query)
return cur.fetchone()
def queryDbFetchAll(query):
"""Query database, return one result"""
db = get_db()
cur = db.cursor()
cur.execute(query)
return cur.fetchall()
def getRecipe(recipeKey):
"""Get recipe data"""
return queryDbFetchOne('SELECT * FROM recipes WHERE key="%s"'%recipeKey)
def getIngredients(recipeKey):
"""Get all ingredients for a recipe"""
return queryDbFetchAll('SELECT * FROM recipeAmount WHERE recipeKey="%s"'%recipeKey)
def getNextKey():
"""Get next number for key"""
currentHighKey = queryDbFetchOne('SELECT key FROM recipes ORDER BY key DESC')
if currentHighKey is None:
print "IS none %s"%currentHighKey
currentHighKey = 0
else:
currentHighKey = int(currentHighKey[0])
return currentHighKey +1
def insertIntoDb(table, names, values):
"""Insert into database"""
if len(values) != len(names):
return None
query = 'INSERT INTO %s (%s) VALUES(%s)'%(table, ', '.join(names), ', '.join(values))
rowId = None
try:
db = get_db()
cur = db.cursor()
cur = get_db().cursor()
cur.execute(query)
db.commit()
rowId = cur.lastrowid
except:
db.rollback()
finally:
return rowId
def doRawQuery(query):
"""Do a raw query"""
rowId = None
try:
db = get_db()
cur = db.cursor()
cur = get_db().cursor()
cur.execute(query)
db.commit()
rowId = cur.lastrowid
except:
db.rollback()
finally:
return rowId
def updateDb(table, names, values, where):
"""Update row in table"""
if len(values) != len(names):
return None
query = 'UPDATE %s SET '%(table)
qPairs = []
for name, value in zip(names,values):
qPairs.append('%s=%s'%(name,value))
query += ', '.join(x for x in qPairs)
query += ' %s'%where
rowId = None
try:
db = get_db()
cur = db.cursor()
cur = get_db().cursor()
cur.execute(query)
db.commit()
rowId = cur.lastrowid
except:
db.rollback()
finally:
return rowId
@app.route('/prepdb')
def prepdb():
"""Prepare database from json file"""
f = open('recipes.json','r')
buff = f.read()
recipes = json.loads(buff)
for item in recipes:
recipeKey = getNextKey()
rowId = insertIntoDb('recipes', ['key', 'title','instructions', 'portions'],
[recipeKey, '"%s"'%item['title'], '"%s"'%item['instructions'], item['portions']])
for ingredient in item['ingredients']:
keys = ingredient.keys()
keys.insert(0, 'recipeKey')
values = ingredient.values()
values.insert(0, recipeKey)
rId = insertIntoDb('recipeAmount', keys, values)
for group in item['recipeTag']:
insertIntoDb('recipeTag', ['recipeKey', 'group'], [recipeKey, '"%s"'%group])
if 'fridge' in item:
insertIntoDb('fridge', ['recipeKey', 'portions'], [recipeKey, item['fridge']])
print " Fridge %d"%item['fridge']
else:
print "No fridge"
return index()
@app.cli.command('initdb')
def initdb_command():
"""Initializes the database."""
init_db()
print 'Initialized the database.'
@app.route('/help')
def help():
values = {'pageId': 'help',
'popupMenuId': 'popupMenuId%d'%randint(1, 1048)
}
return render_template('help.html', **values)
@app.route('/')
def index():
values = {'pageId': 'index',
'popupMenuId': 'popupMenuId%d'%randint(1, 1048)
}
return render_template('index.html', **values)
# return redirect('login', code=304)
@app.route('/login', methods=['GET','POST'])
def login():
error = None
if request.method == 'POST':
if request.form['username'] != 'admin' or request.form['password'] != '<PASSWORD>':
error = 'Invalid Credentials. Please try again.'
else:
return redirect(url_for('favourite'), code=304)
values = {'pageId': 'index',
'popupMenuId': 'popupMenuId%d'%randint(1, 1048),
'error': error
}
return render_template('login.html', **values)
@app.route('/editRecipe', methods=['GET'])
def editRecipe():
return newRecipe(request.args['recipeKey'])
@app.route('/deleteRecipe', methods=['GET'])
def deleteRecipe():
# TODO
if 'recipeKey' in request.args:
pass
pass
def deleteAmount(recipeKey):
    query = 'DELETE FROM recipeAmount WHERE recipeKey=%s'%recipeKey
    rowId = None
    try:
db = get_db()
cur = db.cursor()
cur = get_db().cursor()
cur.execute(query)
db.commit()
rowId = cur.lastrowid
except:
db.rollback()
msg = "error in delete operation"
print msg
finally:
return rowId
@app.route('/newRecipe')
def newRecipe(recipeKey=None):
if recipeKey is not None:
recipe = getRecipe(recipeKey)
ingredients = getIngredients(recipeKey)
else:
recipe = None
ingredients = None
entries = queryDbFetchAll('SELECT name FROM ingredients ')
measurements = queryDbFetchAll('SELECT short FROM measurements ')
values = {'ingredientsList': entries,
'measurements':measurements,
'recipe':recipe,
'ingredients':ingredients,
'pageId': 'newRecipe',
'popupMenuId': 'popupMenuId%d'%randint(1, 1048)
}
return render_template('newRecipe.html', **values)
@app.route('/error')
def errorHtml():
values = {'pageId': 'error',
'popupMenuId': 'popupMenuId%d'%randint(1, 1048)
}
return render_template('error.html', **values)
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS
@app.route('/saveRecipe', methods=['POST'])
def saveRecipe():
# TODO add last update time
title = request.form['title']
names = ['title']
values = ['"%s"'%title]
if 'instructions' in request.form:
names.append('instructions')
values.append('"%s"'%request.form['instructions'])
if 'portions' in request.form:
names.append('portions')
values.append(request.form['portions'])
if 'recipeKey' in request.form:
recipeKey = request.form['recipeKey']
updateDb('recipes', names, values, 'WHERE key=%s'%recipeKey)
else:
recipeKey = getNextKey()
names.insert(0, 'key')
values.insert(0, '%d'%recipeKey)
if insertIntoDb('recipes', names, values) is None:
return json.dumps({'redirect':'false', 'result': 'Error creating recipe'})
amount = request.form.getlist('amount')
measurement = request.form.getlist('measurement')
ingredients = request.form.getlist('ingredient')
deleteAmount(recipeKey)
for a,m,i in zip(amount, measurement, ingredients):
names = ['recipeKey', 'ingredient', 'amount', 'measurement']
values = [str(recipeKey), '"%s"'%i, str(a), '"%s"'%m]
if insertIntoDb('recipeAmount', names, values) is None:
return json.dumps({'redirect':'false', 'result': 'Error creating recipe'})
return json.dumps({'redirect':True, 'url': '/show/recipe?recipe=%s'%recipeKey})
@app.route('/show/recipe', methods=['GET'])
def showRecipe():
recipeKey = request.args.get('recipe')
recipe = getRecipe(recipeKey)
return displayRecipe(recipe)
def displayRecipe(recipe):
values = {'key':recipe['key'],
'title': recipe['title'],
'instructions': recipe['instructions'],
'portions': recipe['portions'],
'ingredients': getIngredients(recipe['key']),
'pageId': 'displayRecipe',
'popupMenuId': 'popupMenuId%d'%randint(1, 1048)
}
return render_template('displayRecipe_template.html', **values)
@app.route('/randomRecipe', methods=['GET'])
def randomRecipe():
recipes = queryDbFetchAll('SELECT * FROM recipes ORDER BY RANDOM() LIMIT 4')
return render_template('listRecipes.html', header='Förslag:', lastRecipes=recipes)
@app.route('/menuSuggestion', methods=['GET'])
def menuSuggestion():
recipes = queryDbFetchAll('SELECT * FROM recipes ORDER BY RANDOM() LIMIT 4')
if 'update' in request.args:
return render_template('onlyList.html', lastRecipes=recipes)
values = {'pagetitle':'Receptakuten',
'title': 'Förslag:',
'lastRecipes': recipes,
'refresh': 'true',
'pageId': 'menuSuggestion',
'popupMenuId': 'popupMenuId%d'%randint(1, 1048)
}
return render_template('listRecipes.html', **values)
@app.route('/ajax/search', methods=['GET'])
def searchAjax():
if request.method == 'GET':
patterns = request.args.getlist('searchPatterns[]')
query = ''
for p in patterns:
if len(query) > 0:
query = '%s or '%query
query += 'title LIKE "%%%s%%" or instructions LIKE "%%%s%%"'%(p, p)
query = 'SELECT key, title FROM recipes WHERE %s LIMIT 10'%query
results = queryDbFetchAll(query)
t = []
for p in results:
h = {}
for k in p.keys():
h[k] = p[k]
t.append(h)
return json.dumps(t)
@app.route('/ajax/searchIngredient', methods=['GET'])
def searchIngredient():
if request.method == 'GET':
patterns = request.args.getlist('searchPatterns[]')
print patterns
query = ''
for p in patterns:
if len(query) > 0:
query = '%s or '%query
query += 'ingredient LIKE "%%%s%%"'%(p)
query = 'SELECT DISTINCT ingredient FROM recipeAmount WHERE %s'%query
print query
results = queryDbFetchAll(query)
t = []
for p in results:
h = {}
for k in p.keys():
h[k] = p[k]
t.append(h)
return json.dumps(t)
@app.route('/search')
def search():
values = {'pageId': 'search',
'popupMenuId': 'popupMenuId%d'%randint(1, 1048)
}
return render_template('search.html', **values)
def getFridgeJSON():
fridgeContent = queryDbFetchAll('SELECT key, title, fridge.portions AS portions FROM recipes INNER JOIN fridge ON recipes.key = fridge.recipeKey')
fridgeJson = []
for row in fridgeContent:
rowJson = {}
for key in row.keys():
rowJson[key] = row[key]
fridgeJson.append(rowJson)
return json.dumps(fridgeJson)
@app.route('/fromTheFridge')
def fromTheFridge():
values = {'pageId': 'fromTheFridge',
'popupMenuId': 'popupMenuId%d'%randint(1, 1048)
}
return render_template('whatsinthefridge.html', **values)
# Update fridge content
@app.route('/ajax/updateFridge', methods=['GET','POST'])
def updateFridge():
if request.method == 'POST':
recipesJson = request.form.getlist('recipes')
recipes = json.loads(recipesJson[0])
keys = []
for item in recipes:
keys.append(item['key'])
queryUpdate = 'UPDATE fridge SET portions=%d WHERE recipeKey=%d'%(item['portions'], item['key'])
queryInsert = 'INSERT INTO fridge (recipeKey, portions) SELECT %d,%d WHERE(Select Changes() = 0)'%(item['key'], item['portions'])
doRawQuery(queryUpdate)
doRawQuery(queryInsert)
currentKeys = queryDbFetchAll('SELECT recipeKey FROM fridge ORDER BY recipeKey')
for key in currentKeys:
if key['recipeKey'] not in keys:
deleteQuery = 'DELETE FROM fridge WHERE recipeKey=%s'%key['recipeKey']
doRawQuery(deleteQuery)
return getFridgeJSON()
@app.route('/groceryList')
def groceryList():
recipes = queryDbFetchAll('SELECT key, title, portions FROM recipes ORDER BY title')
ingredients = {}
for recipe in recipes:
ingredients[recipe['key']] = getIngredients(recipe['key'])
values = {'pageId': 'groceryList',
'recipes': recipes,
'ingredients': ingredients,
'popupMenuId': 'popupMenuId%d'%randint(1, 1048)
}
return render_template('groceryList.html', **values)
@app.route('/favourite')
def favourite():
"""Show favourite recipes"""
values = {'pageId': 'favouritePage',
'popupMenuId': 'popupMenuId%d'%randint(1, 1048)
}
return render_template('favourite.html', **values)
@app.route('/ajax/getRecipesJson', methods=['GET','POST'])
def getRecipesJson():
if request.method == 'POST':
recipeKeys = request.form.getlist('recipe')
query = 'SELECT * FROM recipes where '
        queryKeys = []
        for recipes in recipeKeys:
            jsonKeys = json.loads(recipes)
            for key in jsonKeys:
                queryKeys.append('key=%s'%key['recipeKey'])
        query += ' OR '.join(queryKeys)
recipeList = queryDbFetchAll(query)
jsonReply = []
for rowRecipe in recipeList:
tmpJson = {}
for key in rowRecipe.keys():
tmpJson[key] = rowRecipe[key]
ingredientsJson = []
for row in getIngredients(rowRecipe['key']):
tmpIngredient = {}
for key in row.keys():
if key == 'recipeKey':
continue
tmpIngredient[key] = row[key]
ingredientsJson.append(tmpIngredient)
tmpJson['ingredients'] = ingredientsJson
jsonReply.append(tmpJson)
return json.dumps(jsonReply)
recipes = queryDbFetchAll('SELECT key, title FROM recipes')
rows = []
for i in recipes:
rows.append(dict(i))
return json.dumps(rows)
@app.route('/manifest.json')
def manifestJSON():
return url_for('static', filename='manifest.json')
@app.route('/manifest.appcache')
def manifest():
res = make_response(render_template('manifest.appcache'), 200)
res.headers["Content-Type"] = "text/cache-manifest"
return res
@app.route('/admin/restore', methods = ['POST'])
def dorestore():
versionF = os.path.abspath(os.path.join(DBBACKUPPATH, request.form.get('version')))
if os.path.exists(versionF):
now = datetime.datetime.now().strftime('%Y%m%d_%H%M%S')
name = '%s_bfrestore.sql'%now
dobackup(name)
tables = queryDbFetchAll('SELECT name FROM sqlite_master WHERE type = "table"')
for tab in tables:
doRawQuery('DROP TABLE %s'%tab['name'])
cmd = 'sqlite3 recipes.db < %s'%versionF
os.system(cmd)
return getstatus()
@app.route('/admin/backup')
def adminbackup():
now = datetime.datetime.now().strftime('%Y%m%d_%H%M%S')
dobackup(now+'.sql')
return getstatus()
def dobackup(name):
dbF = open(os.path.join(DBBACKUPPATH, name), 'w')
con = get_db()
dbF.write('\n'.join(con.iterdump()).encode('utf8'))
dbF.close()
@app.route('/admin/status')
def getstatus():
status = {}
status['num_of_recipes'] = queryDbFetchOne('SELECT count(*) as rows FROM recipes')['rows']
status['num_of_fridge'] = queryDbFetchOne('SELECT count(*) as rows FROM fridge')['rows']
status['num_of_ingredients'] = queryDbFetchOne('SELECT count(*) as rows FROM (SELECT DISTINCT ingredient FROM recipeAmount)')['rows']
status['backups'] = sorted(os.listdir(DBBACKUPPATH), reverse=True)
return json.dumps(status, sort_keys=True, indent=4, separators=(',', ': '))
@app.route('/admin')
def adminpage():
values = {'pageId': 'adminPage',
'popupMenuId': 'popupMenuId%d'%randint(1, 1048)
}
return render_template('admin.html', **values)
if __name__ == "__main__":
# import logging
    # file_handler = RotatingFileHandler('/tmp/receptakuten.log', backupCount=5)
# file_handler.setLevel(logging.WARNING)
# app.logger.addHandler(file_handler)
app.run(host="0.0.0.0", debug=True)
# app.run(debug=True) | en | 0.484113 | # -*- coding: iso-8859-1 -*- #app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER # Load default config and override config from an environment variable Connects to the specific database. Opens a new database connection if there is none yet for the current application context. Closes the database again at the end of the request. Query database, return one result Query database, return one result Get recipe data Get all ingredients for a recipe Get next number for key Insert into database Do a raw query Update row in table Prepare database from json file Initializes the database. # return redirect('login', code=304) # TODO # TODO add last update time # Update fridge content Show favourite recipes # import logging # file_handler = RotatingFileHandler('/tmp/receptakuten.log', bakupCount=5) # file_handler.setLevel(logging.WARNING) # app.logger.addHandler(file_handler) # app.run(debug=True) | 2.152878 | 2 |
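The recipe app above assembles its SQL (for example in searchAjax and insertIntoDb) by string interpolation. A minimal sketch of the same title/instructions lookup using sqlite3 placeholders instead; it assumes the app's recipes table and get_db() helper and is illustrative, not the project's actual code.

def search_recipes_parameterized(patterns, limit=10):
    # Same LIKE-based lookup as searchAjax, but with bound parameters.
    clauses, params = [], []
    for p in patterns:
        clauses.append('title LIKE ? OR instructions LIKE ?')
        params.extend(['%%%s%%' % p, '%%%s%%' % p])
    sql = 'SELECT key, title FROM recipes WHERE %s LIMIT ?' % ' OR '.join(clauses)
    cur = get_db().cursor()
    cur.execute(sql, params + [limit])
    return [dict(row) for row in cur.fetchall()]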
tests/bugs/core_2678_test.py | FirebirdSQL/firebird-qa | 1 | 10626 | <filename>tests/bugs/core_2678_test.py
#coding:utf-8
#
# id: bugs.core_2678
# title: Full outer join cannot use available indices (very slow execution)
# decription:
# tracker_id: CORE-2678
# min_versions: ['3.0']
# versions: 3.0
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
# version: 3.0
# resources: None
substitutions_1 = []
init_script_1 = """"""
db_1 = db_factory(sql_dialect=3, init=init_script_1)
test_script_1 = """
create table td_data1 (
c1 varchar(20) character set win1251 not null collate win1251,
c2 integer not null,
c3 date not null,
d1 float not null
);
create index idx_td_data1 on td_data1(c1,c2,c3);
commit;
create table td_data2 (
c1 varchar(20) character set win1251 not null collate win1251,
c2 integer not null,
c3 date not null,
d2 float not null
);
create index idx_td_data2 on td_data2(c1,c2,c3);
commit;
set planonly;
select
d1.c1, d2.c1,
d1.c2, d2.c2,
d1.c3, d2.c3,
coalesce(sum(d1.d1), 0) t1,
coalesce(sum(d2.d2), 0) t2
from td_data1 d1
full join td_data2 d2
on
d2.c1 = d1.c1
and d2.c2 = d1.c2
and d2.c3 = d1.c3
group by
d1.c1, d2.c1,
d1.c2, d2.c2,
d1.c3, d2.c3;
"""
act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stdout_1 = """
PLAN SORT (JOIN (JOIN (D2 NATURAL, D1 INDEX (IDX_TD_DATA1)), JOIN (D1 NATURAL, D2 INDEX (IDX_TD_DATA2))))
"""
@pytest.mark.version('>=3.0')
def test_1(act_1: Action):
act_1.expected_stdout = expected_stdout_1
act_1.execute()
assert act_1.clean_stdout == act_1.clean_expected_stdout
| <filename>tests/bugs/core_2678_test.py
#coding:utf-8
#
# id: bugs.core_2678
# title: Full outer join cannot use available indices (very slow execution)
# decription:
# tracker_id: CORE-2678
# min_versions: ['3.0']
# versions: 3.0
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
# version: 3.0
# resources: None
substitutions_1 = []
init_script_1 = """"""
db_1 = db_factory(sql_dialect=3, init=init_script_1)
test_script_1 = """
create table td_data1 (
c1 varchar(20) character set win1251 not null collate win1251,
c2 integer not null,
c3 date not null,
d1 float not null
);
create index idx_td_data1 on td_data1(c1,c2,c3);
commit;
create table td_data2 (
c1 varchar(20) character set win1251 not null collate win1251,
c2 integer not null,
c3 date not null,
d2 float not null
);
create index idx_td_data2 on td_data2(c1,c2,c3);
commit;
set planonly;
select
d1.c1, d2.c1,
d1.c2, d2.c2,
d1.c3, d2.c3,
coalesce(sum(d1.d1), 0) t1,
coalesce(sum(d2.d2), 0) t2
from td_data1 d1
full join td_data2 d2
on
d2.c1 = d1.c1
and d2.c2 = d1.c2
and d2.c3 = d1.c3
group by
d1.c1, d2.c1,
d1.c2, d2.c2,
d1.c3, d2.c3;
"""
act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stdout_1 = """
PLAN SORT (JOIN (JOIN (D2 NATURAL, D1 INDEX (IDX_TD_DATA1)), JOIN (D1 NATURAL, D2 INDEX (IDX_TD_DATA2))))
"""
@pytest.mark.version('>=3.0')
def test_1(act_1: Action):
act_1.expected_stdout = expected_stdout_1
act_1.execute()
assert act_1.clean_stdout == act_1.clean_expected_stdout
| en | 0.36263 | #coding:utf-8 # # id: bugs.core_2678 # title: Full outer join cannot use available indices (very slow execution) # decription: # tracker_id: CORE-2678 # min_versions: ['3.0'] # versions: 3.0 # qmid: None # version: 3.0 # resources: None create table td_data1 ( c1 varchar(20) character set win1251 not null collate win1251, c2 integer not null, c3 date not null, d1 float not null ); create index idx_td_data1 on td_data1(c1,c2,c3); commit; create table td_data2 ( c1 varchar(20) character set win1251 not null collate win1251, c2 integer not null, c3 date not null, d2 float not null ); create index idx_td_data2 on td_data2(c1,c2,c3); commit; set planonly; select d1.c1, d2.c1, d1.c2, d2.c2, d1.c3, d2.c3, coalesce(sum(d1.d1), 0) t1, coalesce(sum(d2.d2), 0) t2 from td_data1 d1 full join td_data2 d2 on d2.c1 = d1.c1 and d2.c2 = d1.c2 and d2.c3 = d1.c3 group by d1.c1, d2.c1, d1.c2, d2.c2, d1.c3, d2.c3; PLAN SORT (JOIN (JOIN (D2 NATURAL, D1 INDEX (IDX_TD_DATA1)), JOIN (D1 NATURAL, D2 INDEX (IDX_TD_DATA2)))) | 1.624155 | 2 |
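The module above is meant to be collected by pytest; a minimal programmatic invocation, assuming the firebird-qa plugin and a test server are configured, could look like this sketch.

import pytest
raise SystemExit(pytest.main(["-q", "tests/bugs/core_2678_test.py"]))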
application/model/entity/category.py | UniversidadeDeVassouras/labproghiper-2020.1-MatheusTelles-p1 | 1 | 10627 | <filename>application/model/entity/category.py<gh_stars>1-10
from flask import current_app
class Category:
def __init__(self, id, name, description, thumb):
self._id = id
self._name = name
        self._description = description
        self._thumb = thumb
def setId(self, id):
self._id = id
def getId(self):
return self._id
def setName(self, name):
self._name = name
def getName(self):
return self._name
def setDescription(self, description):
self._description = description
def getDescription(self):
return self._description
def setThumb(self, thumb):
self._thumb = thumb
def getThumb(self):
return self._thumb
def get_video_category_id (self):
videos = current_app.config ['videos']
videos_category = []
for i, video in enumerate (videos.get_video_list()):
if video.getCategory_id () == self.getId():
videos_category.append (video)
return videos_category | <filename>application/model/entity/category.py<gh_stars>1-10
from flask import current_app
class Category:
def __init__(self, id, name, description, thumb):
self._id = id
self._name = name
        self._description = description
        self._thumb = thumb
def setId(self, id):
self._id = id
def getId(self):
return self._id
def setName(self, name):
self._name = name
def getName(self):
return self._name
def setDescription(self, description):
self._description = description
def getDescription(self):
return self._description
def setThumb(self, thumb):
self._thumb = thumb
def getThumb(self):
return self._thumb
def get_video_category_id (self):
videos = current_app.config ['videos']
videos_category = []
for i, video in enumerate (videos.get_video_list()):
if video.getCategory_id () == self.getId():
videos_category.append (video)
return videos_category | none | 1 | 2.527833 | 3 |
|
cookietemple/create/templates/cli/cli_python/{{ cookiecutter.project_slug_no_hyphen }}/tests/__init__.py | e2jk/cookietemple | 117 | 10628 | <gh_stars>100-1000
"""Test suite for the {{ cookiecutter.project_slug_no_hyphen }} package."""
| """Test suite for the {{ cookiecutter.project_slug_no_hyphen }} package.""" | en | 0.669544 | Test suite for the {{ cookiecutter.project_slug_no_hyphen }} package. | 0.984817 | 1 |
bricks/ev3dev/modules/pybricks/robotics.py | ZPhilo/pybricks-micropython | 115 | 10629 | # SPDX-License-Identifier: MIT
# Copyright (c) 2018-2020 The Pybricks Authors
"""Pybricks robotics module."""
from _pybricks.robotics import DriveBase
| # SPDX-License-Identifier: MIT
# Copyright (c) 2018-2020 The Pybricks Authors
"""Pybricks robotics module."""
from _pybricks.robotics import DriveBase
| en | 0.264268 | # SPDX-License-Identifier: MIT # Copyright (c) 2018-2020 The Pybricks Authors Pybricks robotics module. | 1.132046 | 1 |
flask-app/web_app/storage_manager/storage_manager.py | PetrMokrov/back_end_project | 0 | 10630 | #!/usr/bin/env python
import psycopg2
import time
from ..models import User
class StorageManager:
def __init__(self):
self.conn = None
self._connect()
self._create_table()
def _connect(self):
while True:
try:
self.conn = psycopg2.connect(
host='storage',
database='app_storage',
user='admin',
password='<PASSWORD>'
)
except psycopg2.Error:
print('Cannot connect to database, sleeping 3 seconds')
time.sleep(3)
else:
break
def _create_table(self):
while True:
try:
cursor = self.conn.cursor()
cursor.execute('CREATE TABLE IF NOT EXISTS users \
(id SERIAL PRIMARY KEY, login VARCHAR(128), \
email VARCHAR(128), hash_password VARCHAR(<PASSWORD>), \
confirmed BOOLEAN)')
except psycopg2.Error:
print('Database error, reconnecting')
self._connect()
else:
break
def insert(self, user):
'''
        If the insert succeeds, the function returns True;
        otherwise it returns False.
'''
while True:
try:
if self.select(user.login, category='login') is not None:
return False
cursor = self.conn.cursor()
cursor.execute('INSERT INTO users(login, email, hash_password, confirmed) \
VALUES (%s, %s, %s, %s)', (user.login, user.email, user.hash_password, user.confirmed))
self.conn.commit()
return True
except psycopg2.Error:
print('Database error, reconnecting')
time.sleep(1)
self._connect()
else:
break
def select(self, value, category='login'):
'''
        Returns None if there is no user with the given value of
        category; otherwise returns a User instance.
'''
while True:
try:
cursor = self.conn.cursor()
cursor.execute('SELECT * FROM users WHERE %s = %%s' % category, (value,))
self.conn.commit()
fetch = cursor.fetchall()
if len(fetch) == 0:
return None
user = User(fetch[0][1], fetch[0][2])
user.id = fetch[0][0]
user.hash_password = fetch[0][3]
user.confirmed = fetch[0][4]
return user
except psycopg2.Error:
print('Database error, reconnecting')
time.sleep(1)
self._connect()
else:
break
def confirm(self, value, category='login'):
'''
        Sets the \'confirmed\' flag to True for the user with the given
        value of category.\n
        If no such user is found, returns False; otherwise returns True.
'''
while True:
try:
if self.select(value, category=category) is not None:
cursor = self.conn.cursor()
cursor.execute('UPDATE users SET confirmed = TRUE WHERE %s = %%s' % category, (value,))
self.conn.commit()
return True
else:
return False
except psycopg2.Error:
print('Database error, reconnecting')
time.sleep(1)
self._connect()
else:
break
| #!/usr/bin/env python
import psycopg2
import time
from ..models import User
class StorageManager:
def __init__(self):
self.conn = None
self._connect()
self._create_table()
def _connect(self):
while True:
try:
self.conn = psycopg2.connect(
host='storage',
database='app_storage',
user='admin',
password='<PASSWORD>'
)
except psycopg2.Error:
print('Cannot connect to database, sleeping 3 seconds')
time.sleep(3)
else:
break
def _create_table(self):
while True:
try:
cursor = self.conn.cursor()
cursor.execute('CREATE TABLE IF NOT EXISTS users \
(id SERIAL PRIMARY KEY, login VARCHAR(128), \
email VARCHAR(128), hash_password VARCHAR(<PASSWORD>), \
confirmed BOOLEAN)')
except psycopg2.Error:
print('Database error, reconnecting')
self._connect()
else:
break
def insert(self, user):
'''
        If the insert succeeds, the function returns True;
        otherwise it returns False.
'''
while True:
try:
if self.select(user.login, category='login') is not None:
return False
cursor = self.conn.cursor()
cursor.execute('INSERT INTO users(login, email, hash_password, confirmed) \
VALUES (%s, %s, %s, %s)', (user.login, user.email, user.hash_password, user.confirmed))
self.conn.commit()
return True
except psycopg2.Error:
print('Database error, reconnecting')
time.sleep(1)
self._connect()
else:
break
def select(self, value, category='login'):
'''
        Returns None if there is no user with the given value of
        category; otherwise returns a User instance.
'''
while True:
try:
cursor = self.conn.cursor()
cursor.execute('SELECT * FROM users WHERE %s = %%s' % category, (value,))
self.conn.commit()
fetch = cursor.fetchall()
if len(fetch) == 0:
return None
user = User(fetch[0][1], fetch[0][2])
user.id = fetch[0][0]
user.hash_password = fetch[0][3]
user.confirmed = fetch[0][4]
return user
except psycopg2.Error:
print('Database error, reconnecting')
time.sleep(1)
self._connect()
else:
break
def confirm(self, value, category='login'):
'''
        Sets the \'confirmed\' flag to True for the user with the given
        value of category.\n
        If no such user is found, returns False; otherwise returns True.
'''
while True:
try:
if self.select(value, category=category) is not None:
cursor = self.conn.cursor()
cursor.execute('UPDATE users SET confirmed = TRUE WHERE %s = %%s' % category, (value,))
self.conn.commit()
return True
else:
return False
except psycopg2.Error:
print('Database error, reconnecting')
time.sleep(1)
self._connect()
else:
break
| en | 0.540293 | #!/usr/bin/env python If insert is success, the function returns true, Else, it returns false The function returns None, if there is no user with very value of category, else it returns User instance The function sets \'confirmed\' parameter of the user with very value of category as True\n If such user not found, returns False, else returns True | 2.91116 | 3 |
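A hypothetical usage sketch for StorageManager; it assumes the User model takes (login, email) as in select() above, that the Postgres host named 'storage' is reachable, and that hash_password is set by the caller (the hashing scheme is not shown in this module).

manager = StorageManager()

user = User("alice", "alice@example.com")
user.hash_password = "<hashed password>"   # assumed attribute, as used by insert()
user.confirmed = False

if manager.insert(user):                   # returns False if the login already exists
    manager.confirm("alice", category="login")

stored = manager.select("alice", category="login")
print(stored.email if stored else "not found")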
custom/ahex.py | piyush1104/ColorHelper | 0 | 10631 | <reponame>piyush1104/ColorHelper
"""Custon color that looks for colors of format `#RRGGBBAA` as `#AARRGGBB`."""
from coloraide.css.colors import Color, SRGB
from coloraide.colors import _parse as parse
from coloraide import util
import copy
import re
class ASRGB(SRGB):
"""SRGB that looks for alpha first in hex format."""
MATCH = re.compile(r"(?i)\#(?:{hex}{{8}}|{hex}{{6}})\b".format(**parse.COLOR_PARTS))
@classmethod
def match(cls, string, start=0, fullmatch=True):
"""Match a CSS color string."""
m = cls.MATCH.match(string, start)
if m is not None and (not fullmatch or m.end(0) == len(string)):
return cls.split_channels(m.group(0)), m.end(0)
return None, None
@classmethod
def translate_channel(cls, channel, value):
"""Translate channel string."""
if -1 <= channel <= 2:
return parse.norm_hex_channel(value)
else:
raise ValueError("Unexpected channel index of '{}'".format(channel))
@classmethod
def split_channels(cls, color):
"""Split channels."""
if len(color) == 9:
return (
cls.translate_channel(0, "#" + color[3:5]),
cls.translate_channel(1, "#" + color[5:7]),
cls.translate_channel(2, "#" + color[7:]),
cls.translate_channel(-1, "#" + color[1:3]),
)
else:
return (
cls.translate_channel(0, "#" + color[1:3]),
cls.translate_channel(1, "#" + color[3:5]),
cls.translate_channel(2, "#" + color[5:]),
1.0
)
def to_string(
self, *, options=None, alpha=None, precision=util.DEF_PREC, fit=True, **kwargs
):
"""Convert to Hex format."""
if options is None:
options = {}
show_alpha = alpha is not False and (alpha is True or self.alpha < 1.0)
template = "#{:02x}{:02x}{:02x}{:02x}" if show_alpha else "#{:02x}{:02x}{:02x}"
if options.get("hex_upper"):
template = template.upper()
# Always fit hex
coords = self.fit_coords()
if show_alpha:
value = template.format(
int(util.round_half_up(self.alpha * 255.0)),
int(util.round_half_up(coords[0] * 255.0)),
int(util.round_half_up(coords[1] * 255.0)),
int(util.round_half_up(coords[2] * 255.0))
)
else:
value = template.format(
int(util.round_half_up(coords[0] * 255.0)),
int(util.round_half_up(coords[1] * 255.0)),
int(util.round_half_up(coords[2] * 255.0))
)
return value
class ColorAlphaHex(Color):
"""Color object whose sRGB color space looks for colors of format `#RRGGBBAA` as `#AARRGGBB`."""
CS_MAP = copy.copy(Color.CS_MAP)
CS_MAP["srgb"] = ASRGB
| """Custon color that looks for colors of format `#RRGGBBAA` as `#AARRGGBB`."""
from coloraide.css.colors import Color, SRGB
from coloraide.colors import _parse as parse
from coloraide import util
import copy
import re
class ASRGB(SRGB):
"""SRGB that looks for alpha first in hex format."""
MATCH = re.compile(r"(?i)\#(?:{hex}{{8}}|{hex}{{6}})\b".format(**parse.COLOR_PARTS))
@classmethod
def match(cls, string, start=0, fullmatch=True):
"""Match a CSS color string."""
m = cls.MATCH.match(string, start)
if m is not None and (not fullmatch or m.end(0) == len(string)):
return cls.split_channels(m.group(0)), m.end(0)
return None, None
@classmethod
def translate_channel(cls, channel, value):
"""Translate channel string."""
if -1 <= channel <= 2:
return parse.norm_hex_channel(value)
else:
raise ValueError("Unexpected channel index of '{}'".format(channel))
@classmethod
def split_channels(cls, color):
"""Split channels."""
if len(color) == 9:
return (
cls.translate_channel(0, "#" + color[3:5]),
cls.translate_channel(1, "#" + color[5:7]),
cls.translate_channel(2, "#" + color[7:]),
cls.translate_channel(-1, "#" + color[1:3]),
)
else:
return (
cls.translate_channel(0, "#" + color[1:3]),
cls.translate_channel(1, "#" + color[3:5]),
cls.translate_channel(2, "#" + color[5:]),
1.0
)
def to_string(
self, *, options=None, alpha=None, precision=util.DEF_PREC, fit=True, **kwargs
):
"""Convert to Hex format."""
if options is None:
options = {}
show_alpha = alpha is not False and (alpha is True or self.alpha < 1.0)
template = "#{:02x}{:02x}{:02x}{:02x}" if show_alpha else "#{:02x}{:02x}{:02x}"
if options.get("hex_upper"):
template = template.upper()
# Always fit hex
coords = self.fit_coords()
if show_alpha:
value = template.format(
int(util.round_half_up(self.alpha * 255.0)),
int(util.round_half_up(coords[0] * 255.0)),
int(util.round_half_up(coords[1] * 255.0)),
int(util.round_half_up(coords[2] * 255.0))
)
else:
value = template.format(
int(util.round_half_up(coords[0] * 255.0)),
int(util.round_half_up(coords[1] * 255.0)),
int(util.round_half_up(coords[2] * 255.0))
)
return value
class ColorAlphaHex(Color):
"""Color object whose sRGB color space looks for colors of format `#RRGGBBAA` as `#AARRGGBB`."""
CS_MAP = copy.copy(Color.CS_MAP)
CS_MAP["srgb"] = ASRGB | en | 0.750822 | Custon color that looks for colors of format `#RRGGBBAA` as `#AARRGGBB`. SRGB that looks for alpha first in hex format. #(?:{hex}{{8}}|{hex}{{6}})\b".format(**parse.COLOR_PARTS)) Match a CSS color string. Translate channel string. Split channels. Convert to Hex format. # Always fit hex Color object whose sRGB color space looks for colors of format `#RRGGBBAA` as `#AARRGGBB`. | 3.093964 | 3 |
source_code/terrain.py | Wiolarz/Console_PY_dungeon | 0 | 10632 | <reponame>Wiolarz/Console_PY_dungeon<filename>source_code/terrain.py
import random
import jobs
import balance
from economy import roman_numbers
class Earth:
def __init__(self):
self.current_day = 1
self.main_quest = None
self.amount_location = 7 # max 8
self.locations = []
#
self.location_names = []
def new_quest(self):
self.main_quest = jobs.Quest()
def generate_location(self):
x = 0
for place in range(self.amount_location):
self.locations.append(Location(place + 1, self.amount_location, x)) # level, overall location number
x += 1
def name_generator():
prefix = ["", "Green", "Dark", "Toxic", "Inferno", "Orc", "Goblin", "Dragon"]
core = ["Forest", "Cave", "Dungeon", "Town", "Village", "Mountains", "Graveyard"]
# suffix = ["", ""]
new_unique = False
new_name = ""
    checking_wrong_balance = 0
    while not new_unique:
        checking_wrong_balance += 1
        if checking_wrong_balance > balance.world.amount_location * 5:
print("Error: cannot create random new location name")
exit(343)
new_name = prefix[random.randint(0, len(prefix)-1)] + " " + core[random.randint(0, len(core)-1)]
if new_name in balance.world.location_names:
new_unique = False
else:
new_unique = True
balance.world.location_names.append(new_name)
return new_name
class Location:
def __init__(self, location_level, amount, id_x):
self.id = id_x
self.name = name_generator()
self.level = location_level
self.quest_level = location_level + 2
if self.quest_level > balance.max_power:
self.quest_level = balance.max_power
self.chest_gold = location_level * balance.medium
self.density = 5 # number of events in location
self.chest_chance = 3 # %(10) chest chance
self.quest_enemy = 5 # %(10) chance of quest related enemy
self.location_names = []
self.amount_location = amount
def short_print(self):
return self.name + " " + roman_numbers(self.level)
| import random
import jobs
import balance
from economy import roman_numbers
class Earth:
def __init__(self):
self.current_day = 1
self.main_quest = None
self.amount_location = 7 # max 8
self.locations = []
#
self.location_names = []
def new_quest(self):
self.main_quest = jobs.Quest()
def generate_location(self):
x = 0
for place in range(self.amount_location):
self.locations.append(Location(place + 1, self.amount_location, x)) # level, overall location number
x += 1
def name_generator():
prefix = ["", "Green", "Dark", "Toxic", "Inferno", "Orc", "Goblin", "Dragon"]
core = ["Forest", "Cave", "Dungeon", "Town", "Village", "Mountains", "Graveyard"]
# suffix = ["", ""]
new_unique = False
new_name = ""
    checking_wrong_balance = 0
    while not new_unique:
        checking_wrong_balance += 1
        if checking_wrong_balance > balance.world.amount_location * 5:
print("Error: cannot create random new location name")
exit(343)
new_name = prefix[random.randint(0, len(prefix)-1)] + " " + core[random.randint(0, len(core)-1)]
if new_name in balance.world.location_names:
new_unique = False
else:
new_unique = True
balance.world.location_names.append(new_name)
return new_name
class Location:
def __init__(self, location_level, amount, id_x):
self.id = id_x
self.name = name_generator()
self.level = location_level
self.quest_level = location_level + 2
if self.quest_level > balance.max_power:
self.quest_level = balance.max_power
self.chest_gold = location_level * balance.medium
self.density = 5 # number of events in location
self.chest_chance = 3 # %(10) chest chance
self.quest_enemy = 5 # %(10) chance of quest related enemy
self.location_names = []
self.amount_location = amount
def short_print(self):
return self.name + " " + roman_numbers(self.level) | en | 0.761645 | # max 8 # # level, overall location number # suffix = ["", ""] # number of events in location # %(10) chest chance # %(10) chance of quest related enemy | 3.591432 | 4 |
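A world-generation sketch for the terrain module; it assumes the balance module exposes world, medium and max_power, since name_generator() reads balance.world while the locations are being built.

import balance
from terrain import Earth

balance.world = Earth()
balance.world.generate_location()
for loc in balance.world.locations:
    print(loc.short_print())         # e.g. "Dark Cave III"
balance.world.new_quest()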
entrepreneurial_property/models/scientificpark.py | CzechInvest/ciis | 1 | 10633 | from django.db import models
from .media import Water
from .media import Electricity
from .media import Gas
from .media import WasteWater
from .media import Telecommunication
from .generic import Attachment
from .generic import Photo
from .generic import Location as EstateLocation
from cigeo.models import GenericNote as EstateNote
class ScientificParkTelecommunication(Telecommunication):
green_field = models.OneToOneField(
"ScientificPark",
on_delete=models.CASCADE
)
class ScientificParkWasteWater(WasteWater):
diameter = capacity = None
green_field = models.OneToOneField(
"ScientificPark",
on_delete=models.CASCADE
)
class ScientificParkAttachment(Attachment):
green_field = models.OneToOneField(
"ScientificPark",
on_delete=models.CASCADE
)
class ScientificParkPhoto(Photo):
green_field = models.ForeignKey(
"ScientificPark",
on_delete=models.CASCADE
)
pass
class ScientificParkTechnologicalWater(Water):
distance = None
diameter = None
capacity = None
well = None
well_capacity = None
green_field = models.OneToOneField(
"ScientificPark",
on_delete=models.CASCADE
)
class ScientificParkElectricity(Electricity):
distance = None
capacity = None
current = None
green_field = models.OneToOneField(
"ScientificPark",
on_delete=models.CASCADE
)
class ScientificParkDrinkWater(Water):
distance = None
diameter = None
capacity = None
well = None
well_capacity = None
green_field = models.OneToOneField(
"ScientificPark",
on_delete=models.CASCADE
)
class ScientificParkGas(Gas):
diameter = pressure = capacity = None
green_field = models.OneToOneField(
"ScientificPark",
on_delete=models.CASCADE
)
class ScientificParkLocation(EstateLocation):
green_field = models.OneToOneField(
"ScientificPark",
on_delete=models.CASCADE
)
class ScientificParkGenericNote(EstateNote):
green_field = models.ForeignKey(
"ScientificPark",
on_delete=models.CASCADE
)
| from django.db import models
from .media import Water
from .media import Electricity
from .media import Gas
from .media import WasteWater
from .media import Telecommunication
from .generic import Attachment
from .generic import Photo
from .generic import Location as EstateLocation
from cigeo.models import GenericNote as EstateNote
class ScientificParkTelecommunication(Telecommunication):
green_field = models.OneToOneField(
"ScientificPark",
on_delete=models.CASCADE
)
class ScientificParkWasteWater(WasteWater):
diameter = capacity = None
green_field = models.OneToOneField(
"ScientificPark",
on_delete=models.CASCADE
)
class ScientificParkAttachment(Attachment):
green_field = models.OneToOneField(
"ScientificPark",
on_delete=models.CASCADE
)
class ScientificParkPhoto(Photo):
green_field = models.ForeignKey(
"ScientificPark",
on_delete=models.CASCADE
)
pass
class ScientificParkTechnologicalWater(Water):
distance = None
diameter = None
capacity = None
well = None
well_capacity = None
green_field = models.OneToOneField(
"ScientificPark",
on_delete=models.CASCADE
)
class ScientificParkElectricity(Electricity):
distance = None
capacity = None
current = None
green_field = models.OneToOneField(
"ScientificPark",
on_delete=models.CASCADE
)
class ScientificParkDrinkWater(Water):
distance = None
diameter = None
capacity = None
well = None
well_capacity = None
green_field = models.OneToOneField(
"ScientificPark",
on_delete=models.CASCADE
)
class ScientificParkGas(Gas):
diameter = pressure = capacity = None
green_field = models.OneToOneField(
"ScientificPark",
on_delete=models.CASCADE
)
class ScientificParkLocation(EstateLocation):
green_field = models.OneToOneField(
"ScientificPark",
on_delete=models.CASCADE
)
class ScientificParkGenericNote(EstateNote):
green_field = models.ForeignKey(
"ScientificPark",
on_delete=models.CASCADE
)
| none | 1 | 2.130654 | 2 |
|
pusion/input_output/file_input_output.py | IPVS-AS/pusion | 5 | 10634 | <gh_stars>1-10
import json
import ntpath
import shutil
from pathlib import Path
import pickle5
def load_pickle_files_as_data(file_paths):
"""
    Load pickle files containing decision outputs as a data array.
:param file_paths: A List of file paths to the individual pickle files.
:return: A data array.
"""
data = []
for file_path in file_paths:
with (open(file_path, "rb")) as handle:
data.append(pickle5.load(handle))
return data
def dump_pusion_data(data, file_path):
"""
Dump classification output data to the given file using pickle.
:param data: A data dictionary.
:param file_path: Location of the output pickle file.
"""
with open(file_path, "wb") as handle:
pickle5.dump(data, handle, protocol=pickle5.HIGHEST_PROTOCOL)
def dump_data_as_txt(data, name, identifier):
"""
Dump a data dictionary to the JSON file for a given evaluation unit.
:param data: A data dictionary.
:param name: The file name.
:param identifier: The identifier of the current evaluation unit (e.g. date/time).
"""
directory = "res/eval_" + identifier
Path(directory).mkdir(parents=True, exist_ok=True)
with open(directory + "/" + name + ".txt", 'w') as file:
file.write(json.dumps(data, indent=4))
def save(plot_instance, name, identifier):
"""
Save the plot instance for a given evaluation unit to the SVG and the PDF file, respectively.
:param plot_instance: `matplotlib.pyplot`-instance.
:param name: The file name.
:param identifier: The identifier of the current evaluation unit (e.g. date/time).
"""
directory = "res/eval_" + identifier
Path(directory).mkdir(parents=True, exist_ok=True)
plot_instance.savefig(directory + "/" + name + ".svg", bbox_inches="tight")
plot_instance.savefig(directory + "/" + name + ".pdf", bbox_inches="tight")
def save_evaluator(file, identifier):
"""
Save the evaluation script for a given evaluation unit.
:param file: The Python file. (E.g. referenced by __file__).
:param identifier: The identifier of the current evaluation unit (e.g. date/time).
"""
directory = "res/eval_" + identifier
Path(directory).mkdir(parents=True, exist_ok=True)
shutil.copy(file, directory + "/" + ntpath.basename(file) + ".txt")
| import json
import ntpath
import shutil
from pathlib import Path
import pickle5
def load_pickle_files_as_data(file_paths):
"""
    Load pickle files containing decision outputs as a data array.
:param file_paths: A List of file paths to the individual pickle files.
:return: A data array.
"""
data = []
for file_path in file_paths:
with (open(file_path, "rb")) as handle:
data.append(pickle5.load(handle))
return data
def dump_pusion_data(data, file_path):
"""
Dump classification output data to the given file using pickle.
:param data: A data dictionary.
:param file_path: Location of the output pickle file.
"""
with open(file_path, "wb") as handle:
pickle5.dump(data, handle, protocol=pickle5.HIGHEST_PROTOCOL)
def dump_data_as_txt(data, name, identifier):
"""
Dump a data dictionary to the JSON file for a given evaluation unit.
:param data: A data dictionary.
:param name: The file name.
:param identifier: The identifier of the current evaluation unit (e.g. date/time).
"""
directory = "res/eval_" + identifier
Path(directory).mkdir(parents=True, exist_ok=True)
with open(directory + "/" + name + ".txt", 'w') as file:
file.write(json.dumps(data, indent=4))
def save(plot_instance, name, identifier):
"""
Save the plot instance for a given evaluation unit to the SVG and the PDF file, respectively.
:param plot_instance: `matplotlib.pyplot`-instance.
:param name: The file name.
:param identifier: The identifier of the current evaluation unit (e.g. date/time).
"""
directory = "res/eval_" + identifier
Path(directory).mkdir(parents=True, exist_ok=True)
plot_instance.savefig(directory + "/" + name + ".svg", bbox_inches="tight")
plot_instance.savefig(directory + "/" + name + ".pdf", bbox_inches="tight")
def save_evaluator(file, identifier):
"""
Save the evaluation script for a given evaluation unit.
:param file: The Python file. (E.g. referenced by __file__).
:param identifier: The identifier of the current evaluation unit (e.g. date/time).
"""
directory = "res/eval_" + identifier
Path(directory).mkdir(parents=True, exist_ok=True)
shutil.copy(file, directory + "/" + ntpath.basename(file) + ".txt") | en | 0.665806 | Load pickle files containing decision outputs as an data array. :param file_paths: A List of file paths to the individual pickle files. :return: A data array. Dump classification output data to the given file using pickle. :param data: A data dictionary. :param file_path: Location of the output pickle file. Dump a data dictionary to the JSON file for a given evaluation unit. :param data: A data dictionary. :param name: The file name. :param identifier: The identifier of the current evaluation unit (e.g. date/time). Save the plot instance for a given evaluation unit to the SVG and the PDF file, respectively. :param plot_instance: `matplotlib.pyplot`-instance. :param name: The file name. :param identifier: The identifier of the current evaluation unit (e.g. date/time). Save the evaluation script for a given evaluation unit. :param file: The Python file. (E.g. referenced by __file__). :param identifier: The identifier of the current evaluation unit (e.g. date/time). | 3.149871 | 3 |
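A round-trip sketch for the helpers above; the identifier and file paths are arbitrary.

import matplotlib.pyplot as plt

identifier = "2024_01_01_12_00"
dump_pusion_data({"decision_outputs": [[0, 1], [1, 0]]}, "/tmp/outputs.pickle")
data = load_pickle_files_as_data(["/tmp/outputs.pickle"])

dump_data_as_txt({"accuracy": 0.93}, "scores", identifier)
plt.plot([0, 1], [0, 1])
save(plt, "diagonal", identifier)
save_evaluator(__file__, identifier)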
meiduo_mall/meiduo_mall/apps/meiduo_admin/views/spus.py | aGrass0825/meiduo_project | 0 | 10635 | from rest_framework.generics import ListAPIView
from rest_framework.permissions import IsAdminUser
from goods.models import SPU, SPUSpecification
from meiduo_admin.serializers.spus import SPUSimpleSerializer, SPUSpecSerializer
class SPUSimpleView(ListAPIView):
permission_classes = [IsAdminUser]
queryset = SPU.objects.all()
serializer_class = SPUSimpleSerializer
# GET/meiduo_admin/goods/(?P<pk>\d+)/specs/
class SPUSpecView(ListAPIView):
"""获取SPU商品的规格选项数据"""
permission_classes = [IsAdminUser]
    # Specify the queryset used by this view
def get_queryset(self):
pk = self.kwargs['pk']
specs = SPUSpecification.objects.filter(spu_id=pk)
return specs
    # Specify the serializer class used by this view
serializer_class = SPUSpecSerializer
| from rest_framework.generics import ListAPIView
from rest_framework.permissions import IsAdminUser
from goods.models import SPU, SPUSpecification
from meiduo_admin.serializers.spus import SPUSimpleSerializer, SPUSpecSerializer
class SPUSimpleView(ListAPIView):
permission_classes = [IsAdminUser]
queryset = SPU.objects.all()
serializer_class = SPUSimpleSerializer
# GET/meiduo_admin/goods/(?P<pk>\d+)/specs/
class SPUSpecView(ListAPIView):
"""获取SPU商品的规格选项数据"""
permission_classes = [IsAdminUser]
    # Specify the queryset used by this view
def get_queryset(self):
pk = self.kwargs['pk']
specs = SPUSpecification.objects.filter(spu_id=pk)
return specs
    # Specify the serializer class used by this view
serializer_class = SPUSpecSerializer
| zh | 0.911989 | # GET/meiduo_admin/goods/(?P<pk>\d+)/specs/ 获取SPU商品的规格选项数据 # 指定视图类所使用的查询集 # 指定视图类所使用的序列化器类 | 2.151874 | 2 |
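A matching URL configuration sketch for the two views above; apart from the specs path given in the comment, the route names are assumed.

from django.urls import re_path
from meiduo_admin.views import spus

urlpatterns = [
    re_path(r'^goods/simple/$', spus.SPUSimpleView.as_view()),
    re_path(r'^goods/(?P<pk>\d+)/specs/$', spus.SPUSpecView.as_view()),
]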
Scientific Computing with Python/Probability Calculator/prob_calculator.py | Fradxyz/FCCProjects | 0 | 10636 | <gh_stars>0
# Hacked by Ry2uko :D
import copy
import random
# Consider using the modules imported above.
class Hat:
def __init__(self, **balls):
self.contents = []
for color in balls:
for n in range(0,balls[color]):
self.contents.append(color)
def draw(self, num):
drawn = []
if num >= len(self.contents):
return self.contents
for n in range(0, num):
if len(self.contents) == 0:
break
randindex = random.randint(0, len(self.contents)-1)
drawn.append(self.contents.pop(randindex))
return drawn
def experiment(hat, expected_balls, num_balls_drawn, num_experiments):
m = 0
for count in range(0, num_experiments):
hat_copy = copy.deepcopy(hat)
drawn = hat_copy.draw(num_balls_drawn)
valid = True
for color in expected_balls:
if expected_balls[color] > drawn.count(color):
valid = False
break
if valid:
m += 1
return m / num_experiments
if __name__ == '__main__':
# Test here
pass | # Hacked by Ry2uko :D
import copy
import random
# Consider using the modules imported above.
class Hat:
def __init__(self, **balls):
self.contents = []
for color in balls:
for n in range(0,balls[color]):
self.contents.append(color)
def draw(self, num):
drawn = []
if num >= len(self.contents):
return self.contents
for n in range(0, num):
if len(self.contents) == 0:
break
randindex = random.randint(0, len(self.contents)-1)
drawn.append(self.contents.pop(randindex))
return drawn
def experiment(hat, expected_balls, num_balls_drawn, num_experiments):
m = 0
for count in range(0, num_experiments):
hat_copy = copy.deepcopy(hat)
drawn = hat_copy.draw(num_balls_drawn)
valid = True
for color in expected_balls:
if expected_balls[color] > drawn.count(color):
valid = False
break
if valid:
m += 1
return m / num_experiments
if __name__ == '__main__':
# Test here
pass | en | 0.698494 | # Hacked by Ry2uko :D # Consider using the modules imported above. # Test here | 3.472106 | 3 |
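An example run for the probability calculator above, mirroring the freeCodeCamp exercise it targets; the exact result varies between runs.

hat = Hat(blue=4, red=2, green=6)
probability = experiment(
    hat=hat,
    expected_balls={"blue": 2, "red": 1},
    num_balls_drawn=4,
    num_experiments=3000,
)
print(probability)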
src/frames/add_quantity_frame.py | GolovPavel/ValueConverter | 1 | 10637 | import tkinter as tk
from tkinter.messagebox import showerror
from constants.frames import MAIN_FRAME_NAME
from util import add_new_quantity
class AddQuantityFrame(tk.Frame):
def __init__(self, root, controller):
tk.Frame.__init__(self, root)
self.controller = controller
self.main_label = tk.Label(self, text="Добавление новой величины", font="Helvetica 30 bold")
self.main_label.pack(pady=50)
self.info_label = tk.Label(self, text="Введите название величины", font="Helvetica 20")
self.info_label.pack(pady=40)
self.quantity_name_entry = tk.Entry(self, width=24)
self.quantity_name_entry.pack()
self.add_button = tk.Button(self, text="Добавить величину", width=20, height=3, command=self.__add_quantity)
self.add_button.pack(pady=40)
self.back_button = tk.Button(self, text="Назад", width=20, height=3,
command=lambda: self.controller.show_frame(MAIN_FRAME_NAME))
self.back_button.pack()
def __add_quantity(self):
quantity_name = self.quantity_name_entry.get()
if quantity_name == "":
showerror("Название величины", "Введите название величины")
return
if len(quantity_name) > 30:
showerror("Длинное название", "Название величины может содержать не более 30 символов")
return
add_new_quantity(quantity_name)
self.controller.show_frame(MAIN_FRAME_NAME)
def render(self):
self.clear()
def clear(self):
self.quantity_name_entry.delete(0, tk.END)
| import tkinter as tk
from tkinter.messagebox import showerror
from constants.frames import MAIN_FRAME_NAME
from util import add_new_quantity
class AddQuantityFrame(tk.Frame):
def __init__(self, root, controller):
tk.Frame.__init__(self, root)
self.controller = controller
self.main_label = tk.Label(self, text="Добавление новой величины", font="Helvetica 30 bold")
self.main_label.pack(pady=50)
self.info_label = tk.Label(self, text="Введите название величины", font="Helvetica 20")
self.info_label.pack(pady=40)
self.quantity_name_entry = tk.Entry(self, width=24)
self.quantity_name_entry.pack()
self.add_button = tk.Button(self, text="Добавить величину", width=20, height=3, command=self.__add_quantity)
self.add_button.pack(pady=40)
self.back_button = tk.Button(self, text="Назад", width=20, height=3,
command=lambda: self.controller.show_frame(MAIN_FRAME_NAME))
self.back_button.pack()
def __add_quantity(self):
quantity_name = self.quantity_name_entry.get()
if quantity_name == "":
showerror("Название величины", "Введите название величины")
return
if len(quantity_name) > 30:
showerror("Длинное название", "Название величины может содержать не более 30 символов")
return
add_new_quantity(quantity_name)
self.controller.show_frame(MAIN_FRAME_NAME)
def render(self):
self.clear()
def clear(self):
self.quantity_name_entry.delete(0, tk.END)
| none | 1 | 3.412262 | 3 |
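A minimal harness for the frame above; the DummyController only logs frame switches, and the import path is assumed.

import tkinter as tk
from src.frames.add_quantity_frame import AddQuantityFrame   # path assumed

class DummyController:
    def show_frame(self, name):
        print("switching to", name)

root = tk.Tk()
frame = AddQuantityFrame(root, DummyController())
frame.pack(fill="both", expand=True)
frame.render()
root.mainloop()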
|
setup.py | vwxyzjn/pysc2gym | 6 | 10638 | from setuptools import setup
import versioneer
setup(name='gym_pysc2',
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
install_requires=['gym'] # And any other dependencies foo needs
) | from setuptools import setup
import versioneer
setup(name='gym_pysc2',
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
install_requires=['gym'] # And any other dependencies foo needs
) | en | 0.720654 | # And any other dependencies foo needs | 1.340532 | 1 |
Compressed downloads/server.py | Aldair47x/aa | 0 | 10639 | <reponame>Aldair47x/aa
import zmq
import sys
import os
import math
def loadFiles(path):
files = {}
dataDir = os.fsencode(path)
for file in os.listdir(dataDir):
filename = os.fsdecode(file)
print("Loading {}".format(filename))
files[filename] = file
return files
def main():
if len(sys.argv) != 3:
print("Error")
exit()
directory = sys.argv[2]
port = sys.argv[1]
context = zmq.Context()
s = context.socket(zmq.REP)
s.bind("tcp://*:{}".format(port))
files = loadFiles(directory)
while True:
msg = s.recv_json()
if msg["op"] == "list":
s.send_json({"files": list(files.keys())})
elif msg["op"] == "download":
size = 1024*1024*32
filename = msg["file"]
if filename in files:
if not "part" in msg:
file = os.stat(directory + "/" +filename)
s.send_json({"parts": math.ceil(file[6]/size)})
else:
with open(directory + "/" +filename, "rb") as input:
input.seek(size * int(msg["part"]))
data = input.read(size)
s.send(data)
else:
s.send_string("Song does not exits! Marranito")
else:
print("Unsupported action!")
if __name__ == '__main__':
main()
| import zmq
import sys
import os
import math
def loadFiles(path):
files = {}
dataDir = os.fsencode(path)
for file in os.listdir(dataDir):
filename = os.fsdecode(file)
print("Loading {}".format(filename))
files[filename] = file
return files
def main():
if len(sys.argv) != 3:
print("Error")
exit()
directory = sys.argv[2]
port = sys.argv[1]
context = zmq.Context()
s = context.socket(zmq.REP)
s.bind("tcp://*:{}".format(port))
files = loadFiles(directory)
while True:
msg = s.recv_json()
if msg["op"] == "list":
s.send_json({"files": list(files.keys())})
elif msg["op"] == "download":
size = 1024*1024*32
filename = msg["file"]
if filename in files:
if not "part" in msg:
file = os.stat(directory + "/" +filename)
s.send_json({"parts": math.ceil(file[6]/size)})
else:
with open(directory + "/" +filename, "rb") as input:
input.seek(size * int(msg["part"]))
data = input.read(size)
s.send(data)
else:
s.send_string("Song does not exits! Marranito")
else:
print("Unsupported action!")
if __name__ == '__main__':
main() | none | 1 | 2.490875 | 2 |
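A minimal client sketch for the request/reply protocol implemented by server.py above: an op of "list" returns the file names, a "download" request without a part returns the number of 32 MiB chunks, and each request with a part returns raw bytes. The host and port below are placeholders, not values from the record.

import zmq

def fetch(host, port, filename, out_path):
    ctx = zmq.Context()
    sock = ctx.socket(zmq.REQ)
    sock.connect("tcp://{}:{}".format(host, port))
    sock.send_json({"op": "list"})
    available = sock.recv_json()["files"]
    if filename not in available:
        raise FileNotFoundError(filename)
    sock.send_json({"op": "download", "file": filename})
    parts = sock.recv_json()["parts"]
    with open(out_path, "wb") as out:
        for part in range(parts):
            sock.send_json({"op": "download", "file": filename, "part": part})
            out.write(sock.recv())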
|
metric_learn/nca.py | ogrisel/metric-learn | 0 | 10640 | """
Neighborhood Components Analysis (NCA)
Ported to Python from https://github.com/vomjom/nca
"""
from __future__ import absolute_import
import numpy as np
from six.moves import xrange
from sklearn.utils.validation import check_X_y
from .base_metric import BaseMetricLearner
EPS = np.finfo(float).eps
class NCA(BaseMetricLearner):
def __init__(self, num_dims=None, max_iter=100, learning_rate=0.01):
self.num_dims = num_dims
self.max_iter = max_iter
self.learning_rate = learning_rate
def transformer(self):
return self.A_
def fit(self, X, y):
"""
X: data matrix, (n x d)
y: scalar labels, (n)
"""
X, labels = check_X_y(X, y)
n, d = X.shape
num_dims = self.num_dims
if num_dims is None:
num_dims = d
# Initialize A to a scaling matrix
A = np.zeros((num_dims, d))
np.fill_diagonal(A, 1./(np.maximum(X.max(axis=0)-X.min(axis=0), EPS)))
# Run NCA
dX = X[:,None] - X[None] # shape (n, n, d)
tmp = np.einsum('...i,...j->...ij', dX, dX) # shape (n, n, d, d)
masks = labels[:,None] == labels[None]
for it in xrange(self.max_iter):
for i, label in enumerate(labels):
mask = masks[i]
Ax = A.dot(X.T).T # shape (n, num_dims)
softmax = np.exp(-((Ax[i] - Ax)**2).sum(axis=1)) # shape (n)
softmax[i] = 0
softmax /= softmax.sum()
t = softmax[:, None, None] * tmp[i] # shape (n, d, d)
d = softmax[mask].sum() * t.sum(axis=0) - t[mask].sum(axis=0)
A += self.learning_rate * A.dot(d)
self.X_ = X
self.A_ = A
self.n_iter_ = it
return self
| """
Neighborhood Components Analysis (NCA)
Ported to Python from https://github.com/vomjom/nca
"""
from __future__ import absolute_import
import numpy as np
from six.moves import xrange
from sklearn.utils.validation import check_X_y
from .base_metric import BaseMetricLearner
EPS = np.finfo(float).eps
class NCA(BaseMetricLearner):
def __init__(self, num_dims=None, max_iter=100, learning_rate=0.01):
self.num_dims = num_dims
self.max_iter = max_iter
self.learning_rate = learning_rate
def transformer(self):
return self.A_
def fit(self, X, y):
"""
X: data matrix, (n x d)
y: scalar labels, (n)
"""
X, labels = check_X_y(X, y)
n, d = X.shape
num_dims = self.num_dims
if num_dims is None:
num_dims = d
# Initialize A to a scaling matrix
A = np.zeros((num_dims, d))
np.fill_diagonal(A, 1./(np.maximum(X.max(axis=0)-X.min(axis=0), EPS)))
# Run NCA
dX = X[:,None] - X[None] # shape (n, n, d)
tmp = np.einsum('...i,...j->...ij', dX, dX) # shape (n, n, d, d)
masks = labels[:,None] == labels[None]
for it in xrange(self.max_iter):
for i, label in enumerate(labels):
mask = masks[i]
Ax = A.dot(X.T).T # shape (n, num_dims)
softmax = np.exp(-((Ax[i] - Ax)**2).sum(axis=1)) # shape (n)
softmax[i] = 0
softmax /= softmax.sum()
t = softmax[:, None, None] * tmp[i] # shape (n, d, d)
d = softmax[mask].sum() * t.sum(axis=0) - t[mask].sum(axis=0)
A += self.learning_rate * A.dot(d)
self.X_ = X
self.A_ = A
self.n_iter_ = it
return self
| en | 0.797032 | Neighborhood Components Analysis (NCA) Ported to Python from https://github.com/vomjom/nca X: data matrix, (n x d) y: scalar labels, (n) # Initialize A to a scaling matrix # Run NCA # shape (n, n, d) # shape (n, n, d, d) # shape (n, num_dims) # shape (n) # shape (n, d, d) | 2.256465 | 2 |
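A small usage sketch for the NCA class above: fit on labelled vectors, then read the learned linear map with transformer(). The toy data and the import path are assumptions for illustration.

import numpy as np
# from metric_learn.nca import NCA   # import path assumed from the record's file name

X = np.array([[0.0, 0.0], [0.1, 0.2], [2.0, 2.1], [2.2, 1.9]])
y = np.array([0, 0, 1, 1])

nca = NCA(num_dims=2, max_iter=50, learning_rate=0.01)
nca.fit(X, y)
A = nca.transformer()        # learned (num_dims x d) matrix
X_embedded = X.dot(A.T)      # data expressed in the learned metric space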
rl_repr/batch_rl/evaluation.py | xxdreck/google-research | 23,901 | 10641 | <reponame>xxdreck/google-research<filename>rl_repr/batch_rl/evaluation.py
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Policy evaluation."""
import typing
import tensorflow.compat.v2 as tf
def evaluate(
env,
policy,
num_episodes = 10,
ctx_length = None,
embed_training_window = None,
state_mask_fn = None, # pylint: disable=g-bare-generic
):
"""Evaluates the policy.
Args:
env: Environment to evaluate the policy on.
policy: Policy to evaluate.
num_episodes: A number of episodes to average the policy on.
ctx_length: number of previous steps to compute context from.
embed_training_window: window size used during embed training.
state_mask_fn: state masking function for partially obs envs.
Returns:
Averaged reward and a total number of steps.
"""
total_timesteps = 0
total_returns = 0.0
def apply_mask(observation):
if state_mask_fn:
return tf.convert_to_tensor(state_mask_fn(observation.numpy()))
return observation
for _ in range(num_episodes):
timestep = env.reset()
if ctx_length:
states = [apply_mask(timestep.observation) for _ in range(ctx_length)]
actions = [
tf.zeros(policy.action_spec.shape)[None, :] for _ in range(ctx_length)
]
rewards = [[0.] for _ in range(ctx_length)]
latent_action = None
i = 0
while not timestep.is_last():
if embed_training_window and (i % embed_training_window == 0 or
embed_training_window <= 2):
latent_action = None
if ctx_length:
states.append(apply_mask(timestep.observation))
if len(states) > ctx_length:
states.pop(0)
actions.pop(0)
rewards.pop(0)
action = policy.act(
tf.stack(states, axis=1),
actions=tf.stack(actions, axis=1),
rewards=tf.stack(rewards, axis=1))
actions.append(action)
else:
if embed_training_window:
action, latent_action = policy.act(
apply_mask(timestep.observation), latent_action=latent_action)
else:
action = policy.act(apply_mask(timestep.observation))
timestep = env.step(action)
if ctx_length:
rewards.append(timestep.reward)
total_returns += timestep.reward[0]
total_timesteps += 1
i += 1
return total_returns / num_episodes, total_timesteps / num_episodes
| # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Policy evaluation."""
import typing
import tensorflow.compat.v2 as tf
def evaluate(
env,
policy,
num_episodes = 10,
ctx_length = None,
embed_training_window = None,
state_mask_fn = None, # pylint: disable=g-bare-generic
):
"""Evaluates the policy.
Args:
env: Environment to evaluate the policy on.
policy: Policy to evaluate.
num_episodes: A number of episodes to average the policy on.
ctx_length: number of previous steps to compute context from.
embed_training_window: window size used during embed training.
state_mask_fn: state masking function for partially obs envs.
Returns:
Averaged reward and a total number of steps.
"""
total_timesteps = 0
total_returns = 0.0
def apply_mask(observation):
if state_mask_fn:
return tf.convert_to_tensor(state_mask_fn(observation.numpy()))
return observation
for _ in range(num_episodes):
timestep = env.reset()
if ctx_length:
states = [apply_mask(timestep.observation) for _ in range(ctx_length)]
actions = [
tf.zeros(policy.action_spec.shape)[None, :] for _ in range(ctx_length)
]
rewards = [[0.] for _ in range(ctx_length)]
latent_action = None
i = 0
while not timestep.is_last():
if embed_training_window and (i % embed_training_window == 0 or
embed_training_window <= 2):
latent_action = None
if ctx_length:
states.append(apply_mask(timestep.observation))
if len(states) > ctx_length:
states.pop(0)
actions.pop(0)
rewards.pop(0)
action = policy.act(
tf.stack(states, axis=1),
actions=tf.stack(actions, axis=1),
rewards=tf.stack(rewards, axis=1))
actions.append(action)
else:
if embed_training_window:
action, latent_action = policy.act(
apply_mask(timestep.observation), latent_action=latent_action)
else:
action = policy.act(apply_mask(timestep.observation))
timestep = env.step(action)
if ctx_length:
rewards.append(timestep.reward)
total_returns += timestep.reward[0]
total_timesteps += 1
i += 1
return total_returns / num_episodes, total_timesteps / num_episodes | en | 0.834176 | # coding=utf-8 # Copyright 2021 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Policy evaluation. # pylint: disable=g-bare-generic Evaluates the policy. Args: env: Environment to evaluate the policy on. policy: Policy to evaluate. num_episodes: A number of episodes to average the policy on. ctx_length: number of previous steps to compute context from. embed_training_window: window size used during embed training. state_mask_fn: state masking function for partially obs envs. Returns: Averaged reward and a total number of steps. | 2.110578 | 2 |
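A sketch of the smallest policy object that evaluate() above accepts when ctx_length and embed_training_window are left unset: just an act(observation) method returning a batched action. The random-action body and the dm_env-style action_spec are assumptions.

import tensorflow.compat.v2 as tf

class RandomPolicy:
    """Placeholder policy exposing the act() interface used by evaluate()."""

    def __init__(self, action_spec):
        self.action_spec = action_spec

    def act(self, observation):
        # A real policy would condition on the (possibly masked) observation.
        shape = (1,) + tuple(self.action_spec.shape)
        return tf.random.uniform(shape, minval=-1.0, maxval=1.0)

# avg_return, avg_length = evaluate(env, RandomPolicy(env.action_spec()))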
src/nwb_conversion_tools/datainterfaces/ecephys/intan/intandatainterface.py | ben-dichter-consulting/nwbn-conversion-tools | 0 | 10642 | <gh_stars>0
"""Authors: <NAME> and <NAME>."""
from pathlib import Path
import spikeextractors as se
from pynwb.ecephys import ElectricalSeries
from ..baserecordingextractorinterface import BaseRecordingExtractorInterface
from ....utils import get_schema_from_hdmf_class, FilePathType
try:
from pyintan.intan import read_rhd, read_rhs
HAVE_PYINTAN = True
except ImportError:
HAVE_PYINTAN = False
INSTALL_MESSAGE = "Please install pyintan to use this extractor!"
class IntanRecordingInterface(BaseRecordingExtractorInterface):
"""Primary data interface class for converting a IntanRecordingExtractor."""
RX = se.IntanRecordingExtractor
def __init__(self, file_path: FilePathType, verbose: bool = True):
assert HAVE_PYINTAN, INSTALL_MESSAGE
super().__init__(file_path=file_path, verbose=verbose)
if ".rhd" in Path(self.source_data["file_path"]).suffixes:
intan_file_metadata = read_rhd(self.source_data["file_path"])[1]
else:
intan_file_metadata = read_rhs(self.source_data["file_path"])[1]
exclude_chan_types = ["AUX", "ADC", "VDD"]
valid_channels = [
x for x in intan_file_metadata if not any([y in x["native_channel_name"] for y in exclude_chan_types])
]
group_names = [channel["native_channel_name"].split("-")[0] for channel in valid_channels]
unique_group_names = set(group_names)
group_electrode_numbers = [channel["native_order"] for channel in valid_channels]
channel_ids = self.recording_extractor.get_channel_ids()
for channel_id, channel_group in zip(channel_ids, group_names):
self.recording_extractor.set_channel_property(
channel_id=channel_id, property_name="group_name", value=f"Group{channel_group}"
)
if len(unique_group_names) > 1:
for channel_id, group_electrode_number in zip(channel_ids, group_electrode_numbers):
self.recording_extractor.set_channel_property(
channel_id=channel_id, property_name="group_electrode_number", value=group_electrode_number
)
custom_names = [channel["custom_channel_name"] for channel in valid_channels]
if any(custom_names):
for channel_id, custom_name in zip(channel_ids, custom_names):
self.recording_extractor.set_channel_property(
channel_id=channel_id, property_name="custom_channel_name", value=custom_name
)
def get_metadata_schema(self):
metadata_schema = super().get_metadata_schema()
metadata_schema["properties"]["Ecephys"]["properties"].update(
ElectricalSeries_raw=get_schema_from_hdmf_class(ElectricalSeries)
)
return metadata_schema
def get_metadata(self):
channel_ids = self.recording_extractor.get_channel_ids()
property_names = self.recording_extractor.get_shared_channel_property_names()
ecephys_metadata = dict(
Ecephys=dict(
Device=[
dict(
name="Intan",
description="Intan recording",
manufacturer="Intan",
),
],
ElectrodeGroup=[
dict(
name=group_name,
description=f"Group {group_name} electrodes.",
device="Intan",
location="",
)
for group_name in set(
[
self.recording_extractor.get_channel_property(
channel_id=channel_id, property_name="group_name"
)
for channel_id in channel_ids
]
)
],
Electrodes=[
dict(name="group_name", description="The name of the ElectrodeGroup this electrode is a part of.")
],
ElectricalSeries_raw=dict(name="ElectricalSeries_raw", description="Raw acquisition traces."),
)
)
if "group_electrode_number" in property_names:
ecephys_metadata["Ecephys"]["Electrodes"].append(
dict(name="group_electrode_number", description="0-indexed channel within a group.")
)
if "custom_channel_name" in property_names:
ecephys_metadata["Ecephys"]["Electrodes"].append(
dict(name="custom_channel_name", description="Custom channel name assigned in Intan.")
)
return ecephys_metadata
| """Authors: <NAME> and <NAME>."""
from pathlib import Path
import spikeextractors as se
from pynwb.ecephys import ElectricalSeries
from ..baserecordingextractorinterface import BaseRecordingExtractorInterface
from ....utils import get_schema_from_hdmf_class, FilePathType
try:
from pyintan.intan import read_rhd, read_rhs
HAVE_PYINTAN = True
except ImportError:
HAVE_PYINTAN = False
INSTALL_MESSAGE = "Please install pyintan to use this extractor!"
class IntanRecordingInterface(BaseRecordingExtractorInterface):
"""Primary data interface class for converting a IntanRecordingExtractor."""
RX = se.IntanRecordingExtractor
def __init__(self, file_path: FilePathType, verbose: bool = True):
assert HAVE_PYINTAN, INSTALL_MESSAGE
super().__init__(file_path=file_path, verbose=verbose)
if ".rhd" in Path(self.source_data["file_path"]).suffixes:
intan_file_metadata = read_rhd(self.source_data["file_path"])[1]
else:
intan_file_metadata = read_rhs(self.source_data["file_path"])[1]
exclude_chan_types = ["AUX", "ADC", "VDD"]
valid_channels = [
x for x in intan_file_metadata if not any([y in x["native_channel_name"] for y in exclude_chan_types])
]
group_names = [channel["native_channel_name"].split("-")[0] for channel in valid_channels]
unique_group_names = set(group_names)
group_electrode_numbers = [channel["native_order"] for channel in valid_channels]
channel_ids = self.recording_extractor.get_channel_ids()
for channel_id, channel_group in zip(channel_ids, group_names):
self.recording_extractor.set_channel_property(
channel_id=channel_id, property_name="group_name", value=f"Group{channel_group}"
)
if len(unique_group_names) > 1:
for channel_id, group_electrode_number in zip(channel_ids, group_electrode_numbers):
self.recording_extractor.set_channel_property(
channel_id=channel_id, property_name="group_electrode_number", value=group_electrode_number
)
custom_names = [channel["custom_channel_name"] for channel in valid_channels]
if any(custom_names):
for channel_id, custom_name in zip(channel_ids, custom_names):
self.recording_extractor.set_channel_property(
channel_id=channel_id, property_name="custom_channel_name", value=custom_name
)
def get_metadata_schema(self):
metadata_schema = super().get_metadata_schema()
metadata_schema["properties"]["Ecephys"]["properties"].update(
ElectricalSeries_raw=get_schema_from_hdmf_class(ElectricalSeries)
)
return metadata_schema
def get_metadata(self):
channel_ids = self.recording_extractor.get_channel_ids()
property_names = self.recording_extractor.get_shared_channel_property_names()
ecephys_metadata = dict(
Ecephys=dict(
Device=[
dict(
name="Intan",
description="Intan recording",
manufacturer="Intan",
),
],
ElectrodeGroup=[
dict(
name=group_name,
description=f"Group {group_name} electrodes.",
device="Intan",
location="",
)
for group_name in set(
[
self.recording_extractor.get_channel_property(
channel_id=channel_id, property_name="group_name"
)
for channel_id in channel_ids
]
)
],
Electrodes=[
dict(name="group_name", description="The name of the ElectrodeGroup this electrode is a part of.")
],
ElectricalSeries_raw=dict(name="ElectricalSeries_raw", description="Raw acquisition traces."),
)
)
if "group_electrode_number" in property_names:
ecephys_metadata["Ecephys"]["Electrodes"].append(
dict(name="group_electrode_number", description="0-indexed channel within a group.")
)
if "custom_channel_name" in property_names:
ecephys_metadata["Ecephys"]["Electrodes"].append(
dict(name="custom_channel_name", description="Custom channel name assigned in Intan.")
)
return ecephys_metadata | en | 0.757962 | Authors: <NAME> and <NAME>. Primary data interface class for converting a IntanRecordingExtractor. | 2.088877 | 2 |
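A usage sketch for IntanRecordingInterface above; the .rhd path is a placeholder, and the calls only exercise methods defined in the record.

rhd_path = "session_1.rhd"                       # placeholder; any Intan .rhd or .rhs file
interface = IntanRecordingInterface(file_path=rhd_path, verbose=True)

metadata = interface.get_metadata()
for group in metadata["Ecephys"]["ElectrodeGroup"]:
    print(group["name"])                         # one group per channel-name prefix, e.g. GroupA

schema = interface.get_metadata_schema()         # includes the ElectricalSeries_raw entry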
Use.py | XtremeCoder1384/SongDownloader | 1 | 10643 | import os
import youtube_dl
os.system("setup.bat")
playlist = input("Paste the Youtube Playlist URL Here.")
track = 1
print("""THIS TOOL WILL ATTEMPT TO DOWNLOAD THE FIRST 1000 SONGS IN THE QUEUE.\n
PLEASE DO NOT INTERRUPT THE TOOL.
YOU MAY CLOSE THE TOOL WHEN IT DISPLAYS "DONE!".
ALL DOWNLOADED SONGS WILL BE IN THE SAME DIRECTORY THIS FILE IS IN.
TO EXTRACT THEM, FILTER BY MP3.""")
for x in range(1000):
file = open("Downloader.bat","w")
file.write("youtube-dl -x --playlist-start {} --audio-format mp3 --playlist-end {} {}".format(str(track),str(track),playlist))
file.close()
os.system("Downloader.bat")
track = track + 1
print("DONE! You may now close this window.")
| import os
import youtube_dl
os.system("setup.bat")
playlist = input("Paste the Youtube Playlist URL Here.")
track = 1
print("""THIS TOOL WILL ATTEMPT TO DOWNLOAD THE FIRST 1000 SONGS IN THE QUEUE.\n
PLEASE DO NOT INTERRUPT THE TOOL.
YOU MAY CLOSE THE TOOL WHEN IT DISPLAYS "DONE!".
ALL DOWNLOADED SONGS WILL BE IN THE SAME DIRECTORY THIS FILE IS IN.
TO EXTRACT THEM, FILTER BY MP3.""")
for x in range(1000):
file = open("Downloader.bat","w")
file.write("youtube-dl -x --playlist-start {} --audio-format mp3 --playlist-end {} {}".format(str(track),str(track),playlist))
file.close()
os.system("Downloader.bat")
track = track + 1
print("DONE! You may now close this window.")
| en | 0.362807 | THIS TOOL WILL ATTEMPT TO DOWNLOAD THE FIRST 1000 SONGS IN THE QUEUE.\n
PLEASE DO NOT INTERRUPT THE TOOL.
YOU MAY CLOSE THE TOOL WHEN IT DISPLAYS "DONE!".
ALL DOWNLOADED SONGS WILL BE IN THE SAME DIRECTORY THIS FILE IS IN.
TO EXTRACT THEM, FILTER BY MP3. | 3.22502 | 3 |
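Use.py above imports youtube_dl but then shells out to a generated Downloader.bat one track at a time. Below is a sketch of driving the youtube_dl Python API directly instead; option names follow the youtube_dl documentation and should be checked against the installed version.

import youtube_dl

def download_playlist_as_mp3(playlist_url, start=1, end=1000):
    opts = {
        "format": "bestaudio/best",
        "playliststart": start,
        "playlistend": end,
        "postprocessors": [{
            "key": "FFmpegExtractAudio",
            "preferredcodec": "mp3",
        }],
    }
    with youtube_dl.YoutubeDL(opts) as ydl:
        ydl.download([playlist_url])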
site/manage.py | oaoouo/railgun | 0 | 10644 | # coding: utf-8
"""
manage.py
~~~~~~~~~
"""
import os
import sys
import shutil
import platform
from app import app
from gen import Gen
from flask_script import Manager
"""编码设置"""
if (platform.python_version().split('.')[0] == '2'):
# reload(sys) is evil :)
reload(sys)
sys.setdefaultencoding('utf-8')
"""Git配置"""
git_url = app.config['GIT_URL']
git_branch = app.config['BRANCH']
manager = Manager(app)
def first_upload():
if not git_url:
raise
else:
harbor_folder = os.path.join(os.getcwd(), '.harbor')
os.chdir(harbor_folder)
os.popen('git checkout -b %s' % git_branch)
os.popen('git pull %s %s' % (git_url, git_branch))
os.popen('git add .')
os.popen('git commit -m "railgun site update...✅ "')
os.popen('git push -u %s %s' % (git_url, git_branch))
def other_upload():
if not git_url:
raise
else:
harbor_folder = os.path.join(os.getcwd(), '.harbor')
os.chdir(harbor_folder)
os.popen('git checkout %s' % git_branch)
os.popen('git add .')
os.popen('git commit -m "railgun site update...✅ "')
os.popen('git push -u %s %s' % (git_url, git_branch))
def update_static_res():
static_folder = os.path.join(os.getcwd(), 'app/static')
static_build_folder = os.path.join(os.getcwd(), 'app/build/static')
if os.path.isdir(static_build_folder):
shutil.rmtree(static_build_folder)
shutil.copytree(static_folder, static_build_folder)
if __name__ == '__main__':
if len(sys.argv) > 1 and sys.argv[1] == 'build':
_gen = Gen(app)
_gen.gen()
# update static resources
update_static_res()
elif len(sys.argv) > 1 and sys.argv[1] == 'first_upload':
first_upload()
elif len(sys.argv) > 1 and sys.argv[1] == 'other_upload':
other_upload()
else:
manager.run()
| # coding: utf-8
"""
manage.py
~~~~~~~~~
"""
import os
import sys
import shutil
import platform
from app import app
from gen import Gen
from flask_script import Manager
"""编码设置"""
if (platform.python_version().split('.')[0] == '2'):
# reload(sys) is evil :)
reload(sys)
sys.setdefaultencoding('utf-8')
"""Git配置"""
git_url = app.config['GIT_URL']
git_branch = app.config['BRANCH']
manager = Manager(app)
def first_upload():
if not git_url:
raise
else:
harbor_folder = os.path.join(os.getcwd(), '.harbor')
os.chdir(harbor_folder)
os.popen('git checkout -b %s' % git_branch)
os.popen('git pull %s %s' % (git_url, git_branch))
os.popen('git add .')
os.popen('git commit -m "railgun site update...✅ "')
os.popen('git push -u %s %s' % (git_url, git_branch))
def other_upload():
if not git_url:
raise
else:
harbor_folder = os.path.join(os.getcwd(), '.harbor')
os.chdir(harbor_folder)
os.popen('git checkout %s' % git_branch)
os.popen('git add .')
os.popen('git commit -m "railgun site update...✅ "')
os.popen('git push -u %s %s' % (git_url, git_branch))
def update_static_res():
static_folder = os.path.join(os.getcwd(), 'app/static')
static_build_folder = os.path.join(os.getcwd(), 'app/build/static')
if os.path.isdir(static_build_folder):
shutil.rmtree(static_build_folder)
shutil.copytree(static_folder, static_build_folder)
if __name__ == '__main__':
if len(sys.argv) > 1 and sys.argv[1] == 'build':
_gen = Gen(app)
_gen.gen()
# update static resources
update_static_res()
elif len(sys.argv) > 1 and sys.argv[1] == 'first_upload':
first_upload()
elif len(sys.argv) > 1 and sys.argv[1] == 'other_upload':
other_upload()
else:
manager.run()
| en | 0.382236 | # coding: utf-8 manage.py ~~~~~~~~~ Encoding setup # reload(sys) is evil :) Git configuration # update static resources | 2.215065 | 2 |
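The sys.argv handling in manage.py above implies the following invocations; anything other than build, first_upload, or other_upload falls through to Flask-Script's Manager and its default commands.

# python manage.py build           # Gen(app).gen() then update_static_res()
# python manage.py first_upload    # create the branch, pull, commit, push .harbor
# python manage.py other_upload    # checkout the branch, commit, push .harbor
# python manage.py runserver       # handled by Flask-Script (runserver/shell are its defaults)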
tests/test_segmenters.py | edoarn/cv-models | 0 | 10645 | from typing import Any
import torch
import torch.nn as nn
from cvmodels.segmentation import unet, deeplab as dl
def output(model: nn.Module, input_batch: torch.Tensor) -> Any:
model.eval()
with torch.no_grad():
return model(input_batch)
def numel(m: torch.nn.Module, only_trainable: bool = True) -> int:
parameters = m.parameters()
if only_trainable:
parameters = list(p for p in parameters if p.requires_grad)
unique = dict((p.data_ptr(), p) for p in parameters).values()
return sum(p.numel() for p in unique)
def test_unet_out_transpose(random_seg_batch: torch.Tensor):
batches, _, height, width = random_seg_batch.shape
model = unet.UNet(bilinear=False, outputs=1)
assert numel(model) > 31_000_000
out = output(model, random_seg_batch)
assert out.shape == (batches, 1, height, width)
def test_unet_out_bilinear(random_seg_batch: torch.Tensor):
batches, _, height, width = random_seg_batch.shape
model = unet.UNet(bilinear=True, outputs=1)
assert numel(model) < 30_000_000
out = output(model, random_seg_batch)
assert out.shape == (batches, 1, height, width)
def test_deeplabv3_out(random_seg_batch: torch.Tensor):
batches, _, height, width = random_seg_batch.shape
for variant in dl.DeepLabVariants:
model = dl.DeepLabV3(variant=variant)
out = output(model, random_seg_batch)
assert out.shape == (batches, 1, height, width)
def test_deeplabv3_pretrain_backbone(random_seg_batch: torch.Tensor):
batches, _, height, width = random_seg_batch.shape
for variant in dl.DeepLabVariants:
model = dl.DeepLabV3(variant=variant, pretrained=True)
out = output(model, random_seg_batch)
assert out.shape == (batches, 1, height, width)
def test_deeplabv3_custom():
batch = torch.rand((2, 4, 480, 480))
batches, _, height, width = batch.shape
for variant in dl.DeepLabVariants:
model = dl.DeepLabV3(in_channels=4, out_channels=2, in_dimension=480, variant=variant, pretrained=True)
out = output(model, batch)
assert out.shape == (batches, 2, height, width)
def test_deeplabv3plus_out(random_seg_batch: torch.Tensor):
batches, _, height, width = random_seg_batch.shape
for variant in dl.DeepLabVariants:
model = dl.DeepLabV3Plus(variant=variant)
out = output(model, random_seg_batch)
assert out.shape == (batches, 1, height, width)
def test_deeplabv3plus_pretrain_backbone(random_seg_batch: torch.Tensor):
batches, _, height, width = random_seg_batch.shape
for variant in dl.DeepLabVariants:
model = dl.DeepLabV3Plus(variant=variant, pretrained=True)
out = output(model, random_seg_batch)
assert out.shape == (batches, 1, height, width)
def test_deeplabv3plus_custom():
batch = torch.rand((2, 4, 480, 480))
batches, _, height, width = batch.shape
for variant in dl.DeepLabVariants:
model = dl.DeepLabV3Plus(in_channels=4, out_channels=2, in_dimension=480, variant=variant, pretrained=True)
out = output(model, batch)
assert out.shape == (batches, 2, height, width)
| from typing import Any
import torch
import torch.nn as nn
from cvmodels.segmentation import unet, deeplab as dl
def output(model: nn.Module, input_batch: torch.Tensor) -> Any:
model.eval()
with torch.no_grad():
return model(input_batch)
def numel(m: torch.nn.Module, only_trainable: bool = True) -> int:
parameters = m.parameters()
if only_trainable:
parameters = list(p for p in parameters if p.requires_grad)
unique = dict((p.data_ptr(), p) for p in parameters).values()
return sum(p.numel() for p in unique)
def test_unet_out_transpose(random_seg_batch: torch.Tensor):
batches, _, height, width = random_seg_batch.shape
model = unet.UNet(bilinear=False, outputs=1)
assert numel(model) > 31_000_000
out = output(model, random_seg_batch)
assert out.shape == (batches, 1, height, width)
def test_unet_out_bilinear(random_seg_batch: torch.Tensor):
batches, _, height, width = random_seg_batch.shape
model = unet.UNet(bilinear=True, outputs=1)
assert numel(model) < 30_000_000
out = output(model, random_seg_batch)
assert out.shape == (batches, 1, height, width)
def test_deeplabv3_out(random_seg_batch: torch.Tensor):
batches, _, height, width = random_seg_batch.shape
for variant in dl.DeepLabVariants:
model = dl.DeepLabV3(variant=variant)
out = output(model, random_seg_batch)
assert out.shape == (batches, 1, height, width)
def test_deeplabv3_pretrain_backbone(random_seg_batch: torch.Tensor):
batches, _, height, width = random_seg_batch.shape
for variant in dl.DeepLabVariants:
model = dl.DeepLabV3(variant=variant, pretrained=True)
out = output(model, random_seg_batch)
assert out.shape == (batches, 1, height, width)
def test_deeplabv3_custom():
batch = torch.rand((2, 4, 480, 480))
batches, _, height, width = batch.shape
for variant in dl.DeepLabVariants:
model = dl.DeepLabV3(in_channels=4, out_channels=2, in_dimension=480, variant=variant, pretrained=True)
out = output(model, batch)
assert out.shape == (batches, 2, height, width)
def test_deeplabv3plus_out(random_seg_batch: torch.Tensor):
batches, _, height, width = random_seg_batch.shape
for variant in dl.DeepLabVariants:
model = dl.DeepLabV3Plus(variant=variant)
out = output(model, random_seg_batch)
assert out.shape == (batches, 1, height, width)
def test_deeplabv3plus_pretrain_backbone(random_seg_batch: torch.Tensor):
batches, _, height, width = random_seg_batch.shape
for variant in dl.DeepLabVariants:
model = dl.DeepLabV3Plus(variant=variant, pretrained=True)
out = output(model, random_seg_batch)
assert out.shape == (batches, 1, height, width)
def test_deeplabv3plus_custom():
batch = torch.rand((2, 4, 480, 480))
batches, _, height, width = batch.shape
for variant in dl.DeepLabVariants:
model = dl.DeepLabV3Plus(in_channels=4, out_channels=2, in_dimension=480, variant=variant, pretrained=True)
out = output(model, batch)
assert out.shape == (batches, 2, height, width)
| none | 1 | 2.250877 | 2 |
|
bagua/torch_api/contrib/sync_batchnorm.py | mmathys/bagua | 635 | 10646 | <gh_stars>100-1000
# Copyright (c) Uber Technologies, Inc. and its affiliates.
# Copyright (c) 2021 Kuaishou AI Platform & DS3 Lab.
#
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from distutils.version import LooseVersion
import torch
from torch.autograd.function import Function
import torch.nn.functional as F
from torch.nn.modules.batchnorm import _BatchNorm
import bagua.torch_api as bagua
from bagua.torch_api.communication import allgather, allreduce
# Backward compat for old PyTorch
if not hasattr(torch.jit, "unused"):
torch.jit.unused = lambda x: x
_SYNC_BN_V2 = LooseVersion(torch.__version__) >= LooseVersion("1.5.0") and LooseVersion(
torch.__version__
) <= LooseVersion("1.6.0")
_SYNC_BN_V3 = LooseVersion(torch.__version__) >= LooseVersion("1.6.0")
_SYNC_BN_V4 = LooseVersion(torch.__version__) >= LooseVersion("1.9.0")
class SyncBatchNorm(_BatchNorm):
r"""Applies synchronous BatchNorm for distributed module with N-dimensional BatchNorm layer(s).
See `BatchNorm <https://pytorch.org/docs/stable/generated/torch.nn.BatchNorm2d.html?highlight=batchnorm#torch.nn.BatchNorm2d>`_ for more details.
Arguments:
num_features: Number of channels :math:`C` from the shape :math:`(N, C, ...)`.
eps: A value added to the denominator for numerical stability. Default: 1e-5.
momentum: The value used for the running_mean and running_var
computation. Can be set to ``None`` for cumulative moving average
(i.e. simple average). Default: 0.1.
affine: A boolean value that when set to ``True``, this module has
learnable affine parameters. Default: ``True``.
track_running_stats: A boolean value that when set to ``True``, this
module tracks the running mean and variance, and when set to ``False``,
this module does not track such statistics and always uses batch
statistics in both training and eval modes. Default: ``True``.
.. note:: Only GPU input tensors are supported in the training mode.
"""
def __init__(
self,
num_features,
eps=1e-5,
momentum=0.1,
affine=True,
track_running_stats=True,
):
super().__init__(num_features, eps, momentum, affine, track_running_stats)
def _check_input_dim(self, input):
if input.dim() < 2:
raise ValueError(
"expected at least 2D input (got {}D input)".format(input.dim())
)
def _run_bn(self, input):
return F.batch_norm(
input,
self.running_mean,
self.running_var,
self.weight,
self.bias,
self.training or not self.track_running_stats,
self.momentum,
self.eps,
)
@torch.jit.unused
def _maybe_run_sync_bn(self, input):
if bagua.get_world_size() == 1:
return self._run_bn(input)
return _SyncBatchNorm.apply(
input,
self.weight,
self.bias,
self.running_mean,
self.running_var,
self.eps,
self.momentum,
)
def forward(self, input):
# currently only GPU input is supported by underlying kernel from PyTorch
if not input.is_cuda:
raise ValueError("SyncBatchNorm expected input tensor to be on GPU")
self._check_input_dim(input)
if self.training and self.track_running_stats:
assert self.num_batches_tracked is not None
self.num_batches_tracked = self.num_batches_tracked + 1
if not self.training and self.track_running_stats:
return self._run_bn(input)
else:
return self._maybe_run_sync_bn(input)
@classmethod
def convert_sync_batchnorm(cls, module):
r"""Helper function to convert all :attr:`BatchNorm*D` layers in the model to
`torch.nn.SyncBatchNorm <https://pytorch.org/docs/stable/generated/torch.nn.SyncBatchNorm.html?highlight=syncbatchnorm#torch.nn.SyncBatchNorm>`_ layers.
Arguments:
module (nn.Module): Module containing one or more :attr:`BatchNorm*D` layers
Returns:
The original :attr:`module` with the converted :class:`torch.nn.SyncBatchNorm`
layers. If the original :attr:`module` is a :attr:`BatchNorm*D` layer,
a new :class:`torch.nn.SyncBatchNorm` layer object will be returned
instead.
.. note:: This function must be called before :meth:`~bagua.torch_api.distributed.BaguaModule.with_bagua` method.
Example::
>>> # Network with nn.BatchNorm layer
>>> model = torch.nn.Sequential(
... torch.nn.Linear(D_in, H),
... torch.nn.ReLU(),
... torch.nn.Linear(H, D_out),
... )
>>> optimizer = torch.optim.SGD(
... model.parameters(),
... lr=0.01,
... momentum=0.9
... )
>>> sync_bn_model = bagua.torch_api.contrib.sync_batchnorm.SyncBatchNorm.convert_sync_batchnorm(model)
>>> bagua_model = sync_bn_model.with_bagua([optimizer], GradientAllReduce())
"""
module_output = module
if isinstance(module, torch.nn.modules.batchnorm._BatchNorm):
module_output = SyncBatchNorm(
module.num_features,
module.eps,
module.momentum,
module.affine,
module.track_running_stats,
)
if module.affine:
with torch.no_grad():
module_output.weight = module.weight
module_output.bias = module.bias
module_output.running_mean = module.running_mean
module_output.running_var = module.running_var
module_output.num_batches_tracked = module.num_batches_tracked
if hasattr(module, "qconfig"):
module_output.qconfig = module.qconfig
for name, child in module.named_children():
module_output.add_module(name, cls.convert_sync_batchnorm(child))
del module
return module_output
class _SyncBatchNorm(Function):
@staticmethod
def forward(self, input, weight, bias, running_mean, running_var, eps, momentum):
input = input.contiguous()
size = input.numel() // input.size(1)
count = torch.tensor([size])
# calculate mean/invstd for input.
mean, invstd = torch.batch_norm_stats(input, eps)
count, mean, invstd = count.cuda(), mean.cuda(), invstd.cuda()
nums_ranks = bagua.get_world_size()
count_all = torch.tensor(
[torch.empty_like(count).cpu().detach().numpy() for _ in range(nums_ranks)]
).cuda()
mean_all = torch.tensor(
[torch.empty_like(mean).cpu().detach().numpy() for _ in range(nums_ranks)]
).cuda()
invstd_all = torch.tensor(
[torch.empty_like(invstd).cpu().detach().numpy() for _ in range(nums_ranks)]
).cuda()
allgather(count.unsqueeze(0), count_all)
allgather(mean.unsqueeze(0), mean_all)
allgather(invstd.unsqueeze(0), invstd_all)
if _SYNC_BN_V3:
counts_for_bngswc = count_all.view(-1).float().to(input.device)
else:
# backwards compatibility
counts_for_bngswc = count_all.view(-1).tolist()
# calculate global mean & invstd
mean, invstd = torch.batch_norm_gather_stats_with_counts(
input,
mean_all,
invstd_all,
running_mean,
running_var,
momentum,
eps,
counts_for_bngswc,
)
self.save_for_backward(input, weight, mean, invstd, count_all)
# apply element-wise normalization
return torch.batch_norm_elemt(input, weight, bias, mean, invstd, eps)
@staticmethod
def backward(self, grad_output):
grad_output = grad_output.contiguous()
saved_input, weight, mean, invstd, count_all = self.saved_tensors
need_input_grad, need_weight_grad, need_bias_grad = self.needs_input_grad[0:3]
# calculate local stats as well as grad_weight / grad_bias
sum_dy, sum_dy_xmu, grad_weight, grad_bias = torch.batch_norm_backward_reduce(
grad_output,
saved_input,
mean,
invstd,
weight,
need_input_grad,
need_weight_grad,
need_bias_grad,
)
if need_input_grad:
# synchronizing stats used to calculate input gradient.
allreduce(sum_dy, sum_dy)
allreduce(sum_dy_xmu, sum_dy_xmu)
if _SYNC_BN_V4:
# from 1.9.0 on we need a count tensor on all devices
# count_all is calculated as total count across all ranks in forward function
count_all = count_all.to(dtype=torch.int, device=grad_output.device)
elif _SYNC_BN_V2 or _SYNC_BN_V3:
# before 1.9.0 we need the count as an integer to compute means values
count = count_all.sum()
else:
# before 1.5.0, sum_dy was sum of means from every worker, so we just
# need to divide it by number of workers
count = bagua.get_world_size()
# backward pass for gradient calculation
# we are calling into a non-public undocumented function which broke moving to 1.9.0
# https://github.com/pytorch/pytorch/issues/57900
if _SYNC_BN_V4:
# from 1.9.0 on, sums and count parameters expected
grad_input = torch.batch_norm_backward_elemt(
grad_output,
saved_input,
mean,
invstd,
weight,
sum_dy,
sum_dy_xmu,
count_all,
)
else:
# before 1.9.0, mean parameters expected, not sums and count
grad_input = torch.batch_norm_backward_elemt(
grad_output,
saved_input,
mean,
invstd,
weight,
sum_dy / count,
sum_dy_xmu / count,
)
else:
grad_input = None
# synchronizing of grad_weight / grad_bias is not needed as distributed
# training would handle all reduce.
if weight is None or not need_weight_grad:
grad_weight = None
if weight is None or not need_bias_grad:
grad_bias = None
return grad_input, grad_weight, grad_bias, None, None, None, None, None, None
| # Copyright (c) Uber Technologies, Inc. and its affiliates.
# Copyright (c) 2021 Kuaishou AI Platform & DS3 Lab.
#
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from distutils.version import LooseVersion
import torch
from torch.autograd.function import Function
import torch.nn.functional as F
from torch.nn.modules.batchnorm import _BatchNorm
import bagua.torch_api as bagua
from bagua.torch_api.communication import allgather, allreduce
# Backward compat for old PyTorch
if not hasattr(torch.jit, "unused"):
torch.jit.unused = lambda x: x
_SYNC_BN_V2 = LooseVersion(torch.__version__) >= LooseVersion("1.5.0") and LooseVersion(
torch.__version__
) <= LooseVersion("1.6.0")
_SYNC_BN_V3 = LooseVersion(torch.__version__) >= LooseVersion("1.6.0")
_SYNC_BN_V4 = LooseVersion(torch.__version__) >= LooseVersion("1.9.0")
class SyncBatchNorm(_BatchNorm):
r"""Applies synchronous BatchNorm for distributed module with N-dimensional BatchNorm layer(s).
See `BatchNorm <https://pytorch.org/docs/stable/generated/torch.nn.BatchNorm2d.html?highlight=batchnorm#torch.nn.BatchNorm2d>`_ for more details.
Arguments:
num_features: Number of channels :math:`C` from the shape :math:`(N, C, ...)`.
eps: A value added to the denominator for numerical stability. Default: 1e-5.
momentum: The value used for the running_mean and running_var
computation. Can be set to ``None`` for cumulative moving average
(i.e. simple average). Default: 0.1.
affine: A boolean value that when set to ``True``, this module has
learnable affine parameters. Default: ``True``.
track_running_stats: A boolean value that when set to ``True``, this
module tracks the running mean and variance, and when set to ``False``,
this module does not track such statistics and always uses batch
statistics in both training and eval modes. Default: ``True``.
.. note:: Only GPU input tensors are supported in the training mode.
"""
def __init__(
self,
num_features,
eps=1e-5,
momentum=0.1,
affine=True,
track_running_stats=True,
):
super().__init__(num_features, eps, momentum, affine, track_running_stats)
def _check_input_dim(self, input):
if input.dim() < 2:
raise ValueError(
"expected at least 2D input (got {}D input)".format(input.dim())
)
def _run_bn(self, input):
return F.batch_norm(
input,
self.running_mean,
self.running_var,
self.weight,
self.bias,
self.training or not self.track_running_stats,
self.momentum,
self.eps,
)
@torch.jit.unused
def _maybe_run_sync_bn(self, input):
if bagua.get_world_size() == 1:
return self._run_bn(input)
return _SyncBatchNorm.apply(
input,
self.weight,
self.bias,
self.running_mean,
self.running_var,
self.eps,
self.momentum,
)
def forward(self, input):
# currently only GPU input is supported by underlying kernel from PyTorch
if not input.is_cuda:
raise ValueError("SyncBatchNorm expected input tensor to be on GPU")
self._check_input_dim(input)
if self.training and self.track_running_stats:
assert self.num_batches_tracked is not None
self.num_batches_tracked = self.num_batches_tracked + 1
if not self.training and self.track_running_stats:
return self._run_bn(input)
else:
return self._maybe_run_sync_bn(input)
@classmethod
def convert_sync_batchnorm(cls, module):
r"""Helper function to convert all :attr:`BatchNorm*D` layers in the model to
`torch.nn.SyncBatchNorm <https://pytorch.org/docs/stable/generated/torch.nn.SyncBatchNorm.html?highlight=syncbatchnorm#torch.nn.SyncBatchNorm>`_ layers.
Arguments:
module (nn.Module): Module containing one or more :attr:`BatchNorm*D` layers
Returns:
The original :attr:`module` with the converted :class:`torch.nn.SyncBatchNorm`
layers. If the original :attr:`module` is a :attr:`BatchNorm*D` layer,
a new :class:`torch.nn.SyncBatchNorm` layer object will be returned
instead.
.. note:: This function must be called before :meth:`~bagua.torch_api.distributed.BaguaModule.with_bagua` method.
Example::
>>> # Network with nn.BatchNorm layer
>>> model = torch.nn.Sequential(
... torch.nn.Linear(D_in, H),
... torch.nn.ReLU(),
... torch.nn.Linear(H, D_out),
... )
>>> optimizer = torch.optim.SGD(
... model.parameters(),
... lr=0.01,
... momentum=0.9
... )
>>> sync_bn_model = bagua.torch_api.contrib.sync_batchnorm.SyncBatchNorm.convert_sync_batchnorm(model)
>>> bagua_model = sync_bn_model.with_bagua([optimizer], GradientAllReduce())
"""
module_output = module
if isinstance(module, torch.nn.modules.batchnorm._BatchNorm):
module_output = SyncBatchNorm(
module.num_features,
module.eps,
module.momentum,
module.affine,
module.track_running_stats,
)
if module.affine:
with torch.no_grad():
module_output.weight = module.weight
module_output.bias = module.bias
module_output.running_mean = module.running_mean
module_output.running_var = module.running_var
module_output.num_batches_tracked = module.num_batches_tracked
if hasattr(module, "qconfig"):
module_output.qconfig = module.qconfig
for name, child in module.named_children():
module_output.add_module(name, cls.convert_sync_batchnorm(child))
del module
return module_output
class _SyncBatchNorm(Function):
@staticmethod
def forward(self, input, weight, bias, running_mean, running_var, eps, momentum):
input = input.contiguous()
size = input.numel() // input.size(1)
count = torch.tensor([size])
# calculate mean/invstd for input.
mean, invstd = torch.batch_norm_stats(input, eps)
count, mean, invstd = count.cuda(), mean.cuda(), invstd.cuda()
nums_ranks = bagua.get_world_size()
count_all = torch.tensor(
[torch.empty_like(count).cpu().detach().numpy() for _ in range(nums_ranks)]
).cuda()
mean_all = torch.tensor(
[torch.empty_like(mean).cpu().detach().numpy() for _ in range(nums_ranks)]
).cuda()
invstd_all = torch.tensor(
[torch.empty_like(invstd).cpu().detach().numpy() for _ in range(nums_ranks)]
).cuda()
allgather(count.unsqueeze(0), count_all)
allgather(mean.unsqueeze(0), mean_all)
allgather(invstd.unsqueeze(0), invstd_all)
if _SYNC_BN_V3:
counts_for_bngswc = count_all.view(-1).float().to(input.device)
else:
# backwards compatibility
counts_for_bngswc = count_all.view(-1).tolist()
# calculate global mean & invstd
mean, invstd = torch.batch_norm_gather_stats_with_counts(
input,
mean_all,
invstd_all,
running_mean,
running_var,
momentum,
eps,
counts_for_bngswc,
)
self.save_for_backward(input, weight, mean, invstd, count_all)
# apply element-wise normalization
return torch.batch_norm_elemt(input, weight, bias, mean, invstd, eps)
@staticmethod
def backward(self, grad_output):
grad_output = grad_output.contiguous()
saved_input, weight, mean, invstd, count_all = self.saved_tensors
need_input_grad, need_weight_grad, need_bias_grad = self.needs_input_grad[0:3]
# calculate local stats as well as grad_weight / grad_bias
sum_dy, sum_dy_xmu, grad_weight, grad_bias = torch.batch_norm_backward_reduce(
grad_output,
saved_input,
mean,
invstd,
weight,
need_input_grad,
need_weight_grad,
need_bias_grad,
)
if need_input_grad:
# synchronizing stats used to calculate input gradient.
allreduce(sum_dy, sum_dy)
allreduce(sum_dy_xmu, sum_dy_xmu)
if _SYNC_BN_V4:
# from 1.9.0 on we need a count tensor on all devices
# count_all is calculated as total count across all ranks in forward function
count_all = count_all.to(dtype=torch.int, device=grad_output.device)
elif _SYNC_BN_V2 or _SYNC_BN_V3:
# before 1.9.0 we need the count as an integer to compute means values
count = count_all.sum()
else:
# before 1.5.0, sum_dy was sum of means from every worker, so we just
# need to divide it by number of workers
count = bagua.get_world_size()
# backward pass for gradient calculation
# we are calling into a non-public undocumented function which broke moving to 1.9.0
# https://github.com/pytorch/pytorch/issues/57900
if _SYNC_BN_V4:
# from 1.9.0 on, sums and count parameters expected
grad_input = torch.batch_norm_backward_elemt(
grad_output,
saved_input,
mean,
invstd,
weight,
sum_dy,
sum_dy_xmu,
count_all,
)
else:
# before 1.9.0, mean parameters expected, not sums and count
grad_input = torch.batch_norm_backward_elemt(
grad_output,
saved_input,
mean,
invstd,
weight,
sum_dy / count,
sum_dy_xmu / count,
)
else:
grad_input = None
# synchronizing of grad_weight / grad_bias is not needed as distributed
# training would handle all reduce.
if weight is None or not need_weight_grad:
grad_weight = None
if weight is None or not need_bias_grad:
grad_bias = None
return grad_input, grad_weight, grad_bias, None, None, None, None, None, None | en | 0.743994 | # Copyright (c) Uber Technologies, Inc. and its affiliates. # Copyright (c) 2021 Kuaishou AI Platform & DS3 Lab. # # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. # Backward compat for old PyTorch Applies synchronous BatchNorm for distributed module with N-dimensional BatchNorm layer(s). See `BatchNorm <https://pytorch.org/docs/stable/generated/torch.nn.BatchNorm2d.html?highlight=batchnorm#torch.nn.BatchNorm2d>`_ for more details. Arguments: num_features: Number of channels :math:`C` from the shape :math:`(N, C, ...)`. eps: A value added to the denominator for numerical stability. Default: 1e-5. momentum: The value used for the running_mean and running_var computation. Can be set to ``None`` for cumulative moving average (i.e. simple average). Default: 0.1. affine: A boolean value that when set to ``True``, this module has learnable affine parameters. Default: ``True``. track_running_stats: A boolean value that when set to ``True``, this module tracks the running mean and variance, and when set to ``False``, this module does not track such statistics and always uses batch statistics in both training and eval modes. Default: ``True``. .. note:: Only GPU input tensors are supported in the training mode. # currently only GPU input is supported by underlying kernel from PyTorch Helper function to convert all :attr:`BatchNorm*D` layers in the model to `torch.nn.SyncBatchNorm <https://pytorch.org/docs/stable/generated/torch.nn.SyncBatchNorm.html?highlight=syncbatchnorm#torch.nn.SyncBatchNorm>`_ layers. Arguments: module (nn.Module): Module containing one or more :attr:`BatchNorm*D` layers Returns: The original :attr:`module` with the converted :class:`torch.nn.SyncBatchNorm` layers. If the original :attr:`module` is a :attr:`BatchNorm*D` layer, a new :class:`torch.nn.SyncBatchNorm` layer object will be returned instead. .. note:: This function must be called before :meth:`~bagua.torch_api.distributed.BaguaModule.with_bagua` method. Example:: >>> # Network with nn.BatchNorm layer >>> model = torch.nn.Sequential( ... torch.nn.Linear(D_in, H), ... torch.nn.ReLU(), ... torch.nn.Linear(H, D_out), ... ) >>> optimizer = torch.optim.SGD( ... model.parameters(), ... lr=0.01, ... momentum=0.9 ... ) >>> sync_bn_model = bagua.torch_api.contrib.sync_batchnorm.SyncBatchNorm.convert_sync_batchnorm(model) >>> bagua_model = sync_bn_model.with_bagua([optimizer], GradientAllReduce()) # calculate mean/invstd for input. # backwards compatibility # calculate global mean & invstd # apply element-wise normalization # calculate local stats as well as grad_weight / grad_bias # synchronizing stats used to calculate input gradient. 
# from 1.9.0 on we need a count tensor on all devices # count_all is calculated as total count across all ranks in forward function # before 1.9.0 we need the count as an integer to compute means values # before 1.5.0, sum_dy was sum of means from every worker, so we just # need to divide it by number of workers # backward pass for gradient calculation # we are calling into a non-public undocumented function which broke moving to 1.9.0 # https://github.com/pytorch/pytorch/issues/57900 # from 1.9.0 on, sums and count parameters expected # before 1.9.0, mean parameters expected, not sums and count # synchronizing of grad_weight / grad_bias is not needed as distributed # training would handle all reduce. | 2.140937 | 2 |
tests/test_classification_metric.py | DaveFClarke/ml_bias_checking | 2 | 10647 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import pandas as pd
from aif360.datasets import BinaryLabelDataset
from aif360.metrics import ClassificationMetric
def test_generalized_entropy_index():
data = np.array([[0, 1],
[0, 0],
[1, 0],
[1, 1],
[1, 0],
[1, 0],
[2, 1],
[2, 0],
[2, 1],
[2, 1]])
pred = data.copy()
pred[[3, 9], -1] = 0
pred[[4, 5], -1] = 1
df = pd.DataFrame(data, columns=['feat', 'label'])
df2 = pd.DataFrame(pred, columns=['feat', 'label'])
bld = BinaryLabelDataset(df=df, label_names=['label'],
protected_attribute_names=['feat'])
bld2 = BinaryLabelDataset(df=df2, label_names=['label'],
protected_attribute_names=['feat'])
cm = ClassificationMetric(bld, bld2)
assert cm.generalized_entropy_index() == 0.2
pred = data.copy()
pred[:, -1] = np.array([0, 1, 1, 0, 0, 0, 0, 1, 1, 1])
df2 = pd.DataFrame(pred, columns=['feat', 'label'])
bld2 = BinaryLabelDataset(df=df2, label_names=['label'],
protected_attribute_names=['feat'])
cm = ClassificationMetric(bld, bld2)
assert cm.generalized_entropy_index() == 0.3
def test_theil_index():
data = np.array([[0, 1],
[0, 0],
[1, 0],
[1, 1],
[1, 0],
[1, 0],
[2, 1],
[2, 0],
[2, 1],
[2, 1]])
pred = data.copy()
pred[[3, 9], -1] = 0
pred[[4, 5], -1] = 1
df = pd.DataFrame(data, columns=['feat', 'label'])
df2 = pd.DataFrame(pred, columns=['feat', 'label'])
bld = BinaryLabelDataset(df=df, label_names=['label'],
protected_attribute_names=['feat'])
bld2 = BinaryLabelDataset(df=df2, label_names=['label'],
protected_attribute_names=['feat'])
cm = ClassificationMetric(bld, bld2)
assert cm.theil_index() == 4*np.log(2)/10
def test_between_all_groups():
data = np.array([[0, 1],
[0, 0],
[1, 0],
[1, 1],
[1, 0],
[1, 0],
[2, 1],
[2, 0],
[2, 1],
[2, 1]])
pred = data.copy()
pred[[3, 9], -1] = 0
pred[[4, 5], -1] = 1
df = pd.DataFrame(data, columns=['feat', 'label'])
df2 = pd.DataFrame(pred, columns=['feat', 'label'])
bld = BinaryLabelDataset(df=df, label_names=['label'],
protected_attribute_names=['feat'])
bld2 = BinaryLabelDataset(df=df2, label_names=['label'],
protected_attribute_names=['feat'])
cm = ClassificationMetric(bld, bld2)
b = np.array([1, 1, 1.25, 1.25, 1.25, 1.25, 0.75, 0.75, 0.75, 0.75])
assert cm.between_all_groups_generalized_entropy_index() == 1/20*np.sum(b**2 - 1)
def test_between_group():
data = np.array([[0, 0, 1],
[0, 1, 0],
[1, 1, 0],
[1, 1, 1],
[1, 0, 0],
[1, 0, 0]])
pred = data.copy()
pred[[0, 3], -1] = 0
pred[[4, 5], -1] = 1
df = pd.DataFrame(data, columns=['feat', 'feat2', 'label'])
df2 = pd.DataFrame(pred, columns=['feat', 'feat2', 'label'])
bld = BinaryLabelDataset(df=df, label_names=['label'],
protected_attribute_names=['feat', 'feat2'])
bld2 = BinaryLabelDataset(df=df2, label_names=['label'],
protected_attribute_names=['feat', 'feat2'])
cm = ClassificationMetric(bld, bld2, unprivileged_groups=[{'feat': 0}],
privileged_groups=[{'feat': 1}])
b = np.array([0.5, 0.5, 1.25, 1.25, 1.25, 1.25])
assert cm.between_group_generalized_entropy_index() == 1/12*np.sum(b**2 - 1)
| from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import pandas as pd
from aif360.datasets import BinaryLabelDataset
from aif360.metrics import ClassificationMetric
def test_generalized_entropy_index():
data = np.array([[0, 1],
[0, 0],
[1, 0],
[1, 1],
[1, 0],
[1, 0],
[2, 1],
[2, 0],
[2, 1],
[2, 1]])
pred = data.copy()
pred[[3, 9], -1] = 0
pred[[4, 5], -1] = 1
df = pd.DataFrame(data, columns=['feat', 'label'])
df2 = pd.DataFrame(pred, columns=['feat', 'label'])
bld = BinaryLabelDataset(df=df, label_names=['label'],
protected_attribute_names=['feat'])
bld2 = BinaryLabelDataset(df=df2, label_names=['label'],
protected_attribute_names=['feat'])
cm = ClassificationMetric(bld, bld2)
assert cm.generalized_entropy_index() == 0.2
pred = data.copy()
pred[:, -1] = np.array([0, 1, 1, 0, 0, 0, 0, 1, 1, 1])
df2 = pd.DataFrame(pred, columns=['feat', 'label'])
bld2 = BinaryLabelDataset(df=df2, label_names=['label'],
protected_attribute_names=['feat'])
cm = ClassificationMetric(bld, bld2)
assert cm.generalized_entropy_index() == 0.3
def test_theil_index():
data = np.array([[0, 1],
[0, 0],
[1, 0],
[1, 1],
[1, 0],
[1, 0],
[2, 1],
[2, 0],
[2, 1],
[2, 1]])
pred = data.copy()
pred[[3, 9], -1] = 0
pred[[4, 5], -1] = 1
df = pd.DataFrame(data, columns=['feat', 'label'])
df2 = pd.DataFrame(pred, columns=['feat', 'label'])
bld = BinaryLabelDataset(df=df, label_names=['label'],
protected_attribute_names=['feat'])
bld2 = BinaryLabelDataset(df=df2, label_names=['label'],
protected_attribute_names=['feat'])
cm = ClassificationMetric(bld, bld2)
assert cm.theil_index() == 4*np.log(2)/10
def test_between_all_groups():
data = np.array([[0, 1],
[0, 0],
[1, 0],
[1, 1],
[1, 0],
[1, 0],
[2, 1],
[2, 0],
[2, 1],
[2, 1]])
pred = data.copy()
pred[[3, 9], -1] = 0
pred[[4, 5], -1] = 1
df = pd.DataFrame(data, columns=['feat', 'label'])
df2 = pd.DataFrame(pred, columns=['feat', 'label'])
bld = BinaryLabelDataset(df=df, label_names=['label'],
protected_attribute_names=['feat'])
bld2 = BinaryLabelDataset(df=df2, label_names=['label'],
protected_attribute_names=['feat'])
cm = ClassificationMetric(bld, bld2)
b = np.array([1, 1, 1.25, 1.25, 1.25, 1.25, 0.75, 0.75, 0.75, 0.75])
assert cm.between_all_groups_generalized_entropy_index() == 1/20*np.sum(b**2 - 1)
def test_between_group():
data = np.array([[0, 0, 1],
[0, 1, 0],
[1, 1, 0],
[1, 1, 1],
[1, 0, 0],
[1, 0, 0]])
pred = data.copy()
pred[[0, 3], -1] = 0
pred[[4, 5], -1] = 1
df = pd.DataFrame(data, columns=['feat', 'feat2', 'label'])
df2 = pd.DataFrame(pred, columns=['feat', 'feat2', 'label'])
bld = BinaryLabelDataset(df=df, label_names=['label'],
protected_attribute_names=['feat', 'feat2'])
bld2 = BinaryLabelDataset(df=df2, label_names=['label'],
protected_attribute_names=['feat', 'feat2'])
cm = ClassificationMetric(bld, bld2, unprivileged_groups=[{'feat': 0}],
privileged_groups=[{'feat': 1}])
b = np.array([0.5, 0.5, 1.25, 1.25, 1.25, 1.25])
assert cm.between_group_generalized_entropy_index() == 1/12*np.sum(b**2 - 1)
| none | 1 | 2.478663 | 2 |
|
PDA/extra_assignments/10.6. Dicts_ Countries and cities/solution/main.py | EMbeDS-education/StatsAndComputing20212022 | 2 | 10648 | city_country = {}
for _ in range(int(input())):
country, *cities = input().split()
for city in cities:
city_country[city] = country
for _ in range(int(input())):
print(city_country[input()]) | city_country = {}
for _ in range(int(input())):
country, *cities = input().split()
for city in cities:
city_country[city] = country
for _ in range(int(input())):
print(city_country[input()]) | none | 1 | 3.767881 | 4 |
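The expected input protocol for the exercise solution above: a count of country lines, each line being a country followed by its cities, then a count of queries, each a single city name. Feeding a sample through stdin shows the behaviour; the sample values are illustrative.

import io, sys

sys.stdin = io.StringIO(
    "2\n"
    "France Paris Lyon\n"
    "Japan Tokyo Osaka\n"
    "3\n"
    "Lyon\nTokyo\nParis\n"
)
# Running the record's code with this input prints:
# France
# Japan
# France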
|
config.py | oyasr/mudawen | 0 | 10649 | <reponame>oyasr/mudawen<gh_stars>0
import os
from dotenv import load_dotenv
load_dotenv()
basedir = os.path.abspath(os.path.dirname(__file__))
class Config:
SECRET_KEY = os.getenv('SECRET_KEY') or os.urandom(32)
SQLALCHEMY_TRACK_MODIFICATIONS = False
SQLALCHEMY_RECORD_QUERIES = True
MAIL_SERVER = os.getenv('MAIL_SERVER') or 'smtp.googlemail.com'
MAIL_PORT = int(os.environ.get('MAIL_PORT', '587'))
MAIL_USE_TLS = os.environ.get('MAIL_USE_TLS', 'true').lower() in \
['true', 'on', '1']
MAIL_USERNAME = os.getenv('MAIL_USERNAME')
MAIL_PASSWORD = <PASSWORD>('<PASSWORD>')
MUDAWEN_MAIL_SUBJECT_PREFIX = '[Mudawen]'
MUDAWEN_MAIL_SENDER = '<NAME> <<EMAIL>>'
MUDAWEN_ADMIN = os.getenv('MUDAWEN_ADMIN')
MUDAWEN_POSTS_PER_PAGE = 20
MUDAWEN_FOLLOWERS_PER_PAGE = 50
MUDAWEN_COMMENTS_PER_PAGE = 30
MUDAWEN_QUERY_TIME_LIMIT = 0.5
@staticmethod
def init_app(app):
pass
class DevConfig(Config):
ENV = 'development'
SQLALCHEMY_DATABASE_URI = os.getenv('DEV_DATABASE_URL') or \
'sqlite:///' + os.path.join(basedir, 'data-dev.sqlite')
class TestConfig(Config):
TESTING = True
WTF_CSRF_ENABLED = False
SQLALCHEMY_DATABASE_URI = os.getenv('TEST_DATABASE_URL') or \
'sqlite://'
class ProductionConfig(Config):
SQLALCHEMY_DATABASE_URI = os.getenv('DATABASE_URL') or \
'sqlite:///' + os.path.join(basedir, 'data.sqlite')
config = {
'development': DevConfig,
'testing': TestConfig,
'production': ProductionConfig,
'default': DevConfig
}
| import os
from dotenv import load_dotenv
load_dotenv()
basedir = os.path.abspath(os.path.dirname(__file__))
class Config:
SECRET_KEY = os.getenv('SECRET_KEY') or os.urandom(32)
SQLALCHEMY_TRACK_MODIFICATIONS = False
SQLALCHEMY_RECORD_QUERIES = True
MAIL_SERVER = os.getenv('MAIL_SERVER') or 'smtp.googlemail.com'
MAIL_PORT = int(os.environ.get('MAIL_PORT', '587'))
MAIL_USE_TLS = os.environ.get('MAIL_USE_TLS', 'true').lower() in \
['true', 'on', '1']
MAIL_USERNAME = os.getenv('MAIL_USERNAME')
    MAIL_PASSWORD = os.getenv('MAIL_PASSWORD')
MUDAWEN_MAIL_SUBJECT_PREFIX = '[Mudawen]'
MUDAWEN_MAIL_SENDER = '<NAME> <<EMAIL>>'
MUDAWEN_ADMIN = os.getenv('MUDAWEN_ADMIN')
MUDAWEN_POSTS_PER_PAGE = 20
MUDAWEN_FOLLOWERS_PER_PAGE = 50
MUDAWEN_COMMENTS_PER_PAGE = 30
MUDAWEN_QUERY_TIME_LIMIT = 0.5
@staticmethod
def init_app(app):
pass
class DevConfig(Config):
ENV = 'development'
SQLALCHEMY_DATABASE_URI = os.getenv('DEV_DATABASE_URL') or \
'sqlite:///' + os.path.join(basedir, 'data-dev.sqlite')
class TestConfig(Config):
TESTING = True
WTF_CSRF_ENABLED = False
SQLALCHEMY_DATABASE_URI = os.getenv('TEST_DATABASE_URL') or \
'sqlite://'
class ProductionConfig(Config):
SQLALCHEMY_DATABASE_URI = os.getenv('DATABASE_URL') or \
'sqlite:///' + os.path.join(basedir, 'data.sqlite')
config = {
'development': DevConfig,
'testing': TestConfig,
'production': ProductionConfig,
'default': DevConfig
} | none | 1 | 2.072506 | 2 |
|
experiments/async_tests/async_3.py | 10ks/py_utils | 0 | 10650 | import asyncio
async def wait_sec(l):
print("Before wait")
await asyncio.sleep(1)
print("After wait")
l[0] = False
async def main():
# await asyncio.gather(wait_sec([True]), wait_sec([True]), wait_sec([True]))
run = [True]
asyncio.create_task(wait_sec(run))
await asyncio.sleep(0)
print("continuing main")
while run[0]:
print(".")
await asyncio.sleep(0.1)
# for i in range(10):
# print(i)
# # time.sleep(0.2)
# # await asyncio.sleep(0)
# await asyncio.sleep(0.2)
if __name__ == "__main__":
import time
s = time.perf_counter()
asyncio.run(main())
# Completing unfinished tasks (throws a warning)
# loop = asyncio.get_event_loop()
# loop.run_until_complete(main())
# pending = asyncio.Task.all_tasks()
# loop.run_until_complete(asyncio.gather(*pending))
elapsed = time.perf_counter() - s
print(f"{__file__} executed in {elapsed:0.2f} seconds.")
| import asyncio
async def wait_sec(l):
print("Before wait")
await asyncio.sleep(1)
print("After wait")
l[0] = False
async def main():
# await asyncio.gather(wait_sec([True]), wait_sec([True]), wait_sec([True]))
run = [True]
asyncio.create_task(wait_sec(run))
await asyncio.sleep(0)
print("continuing main")
while run[0]:
print(".")
await asyncio.sleep(0.1)
# for i in range(10):
# print(i)
# # time.sleep(0.2)
# # await asyncio.sleep(0)
# await asyncio.sleep(0.2)
if __name__ == "__main__":
import time
s = time.perf_counter()
asyncio.run(main())
# Completing unfinished tasks (throws a warning)
# loop = asyncio.get_event_loop()
# loop.run_until_complete(main())
# pending = asyncio.Task.all_tasks()
# loop.run_until_complete(asyncio.gather(*pending))
elapsed = time.perf_counter() - s
print(f"{__file__} executed in {elapsed:0.2f} seconds.") | en | 0.475588 | # await asyncio.gather(wait_sec([True]), wait_sec([True]), wait_sec([True])) # for i in range(10): # print(i) # # time.sleep(0.2) # # await asyncio.sleep(0) # await asyncio.sleep(0.2) # Completing unfinished tasks (throws a warning) # loop = asyncio.get_event_loop() # loop.run_until_complete(main()) # pending = asyncio.Task.all_tasks() # loop.run_until_complete(asyncio.gather(*pending)) | 3.408466 | 3 |
vk_bot/mods/util/calculator.py | triangle1984/GLaDOS | 3 | 10651 | # from vk_bot.core.modules.basicplug import BasicPlug
# import math
# class Calculator(BasicPlug):
# doc = "Калькулятор"
# command = ("калькулятор",)
# def main(self):
# try:
# x = self.text[1]; x = int(x)
# encalc = self.text[2]; encalc = encalc.lower()
# y = self.text[3]; y = int(y)
# except:
# self.sendmsg("""Пример команды: /калькулятор 2 + 2
# Использовать можно только 2 числа, и только через пробел""")
# return
# if encalc == "+" or encalc == "сложение":
# result = x + y
# elif encalc == "-" or encalc == "вычитание":
# result = x - y
# elif encalc == "*" or encalc == "умножение":
# result = x * y
# elif encalc == "**" or encalc == "степень" or encalc == "^":
# if x > 999 or y > 999:
# return
# result = x ** y
# elif encalc == "":
# try:
# x / y
# except ZeroDivisionError:
# result = "взорвать планету хочешь?"
# elif encalc == "корень":
# result = math.sqrt(x), math.sqrt(y)
# elif encalc == "синус":
# result = math.sin(x), math.sin(y)
# elif encalc == "косинус":
# result = math.cos(x), math.cos(y)
# else:
# return
# self.sendmsg(f"Ваш результат: {result}")
| # from vk_bot.core.modules.basicplug import BasicPlug
# import math
# class Calculator(BasicPlug):
# doc = "Калькулятор"
# command = ("калькулятор",)
# def main(self):
# try:
# x = self.text[1]; x = int(x)
# encalc = self.text[2]; encalc = encalc.lower()
# y = self.text[3]; y = int(y)
# except:
# self.sendmsg("""Пример команды: /калькулятор 2 + 2
# Использовать можно только 2 числа, и только через пробел""")
# return
# if encalc == "+" or encalc == "сложение":
# result = x + y
# elif encalc == "-" or encalc == "вычитание":
# result = x - y
# elif encalc == "*" or encalc == "умножение":
# result = x * y
# elif encalc == "**" or encalc == "степень" or encalc == "^":
# if x > 999 or y > 999:
# return
# result = x ** y
# elif encalc == "":
# try:
# x / y
# except ZeroDivisionError:
# result = "взорвать планету хочешь?"
# elif encalc == "корень":
# result = math.sqrt(x), math.sqrt(y)
# elif encalc == "синус":
# result = math.sin(x), math.sin(y)
# elif encalc == "косинус":
# result = math.cos(x), math.cos(y)
# else:
# return
# self.sendmsg(f"Ваш результат: {result}")
| ru | 0.234064 | # from vk_bot.core.modules.basicplug import BasicPlug # import math # class Calculator(BasicPlug): # doc = "Калькулятор" # command = ("калькулятор",) # def main(self): # try: # x = self.text[1]; x = int(x) # encalc = self.text[2]; encalc = encalc.lower() # y = self.text[3]; y = int(y) # except: # self.sendmsg("""Пример команды: /калькулятор 2 + 2 # Использовать можно только 2 числа, и только через пробел""") # return # if encalc == "+" or encalc == "сложение": # result = x + y # elif encalc == "-" or encalc == "вычитание": # result = x - y # elif encalc == "*" or encalc == "умножение": # result = x * y # elif encalc == "**" or encalc == "степень" or encalc == "^": # if x > 999 or y > 999: # return # result = x ** y # elif encalc == "": # try: # x / y # except ZeroDivisionError: # result = "взорвать планету хочешь?" # elif encalc == "корень": # result = math.sqrt(x), math.sqrt(y) # elif encalc == "синус": # result = math.sin(x), math.sin(y) # elif encalc == "косинус": # result = math.cos(x), math.cos(y) # else: # return # self.sendmsg(f"Ваш результат: {result}") | 3.136912 | 3 |
sample-input/homogeneous/geometry.py | AI-Pranto/OpenMOC | 97 | 10652 | import openmoc
import openmoc.log as log
import openmoc.plotter as plotter
import openmoc.materialize as materialize
log.set_log_level('NORMAL')
###############################################################################
########################### Creating Materials ############################
###############################################################################
log.py_printf('NORMAL', 'Importing materials data from HDF5...')
materials = openmoc.materialize.load_from_hdf5('c5g7-mgxs.h5', '../')
###############################################################################
########################### Creating Surfaces #############################
###############################################################################
log.py_printf('NORMAL', 'Creating surfaces...')
xmin = openmoc.XPlane(x=-5.0, name='xmin')
xmax = openmoc.XPlane(x= 5.0, name='xmax')
ymin = openmoc.YPlane(y=-5.0, name='ymin')
ymax = openmoc.YPlane(y= 5.0, name='ymax')
zmin = openmoc.ZPlane(z=-5.0, name='zmin')
zmax = openmoc.ZPlane(z= 5.0, name='zmax')
xmin.setBoundaryType(openmoc.REFLECTIVE)
xmax.setBoundaryType(openmoc.REFLECTIVE)
ymin.setBoundaryType(openmoc.REFLECTIVE)
ymax.setBoundaryType(openmoc.REFLECTIVE)
zmin.setBoundaryType(openmoc.REFLECTIVE)
zmax.setBoundaryType(openmoc.REFLECTIVE)
###############################################################################
############################# Creating Cells ##############################
###############################################################################
log.py_printf('NORMAL', 'Creating cells...')
fuel = openmoc.Cell(name='fuel')
fuel.setFill(materials['UO2'])
moderator = openmoc.Cell(name='moderator')
moderator.setFill(materials['UO2'])
root_cell = openmoc.Cell(name='root cell')
root_cell.addSurface(halfspace=+1, surface=xmin)
root_cell.addSurface(halfspace=-1, surface=xmax)
root_cell.addSurface(halfspace=+1, surface=ymin)
root_cell.addSurface(halfspace=-1, surface=ymax)
root_cell.addSurface(halfspace=+1, surface=zmin)
root_cell.addSurface(halfspace=-1, surface=zmax)
###############################################################################
########################### Creating Universes ############################
###############################################################################
log.py_printf('NORMAL', 'Creating universes...')
fue_univ = openmoc.Universe(name='homogeneous fue cell')
fue_univ.addCell(fuel)
mod_univ = openmoc.Universe(name='homogeneous mod cell')
mod_univ.addCell(moderator)
root_universe = openmoc.Universe(name='root universe')
root_universe.addCell(root_cell)
###############################################################################
########################### Creating Lattices #############################
###############################################################################
log.py_printf('NORMAL', 'Creating simple 10 x 10 lattice...')
f = fue_univ
lattice = openmoc.Lattice(name='10x10 lattice')
lattice.setWidth(width_x=1.0, width_y=1.0, width_z=1.0)
lattice.setUniverses([[[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f]],
[[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f]],
[[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f]],
[[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f]],
[[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f]],
[[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f]],
[[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f]],
[[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f]],
[[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f]],
[[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f]]])
root_cell.setFill(lattice)
###############################################################################
########################## Creating the Geometry ##########################
###############################################################################
log.py_printf('NORMAL', 'Creating geometry...')
geometry = openmoc.Geometry()
geometry.setRootUniverse(root_universe)
geometry.initializeFlatSourceRegions()
| import openmoc
import openmoc.log as log
import openmoc.plotter as plotter
import openmoc.materialize as materialize
log.set_log_level('NORMAL')
###############################################################################
########################### Creating Materials ############################
###############################################################################
log.py_printf('NORMAL', 'Importing materials data from HDF5...')
materials = openmoc.materialize.load_from_hdf5('c5g7-mgxs.h5', '../')
###############################################################################
########################### Creating Surfaces #############################
###############################################################################
log.py_printf('NORMAL', 'Creating surfaces...')
xmin = openmoc.XPlane(x=-5.0, name='xmin')
xmax = openmoc.XPlane(x= 5.0, name='xmax')
ymin = openmoc.YPlane(y=-5.0, name='ymin')
ymax = openmoc.YPlane(y= 5.0, name='ymax')
zmin = openmoc.ZPlane(z=-5.0, name='zmin')
zmax = openmoc.ZPlane(z= 5.0, name='zmax')
xmin.setBoundaryType(openmoc.REFLECTIVE)
xmax.setBoundaryType(openmoc.REFLECTIVE)
ymin.setBoundaryType(openmoc.REFLECTIVE)
ymax.setBoundaryType(openmoc.REFLECTIVE)
zmin.setBoundaryType(openmoc.REFLECTIVE)
zmax.setBoundaryType(openmoc.REFLECTIVE)
###############################################################################
############################# Creating Cells ##############################
###############################################################################
log.py_printf('NORMAL', 'Creating cells...')
fuel = openmoc.Cell(name='fuel')
fuel.setFill(materials['UO2'])
moderator = openmoc.Cell(name='moderator')
moderator.setFill(materials['UO2'])
root_cell = openmoc.Cell(name='root cell')
root_cell.addSurface(halfspace=+1, surface=xmin)
root_cell.addSurface(halfspace=-1, surface=xmax)
root_cell.addSurface(halfspace=+1, surface=ymin)
root_cell.addSurface(halfspace=-1, surface=ymax)
root_cell.addSurface(halfspace=+1, surface=zmin)
root_cell.addSurface(halfspace=-1, surface=zmax)
###############################################################################
########################### Creating Universes ############################
###############################################################################
log.py_printf('NORMAL', 'Creating universes...')
fue_univ = openmoc.Universe(name='homogeneous fue cell')
fue_univ.addCell(fuel)
mod_univ = openmoc.Universe(name='homogeneous mod cell')
mod_univ.addCell(moderator)
root_universe = openmoc.Universe(name='root universe')
root_universe.addCell(root_cell)
###############################################################################
########################### Creating Lattices #############################
###############################################################################
log.py_printf('NORMAL', 'Creating simple 10 x 10 lattice...')
f = fue_univ
lattice = openmoc.Lattice(name='10x10 lattice')
lattice.setWidth(width_x=1.0, width_y=1.0, width_z=1.0)
lattice.setUniverses([[[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f]],
[[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f]],
[[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f]],
[[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f]],
[[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f]],
[[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f]],
[[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f]],
[[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f]],
[[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f]],
[[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f]]])
root_cell.setFill(lattice)
###############################################################################
########################## Creating the Geometry ##########################
###############################################################################
log.py_printf('NORMAL', 'Creating geometry...')
geometry = openmoc.Geometry()
geometry.setRootUniverse(root_universe)
geometry.initializeFlatSourceRegions()
| de | 0.847286 | ############################################################################### ########################### Creating Materials ############################ ############################################################################### ############################################################################### ########################### Creating Surfaces ############################# ############################################################################### ############################################################################### ############################# Creating Cells ############################## ############################################################################### ############################################################################### ########################### Creating Universes ############################ ############################################################################### ############################################################################### ########################### Creating Lattices ############################# ############################################################################### ############################################################################### ########################## Creating the Geometry ########################## ############################################################################### | 1.948448 | 2 |
google/ads/google_ads/v5/__init__.py | arammaliachi/google-ads-python | 1 | 10653 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
import sys
from google.ads.google_ads import util
if sys.version_info < (3, 6):
raise ImportError("This module requires Python 3.6 or later.")
_lazy_name_to_package_map = {
"account_budget_proposal_service_client": "google.ads.google_ads.v5.services",
"account_budget_service_client": "google.ads.google_ads.v5.services",
"account_link_service_client": "google.ads.google_ads.v5.services",
"ad_group_ad_asset_view_service_client": "google.ads.google_ads.v5.services",
"ad_group_ad_label_service_client": "google.ads.google_ads.v5.services",
"ad_group_ad_service_client": "google.ads.google_ads.v5.services",
"ad_group_audience_view_service_client": "google.ads.google_ads.v5.services",
"ad_group_bid_modifier_service_client": "google.ads.google_ads.v5.services",
"ad_group_criterion_label_service_client": "google.ads.google_ads.v5.services",
"ad_group_criterion_service_client": "google.ads.google_ads.v5.services",
"ad_group_criterion_simulation_service_client": "google.ads.google_ads.v5.services",
"ad_group_extension_setting_service_client": "google.ads.google_ads.v5.services",
"ad_group_feed_service_client": "google.ads.google_ads.v5.services",
"ad_group_label_service_client": "google.ads.google_ads.v5.services",
"ad_group_service_client": "google.ads.google_ads.v5.services",
"ad_group_simulation_service_client": "google.ads.google_ads.v5.services",
"ad_parameter_service_client": "google.ads.google_ads.v5.services",
"ad_schedule_view_service_client": "google.ads.google_ads.v5.services",
"ad_service_client": "google.ads.google_ads.v5.services",
"age_range_view_service_client": "google.ads.google_ads.v5.services",
"asset_service_client": "google.ads.google_ads.v5.services",
"batch_job_service_client": "google.ads.google_ads.v5.services",
"bidding_strategy_service_client": "google.ads.google_ads.v5.services",
"billing_setup_service_client": "google.ads.google_ads.v5.services",
"campaign_asset_service_client": "google.ads.google_ads.v5.services",
"campaign_audience_view_service_client": "google.ads.google_ads.v5.services",
"campaign_bid_modifier_service_client": "google.ads.google_ads.v5.services",
"campaign_budget_service_client": "google.ads.google_ads.v5.services",
"campaign_criterion_service_client": "google.ads.google_ads.v5.services",
"campaign_criterion_simulation_service_client": "google.ads.google_ads.v5.services",
"campaign_draft_service_client": "google.ads.google_ads.v5.services",
"campaign_experiment_service_client": "google.ads.google_ads.v5.services",
"campaign_extension_setting_service_client": "google.ads.google_ads.v5.services",
"campaign_feed_service_client": "google.ads.google_ads.v5.services",
"campaign_label_service_client": "google.ads.google_ads.v5.services",
"campaign_service_client": "google.ads.google_ads.v5.services",
"campaign_shared_set_service_client": "google.ads.google_ads.v5.services",
"carrier_constant_service_client": "google.ads.google_ads.v5.services",
"change_status_service_client": "google.ads.google_ads.v5.services",
"click_view_service_client": "google.ads.google_ads.v5.services",
"conversion_action_service_client": "google.ads.google_ads.v5.services",
"conversion_adjustment_upload_service_client": "google.ads.google_ads.v5.services",
"conversion_upload_service_client": "google.ads.google_ads.v5.services",
"currency_constant_service_client": "google.ads.google_ads.v5.services",
"custom_interest_service_client": "google.ads.google_ads.v5.services",
"customer_client_link_service_client": "google.ads.google_ads.v5.services",
"customer_client_service_client": "google.ads.google_ads.v5.services",
"customer_extension_setting_service_client": "google.ads.google_ads.v5.services",
"customer_feed_service_client": "google.ads.google_ads.v5.services",
"customer_label_service_client": "google.ads.google_ads.v5.services",
"customer_manager_link_service_client": "google.ads.google_ads.v5.services",
"customer_negative_criterion_service_client": "google.ads.google_ads.v5.services",
"customer_service_client": "google.ads.google_ads.v5.services",
"detail_placement_view_service_client": "google.ads.google_ads.v5.services",
"display_keyword_view_service_client": "google.ads.google_ads.v5.services",
"distance_view_service_client": "google.ads.google_ads.v5.services",
"domain_category_service_client": "google.ads.google_ads.v5.services",
"dynamic_search_ads_search_term_view_service_client": "google.ads.google_ads.v5.services",
"expanded_landing_page_view_service_client": "google.ads.google_ads.v5.services",
"extension_feed_item_service_client": "google.ads.google_ads.v5.services",
"feed_item_service_client": "google.ads.google_ads.v5.services",
"feed_item_target_service_client": "google.ads.google_ads.v5.services",
"feed_mapping_service_client": "google.ads.google_ads.v5.services",
"feed_placeholder_view_service_client": "google.ads.google_ads.v5.services",
"feed_service_client": "google.ads.google_ads.v5.services",
"gender_view_service_client": "google.ads.google_ads.v5.services",
"geo_target_constant_service_client": "google.ads.google_ads.v5.services",
"geographic_view_service_client": "google.ads.google_ads.v5.services",
"google_ads_field_service_client": "google.ads.google_ads.v5.services",
"google_ads_service_client": "google.ads.google_ads.v5.services",
"group_placement_view_service_client": "google.ads.google_ads.v5.services",
"hotel_group_view_service_client": "google.ads.google_ads.v5.services",
"hotel_performance_view_service_client": "google.ads.google_ads.v5.services",
"income_range_view_service_client": "google.ads.google_ads.v5.services",
"invoice_service_client": "google.ads.google_ads.v5.services",
"keyword_plan_ad_group_keyword_service_client": "google.ads.google_ads.v5.services",
"keyword_plan_ad_group_service_client": "google.ads.google_ads.v5.services",
"keyword_plan_campaign_keyword_service_client": "google.ads.google_ads.v5.services",
"keyword_plan_campaign_service_client": "google.ads.google_ads.v5.services",
"keyword_plan_idea_service_client": "google.ads.google_ads.v5.services",
"keyword_plan_service_client": "google.ads.google_ads.v5.services",
"keyword_view_service_client": "google.ads.google_ads.v5.services",
"label_service_client": "google.ads.google_ads.v5.services",
"landing_page_view_service_client": "google.ads.google_ads.v5.services",
"language_constant_service_client": "google.ads.google_ads.v5.services",
"location_view_service_client": "google.ads.google_ads.v5.services",
"managed_placement_view_service_client": "google.ads.google_ads.v5.services",
"media_file_service_client": "google.ads.google_ads.v5.services",
"merchant_center_link_service_client": "google.ads.google_ads.v5.services",
"mobile_app_category_constant_service_client": "google.ads.google_ads.v5.services",
"mobile_device_constant_service_client": "google.ads.google_ads.v5.services",
"offline_user_data_job_service_client": "google.ads.google_ads.v5.services",
"operating_system_version_constant_service_client": "google.ads.google_ads.v5.services",
"paid_organic_search_term_view_service_client": "google.ads.google_ads.v5.services",
"parental_status_view_service_client": "google.ads.google_ads.v5.services",
"payments_account_service_client": "google.ads.google_ads.v5.services",
"product_bidding_category_constant_service_client": "google.ads.google_ads.v5.services",
"product_group_view_service_client": "google.ads.google_ads.v5.services",
"reach_plan_service_client": "google.ads.google_ads.v5.services",
"recommendation_service_client": "google.ads.google_ads.v5.services",
"remarketing_action_service_client": "google.ads.google_ads.v5.services",
"search_term_view_service_client": "google.ads.google_ads.v5.services",
"shared_criterion_service_client": "google.ads.google_ads.v5.services",
"shared_set_service_client": "google.ads.google_ads.v5.services",
"shopping_performance_view_service_client": "google.ads.google_ads.v5.services",
"third_party_app_analytics_link_service_client": "google.ads.google_ads.v5.services",
"topic_constant_service_client": "google.ads.google_ads.v5.services",
"topic_view_service_client": "google.ads.google_ads.v5.services",
"user_data_service_client": "google.ads.google_ads.v5.services",
"user_interest_service_client": "google.ads.google_ads.v5.services",
"user_list_service_client": "google.ads.google_ads.v5.services",
"user_location_view_service_client": "google.ads.google_ads.v5.services",
"video_service_client": "google.ads.google_ads.v5.services",
"account_budget_proposal_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"account_budget_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"account_link_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"ad_group_ad_asset_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"ad_group_ad_label_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"ad_group_ad_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"ad_group_audience_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"ad_group_bid_modifier_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"ad_group_criterion_label_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"ad_group_criterion_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"ad_group_criterion_simulation_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"ad_group_extension_setting_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"ad_group_feed_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"ad_group_label_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"ad_group_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"ad_group_simulation_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"ad_parameter_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"ad_schedule_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"ad_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"age_range_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"asset_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"batch_job_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"bidding_strategy_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"billing_setup_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"campaign_asset_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"campaign_audience_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"campaign_bid_modifier_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"campaign_budget_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"campaign_criterion_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"campaign_criterion_simulation_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"campaign_draft_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"campaign_experiment_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"campaign_extension_setting_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"campaign_feed_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"campaign_label_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"campaign_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"campaign_shared_set_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"carrier_constant_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"change_status_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"click_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"conversion_action_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"conversion_adjustment_upload_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"conversion_upload_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"currency_constant_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"custom_interest_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"customer_client_link_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"customer_client_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"customer_extension_setting_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"customer_feed_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"customer_label_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"customer_manager_link_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"customer_negative_criterion_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"customer_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"detail_placement_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"display_keyword_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"distance_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"domain_category_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"dynamic_search_ads_search_term_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"expanded_landing_page_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"extension_feed_item_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"feed_item_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"feed_item_target_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"feed_mapping_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"feed_placeholder_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"feed_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"gender_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"geo_target_constant_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"geographic_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"google_ads_field_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"google_ads_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"group_placement_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"hotel_group_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"hotel_performance_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"income_range_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"invoice_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"keyword_plan_ad_group_keyword_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"keyword_plan_ad_group_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"keyword_plan_campaign_keyword_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"keyword_plan_campaign_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"keyword_plan_idea_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"keyword_plan_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"keyword_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"label_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"landing_page_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"language_constant_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"location_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"managed_placement_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"media_file_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"merchant_center_link_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"mobile_app_category_constant_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"mobile_device_constant_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"offline_user_data_job_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"operating_system_version_constant_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"paid_organic_search_term_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"parental_status_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"payments_account_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"product_bidding_category_constant_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"product_group_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"reach_plan_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"recommendation_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"remarketing_action_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"search_term_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"shared_criterion_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"shared_set_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"shopping_performance_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"third_party_app_analytics_link_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"topic_constant_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"topic_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"user_data_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"user_interest_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"user_list_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"user_location_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"video_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
}
# Background on how this behaves: https://www.python.org/dev/peps/pep-0562/
def __getattr__(name): # Requires Python >= 3.7
"""Lazily perform imports and class definitions on first demand."""
if name == "__all__":
converted = (
util.convert_snake_case_to_upper_case(key)
for key in _lazy_name_to_package_map
)
all_names = sorted(converted)
globals()["__all__"] = all_names
return all_names
elif name.endswith("Transport"):
module = __getattr__(util.convert_upper_case_to_snake_case(name))
sub_mod_class = getattr(module, name)
klass = type(name, (sub_mod_class,), {"__doc__": sub_mod_class.__doc__})
globals()[name] = klass
return klass
elif name.endswith("ServiceClient"):
module = __getattr__(util.convert_upper_case_to_snake_case(name))
enums = __getattr__("enums")
sub_mod_class = getattr(module, name)
klass = type(
name,
(sub_mod_class,),
{"__doc__": sub_mod_class.__doc__, "enums": enums},
)
globals()[name] = klass
return klass
elif name == "enums":
path = "google.ads.google_ads.v5.services.enums"
module = importlib.import_module(path)
globals()[name] = module
return module
elif name == "types":
path = "google.ads.google_ads.v5.types"
module = importlib.import_module(path)
globals()[name] = module
return module
elif name in _lazy_name_to_package_map:
module = importlib.import_module(
f"{_lazy_name_to_package_map[name]}.{name}"
)
globals()[name] = module
return module
else:
raise AttributeError(f"unknown sub-module {name!r}.")
def __dir__():
return globals().get("__all__") or __getattr__("__all__")
if not sys.version_info >= (3, 7):
from pep562 import Pep562
Pep562(__name__)
| # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
import sys
from google.ads.google_ads import util
if sys.version_info < (3, 6):
raise ImportError("This module requires Python 3.6 or later.")
_lazy_name_to_package_map = {
"account_budget_proposal_service_client": "google.ads.google_ads.v5.services",
"account_budget_service_client": "google.ads.google_ads.v5.services",
"account_link_service_client": "google.ads.google_ads.v5.services",
"ad_group_ad_asset_view_service_client": "google.ads.google_ads.v5.services",
"ad_group_ad_label_service_client": "google.ads.google_ads.v5.services",
"ad_group_ad_service_client": "google.ads.google_ads.v5.services",
"ad_group_audience_view_service_client": "google.ads.google_ads.v5.services",
"ad_group_bid_modifier_service_client": "google.ads.google_ads.v5.services",
"ad_group_criterion_label_service_client": "google.ads.google_ads.v5.services",
"ad_group_criterion_service_client": "google.ads.google_ads.v5.services",
"ad_group_criterion_simulation_service_client": "google.ads.google_ads.v5.services",
"ad_group_extension_setting_service_client": "google.ads.google_ads.v5.services",
"ad_group_feed_service_client": "google.ads.google_ads.v5.services",
"ad_group_label_service_client": "google.ads.google_ads.v5.services",
"ad_group_service_client": "google.ads.google_ads.v5.services",
"ad_group_simulation_service_client": "google.ads.google_ads.v5.services",
"ad_parameter_service_client": "google.ads.google_ads.v5.services",
"ad_schedule_view_service_client": "google.ads.google_ads.v5.services",
"ad_service_client": "google.ads.google_ads.v5.services",
"age_range_view_service_client": "google.ads.google_ads.v5.services",
"asset_service_client": "google.ads.google_ads.v5.services",
"batch_job_service_client": "google.ads.google_ads.v5.services",
"bidding_strategy_service_client": "google.ads.google_ads.v5.services",
"billing_setup_service_client": "google.ads.google_ads.v5.services",
"campaign_asset_service_client": "google.ads.google_ads.v5.services",
"campaign_audience_view_service_client": "google.ads.google_ads.v5.services",
"campaign_bid_modifier_service_client": "google.ads.google_ads.v5.services",
"campaign_budget_service_client": "google.ads.google_ads.v5.services",
"campaign_criterion_service_client": "google.ads.google_ads.v5.services",
"campaign_criterion_simulation_service_client": "google.ads.google_ads.v5.services",
"campaign_draft_service_client": "google.ads.google_ads.v5.services",
"campaign_experiment_service_client": "google.ads.google_ads.v5.services",
"campaign_extension_setting_service_client": "google.ads.google_ads.v5.services",
"campaign_feed_service_client": "google.ads.google_ads.v5.services",
"campaign_label_service_client": "google.ads.google_ads.v5.services",
"campaign_service_client": "google.ads.google_ads.v5.services",
"campaign_shared_set_service_client": "google.ads.google_ads.v5.services",
"carrier_constant_service_client": "google.ads.google_ads.v5.services",
"change_status_service_client": "google.ads.google_ads.v5.services",
"click_view_service_client": "google.ads.google_ads.v5.services",
"conversion_action_service_client": "google.ads.google_ads.v5.services",
"conversion_adjustment_upload_service_client": "google.ads.google_ads.v5.services",
"conversion_upload_service_client": "google.ads.google_ads.v5.services",
"currency_constant_service_client": "google.ads.google_ads.v5.services",
"custom_interest_service_client": "google.ads.google_ads.v5.services",
"customer_client_link_service_client": "google.ads.google_ads.v5.services",
"customer_client_service_client": "google.ads.google_ads.v5.services",
"customer_extension_setting_service_client": "google.ads.google_ads.v5.services",
"customer_feed_service_client": "google.ads.google_ads.v5.services",
"customer_label_service_client": "google.ads.google_ads.v5.services",
"customer_manager_link_service_client": "google.ads.google_ads.v5.services",
"customer_negative_criterion_service_client": "google.ads.google_ads.v5.services",
"customer_service_client": "google.ads.google_ads.v5.services",
"detail_placement_view_service_client": "google.ads.google_ads.v5.services",
"display_keyword_view_service_client": "google.ads.google_ads.v5.services",
"distance_view_service_client": "google.ads.google_ads.v5.services",
"domain_category_service_client": "google.ads.google_ads.v5.services",
"dynamic_search_ads_search_term_view_service_client": "google.ads.google_ads.v5.services",
"expanded_landing_page_view_service_client": "google.ads.google_ads.v5.services",
"extension_feed_item_service_client": "google.ads.google_ads.v5.services",
"feed_item_service_client": "google.ads.google_ads.v5.services",
"feed_item_target_service_client": "google.ads.google_ads.v5.services",
"feed_mapping_service_client": "google.ads.google_ads.v5.services",
"feed_placeholder_view_service_client": "google.ads.google_ads.v5.services",
"feed_service_client": "google.ads.google_ads.v5.services",
"gender_view_service_client": "google.ads.google_ads.v5.services",
"geo_target_constant_service_client": "google.ads.google_ads.v5.services",
"geographic_view_service_client": "google.ads.google_ads.v5.services",
"google_ads_field_service_client": "google.ads.google_ads.v5.services",
"google_ads_service_client": "google.ads.google_ads.v5.services",
"group_placement_view_service_client": "google.ads.google_ads.v5.services",
"hotel_group_view_service_client": "google.ads.google_ads.v5.services",
"hotel_performance_view_service_client": "google.ads.google_ads.v5.services",
"income_range_view_service_client": "google.ads.google_ads.v5.services",
"invoice_service_client": "google.ads.google_ads.v5.services",
"keyword_plan_ad_group_keyword_service_client": "google.ads.google_ads.v5.services",
"keyword_plan_ad_group_service_client": "google.ads.google_ads.v5.services",
"keyword_plan_campaign_keyword_service_client": "google.ads.google_ads.v5.services",
"keyword_plan_campaign_service_client": "google.ads.google_ads.v5.services",
"keyword_plan_idea_service_client": "google.ads.google_ads.v5.services",
"keyword_plan_service_client": "google.ads.google_ads.v5.services",
"keyword_view_service_client": "google.ads.google_ads.v5.services",
"label_service_client": "google.ads.google_ads.v5.services",
"landing_page_view_service_client": "google.ads.google_ads.v5.services",
"language_constant_service_client": "google.ads.google_ads.v5.services",
"location_view_service_client": "google.ads.google_ads.v5.services",
"managed_placement_view_service_client": "google.ads.google_ads.v5.services",
"media_file_service_client": "google.ads.google_ads.v5.services",
"merchant_center_link_service_client": "google.ads.google_ads.v5.services",
"mobile_app_category_constant_service_client": "google.ads.google_ads.v5.services",
"mobile_device_constant_service_client": "google.ads.google_ads.v5.services",
"offline_user_data_job_service_client": "google.ads.google_ads.v5.services",
"operating_system_version_constant_service_client": "google.ads.google_ads.v5.services",
"paid_organic_search_term_view_service_client": "google.ads.google_ads.v5.services",
"parental_status_view_service_client": "google.ads.google_ads.v5.services",
"payments_account_service_client": "google.ads.google_ads.v5.services",
"product_bidding_category_constant_service_client": "google.ads.google_ads.v5.services",
"product_group_view_service_client": "google.ads.google_ads.v5.services",
"reach_plan_service_client": "google.ads.google_ads.v5.services",
"recommendation_service_client": "google.ads.google_ads.v5.services",
"remarketing_action_service_client": "google.ads.google_ads.v5.services",
"search_term_view_service_client": "google.ads.google_ads.v5.services",
"shared_criterion_service_client": "google.ads.google_ads.v5.services",
"shared_set_service_client": "google.ads.google_ads.v5.services",
"shopping_performance_view_service_client": "google.ads.google_ads.v5.services",
"third_party_app_analytics_link_service_client": "google.ads.google_ads.v5.services",
"topic_constant_service_client": "google.ads.google_ads.v5.services",
"topic_view_service_client": "google.ads.google_ads.v5.services",
"user_data_service_client": "google.ads.google_ads.v5.services",
"user_interest_service_client": "google.ads.google_ads.v5.services",
"user_list_service_client": "google.ads.google_ads.v5.services",
"user_location_view_service_client": "google.ads.google_ads.v5.services",
"video_service_client": "google.ads.google_ads.v5.services",
"account_budget_proposal_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"account_budget_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"account_link_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"ad_group_ad_asset_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"ad_group_ad_label_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"ad_group_ad_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"ad_group_audience_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"ad_group_bid_modifier_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"ad_group_criterion_label_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"ad_group_criterion_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"ad_group_criterion_simulation_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"ad_group_extension_setting_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"ad_group_feed_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"ad_group_label_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"ad_group_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"ad_group_simulation_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"ad_parameter_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"ad_schedule_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"ad_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"age_range_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"asset_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"batch_job_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"bidding_strategy_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"billing_setup_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"campaign_asset_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"campaign_audience_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"campaign_bid_modifier_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"campaign_budget_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"campaign_criterion_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"campaign_criterion_simulation_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"campaign_draft_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"campaign_experiment_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"campaign_extension_setting_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"campaign_feed_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"campaign_label_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"campaign_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"campaign_shared_set_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"carrier_constant_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"change_status_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"click_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"conversion_action_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"conversion_adjustment_upload_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"conversion_upload_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"currency_constant_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"custom_interest_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"customer_client_link_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"customer_client_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"customer_extension_setting_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"customer_feed_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"customer_label_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"customer_manager_link_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"customer_negative_criterion_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"customer_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"detail_placement_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"display_keyword_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"distance_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"domain_category_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"dynamic_search_ads_search_term_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"expanded_landing_page_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"extension_feed_item_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"feed_item_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"feed_item_target_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"feed_mapping_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"feed_placeholder_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"feed_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"gender_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"geo_target_constant_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"geographic_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"google_ads_field_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"google_ads_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"group_placement_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"hotel_group_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"hotel_performance_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"income_range_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"invoice_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"keyword_plan_ad_group_keyword_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"keyword_plan_ad_group_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"keyword_plan_campaign_keyword_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"keyword_plan_campaign_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"keyword_plan_idea_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"keyword_plan_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"keyword_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"label_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"landing_page_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"language_constant_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"location_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"managed_placement_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"media_file_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"merchant_center_link_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"mobile_app_category_constant_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"mobile_device_constant_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"offline_user_data_job_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"operating_system_version_constant_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"paid_organic_search_term_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"parental_status_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"payments_account_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"product_bidding_category_constant_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"product_group_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"reach_plan_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"recommendation_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"remarketing_action_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"search_term_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"shared_criterion_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"shared_set_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"shopping_performance_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"third_party_app_analytics_link_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"topic_constant_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"topic_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"user_data_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"user_interest_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"user_list_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"user_location_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"video_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
}
# Background on how this behaves: https://www.python.org/dev/peps/pep-0562/
def __getattr__(name): # Requires Python >= 3.7
"""Lazily perform imports and class definitions on first demand."""
if name == "__all__":
converted = (
util.convert_snake_case_to_upper_case(key)
for key in _lazy_name_to_package_map
)
all_names = sorted(converted)
globals()["__all__"] = all_names
return all_names
elif name.endswith("Transport"):
module = __getattr__(util.convert_upper_case_to_snake_case(name))
sub_mod_class = getattr(module, name)
klass = type(name, (sub_mod_class,), {"__doc__": sub_mod_class.__doc__})
globals()[name] = klass
return klass
elif name.endswith("ServiceClient"):
module = __getattr__(util.convert_upper_case_to_snake_case(name))
enums = __getattr__("enums")
sub_mod_class = getattr(module, name)
klass = type(
name,
(sub_mod_class,),
{"__doc__": sub_mod_class.__doc__, "enums": enums},
)
globals()[name] = klass
return klass
elif name == "enums":
path = "google.ads.google_ads.v5.services.enums"
module = importlib.import_module(path)
globals()[name] = module
return module
elif name == "types":
path = "google.ads.google_ads.v5.types"
module = importlib.import_module(path)
globals()[name] = module
return module
elif name in _lazy_name_to_package_map:
module = importlib.import_module(
f"{_lazy_name_to_package_map[name]}.{name}"
)
globals()[name] = module
return module
else:
raise AttributeError(f"unknown sub-module {name!r}.")
def __dir__():
return globals().get("__all__") or __getattr__("__all__")
if not sys.version_info >= (3, 7):
from pep562 import Pep562
Pep562(__name__)
| en | 0.84044 | # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Background on how this behaves: https://www.python.org/dev/peps/pep-0562/ # Requires Python >= 3.7 Lazily perform imports and class definitions on first demand. | 1.399003 | 1 |
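The `__getattr__`/`__dir__` pair above implements PEP 562 lazy loading for the google-ads service transports and clients. As a minimal, self-contained sketch of the same pattern (hypothetical package and submodule names, not part of the google-ads library), the idea reduces to a lookup table plus a module-level `__getattr__`:

import importlib
# lazy_pkg/__init__.py -- minimal PEP 562 sketch (requires Python >= 3.7)
_lazy_name_to_module = {
    "heavy_helper": "lazy_pkg.heavy_helper",  # assumed submodule
    "math_utils": "lazy_pkg.math_utils",      # assumed submodule
}
def __getattr__(name):
    """Import a submodule only on first access and cache it in globals()."""
    if name in _lazy_name_to_module:
        module = importlib.import_module(_lazy_name_to_module[name])
        globals()[name] = module  # later lookups bypass __getattr__
        return module
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
def __dir__():
    return sorted(list(globals()) + list(_lazy_name_to_module))

Accessing `lazy_pkg.heavy_helper` then triggers the import exactly once, which is what keeps the top-level package import cheap.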
PNN/model.py | jingxiufenghua/rec-model | 1,323 | 10654 | <reponame>jingxiufenghua/rec-model
"""
Created on July 20, 2020
Updated on May 19, 2021
model: Product-based Neural Networks for User Response Prediction
@author: <NAME>(<EMAIL>)
"""
import tensorflow as tf
from tensorflow.keras import Model
from tensorflow.keras.regularizers import l2
from tensorflow.keras.layers import Embedding, Dense, Layer, Dropout, Input
from modules import DNN
class PNN(Model):
def __init__(self, feature_columns, hidden_units, mode='in', dnn_dropout=0.,
activation='relu', embed_reg=1e-6, w_z_reg=1e-6, w_p_reg=1e-6, l_b_reg=1e-6):
"""
Product-based Neural Networks
:param feature_columns: A list. sparse column feature information.
:param hidden_units: A list. Neural network hidden units.
:param mode: A string. 'in' IPNN or 'out'OPNN.
:param activation: A string. Activation function of dnn.
:param dnn_dropout: A scalar. Dropout of dnn.
:param embed_reg: A scalar. The regularizer of embedding.
:param w_z_reg: A scalar. The regularizer of w_z_ in product layer
:param w_p_reg: A scalar. The regularizer of w_p in product layer
:param l_b_reg: A scalar. The regularizer of l_b in product layer
"""
super(PNN, self).__init__()
# inner product or outer product
self.mode = mode
self.sparse_feature_columns = feature_columns
# the number of feature fields
self.field_num = len(self.sparse_feature_columns)
self.embed_dim = self.sparse_feature_columns[0]['embed_dim']
# The embedding dimension of each feature field must be the same
self.embed_layers = {
'embed_' + str(i): Embedding(input_dim=feat['feat_num'],
input_length=1,
output_dim=feat['embed_dim'],
embeddings_initializer='random_uniform',
embeddings_regularizer=l2(embed_reg))
for i, feat in enumerate(self.sparse_feature_columns)
}
# parameters
self.w_z = self.add_weight(name='w_z',
shape=(self.field_num, self.embed_dim, hidden_units[0]),
initializer='random_uniform',
regularizer=l2(w_z_reg),
trainable=True
)
if mode == 'in':
self.w_p = self.add_weight(name='w_p',
shape=(self.field_num * (self.field_num - 1) // 2, self.embed_dim,
hidden_units[0]),
initializer='random_uniform',
regularizer=l2(w_p_reg),
trainable=True)
# out
else:
self.w_p = self.add_weight(name='w_p',
shape=(self.field_num * (self.field_num - 1) // 2, self.embed_dim,
self.embed_dim, hidden_units[0]),
initializer='random_uniform',
regularizer=l2(w_p_reg),
trainable=True)
self.l_b = self.add_weight(name='l_b', shape=(hidden_units[0], ),
initializer='random_uniform',
regularizer=l2(l_b_reg),
trainable=True)
# dnn
self.dnn_network = DNN(hidden_units[1:], activation, dnn_dropout)
self.dense_final = Dense(1)
def call(self, inputs):
sparse_inputs = inputs
sparse_embed = [self.embed_layers['embed_{}'.format(i)](sparse_inputs[:, i])
for i in range(sparse_inputs.shape[1])]
sparse_embed = tf.transpose(tf.convert_to_tensor(sparse_embed), [1, 0, 2]) # (None, field_num, embed_dim)
# product layer
row = []
col = []
for i in range(len(self.sparse_feature_columns) - 1):
for j in range(i + 1, len(self.sparse_feature_columns)):
row.append(i)
col.append(j)
p = tf.gather(sparse_embed, row, axis=1)
q = tf.gather(sparse_embed, col, axis=1)
if self.mode == 'in':
l_p = tf.tensordot(p*q, self.w_p, axes=2) # (None, hidden[0])
else: # out
u = tf.expand_dims(q, 2) # (None, field_num(field_num-1)/2, 1, emb_dim)
v = tf.expand_dims(p, 2) # (None, field_num(field_num-1)/2, 1, emb_dim)
l_p = tf.tensordot(tf.matmul(tf.transpose(u, [0, 1, 3, 2]), v), self.w_p, axes=3) # (None, hidden[0])
l_z = tf.tensordot(sparse_embed, self.w_z, axes=2) # (None, hidden[0])
l_1 = tf.nn.relu(tf.concat([l_z + l_p + self.l_b], axis=-1))
# dnn layer
dnn_x = self.dnn_network(l_1)
outputs = tf.nn.sigmoid(self.dense_final(dnn_x))
return outputs
def summary(self):
sparse_inputs = Input(shape=(len(self.sparse_feature_columns),), dtype=tf.int32)
Model(inputs=sparse_inputs, outputs=self.call(sparse_inputs)).summary()
| """
Created on July 20, 2020
Updated on May 19, 2021
model: Product-based Neural Networks for User Response Prediction
@author: <NAME>(<EMAIL>)
"""
import tensorflow as tf
from tensorflow.keras import Model
from tensorflow.keras.regularizers import l2
from tensorflow.keras.layers import Embedding, Dense, Layer, Dropout, Input
from modules import DNN
class PNN(Model):
def __init__(self, feature_columns, hidden_units, mode='in', dnn_dropout=0.,
activation='relu', embed_reg=1e-6, w_z_reg=1e-6, w_p_reg=1e-6, l_b_reg=1e-6):
"""
Product-based Neural Networks
:param feature_columns: A list. sparse column feature information.
:param hidden_units: A list. Neural network hidden units.
:param mode: A string. 'in' IPNN or 'out'OPNN.
:param activation: A string. Activation function of dnn.
:param dnn_dropout: A scalar. Dropout of dnn.
:param embed_reg: A scalar. The regularizer of embedding.
:param w_z_reg: A scalar. The regularizer of w_z_ in product layer
:param w_p_reg: A scalar. The regularizer of w_p in product layer
:param l_b_reg: A scalar. The regularizer of l_b in product layer
"""
super(PNN, self).__init__()
# inner product or outer product
self.mode = mode
self.sparse_feature_columns = feature_columns
# the number of feature fields
self.field_num = len(self.sparse_feature_columns)
self.embed_dim = self.sparse_feature_columns[0]['embed_dim']
# The embedding dimension of each feature field must be the same
self.embed_layers = {
'embed_' + str(i): Embedding(input_dim=feat['feat_num'],
input_length=1,
output_dim=feat['embed_dim'],
embeddings_initializer='random_uniform',
embeddings_regularizer=l2(embed_reg))
for i, feat in enumerate(self.sparse_feature_columns)
}
# parameters
self.w_z = self.add_weight(name='w_z',
shape=(self.field_num, self.embed_dim, hidden_units[0]),
initializer='random_uniform',
regularizer=l2(w_z_reg),
trainable=True
)
if mode == 'in':
self.w_p = self.add_weight(name='w_p',
shape=(self.field_num * (self.field_num - 1) // 2, self.embed_dim,
hidden_units[0]),
initializer='random_uniform',
regularizer=l2(w_p_reg),
trainable=True)
# out
else:
self.w_p = self.add_weight(name='w_p',
shape=(self.field_num * (self.field_num - 1) // 2, self.embed_dim,
self.embed_dim, hidden_units[0]),
initializer='random_uniform',
regularizer=l2(w_p_reg),
trainable=True)
self.l_b = self.add_weight(name='l_b', shape=(hidden_units[0], ),
initializer='random_uniform',
regularizer=l2(l_b_reg),
trainable=True)
# dnn
self.dnn_network = DNN(hidden_units[1:], activation, dnn_dropout)
self.dense_final = Dense(1)
def call(self, inputs):
sparse_inputs = inputs
sparse_embed = [self.embed_layers['embed_{}'.format(i)](sparse_inputs[:, i])
for i in range(sparse_inputs.shape[1])]
sparse_embed = tf.transpose(tf.convert_to_tensor(sparse_embed), [1, 0, 2]) # (None, field_num, embed_dim)
# product layer
row = []
col = []
for i in range(len(self.sparse_feature_columns) - 1):
for j in range(i + 1, len(self.sparse_feature_columns)):
row.append(i)
col.append(j)
p = tf.gather(sparse_embed, row, axis=1)
q = tf.gather(sparse_embed, col, axis=1)
if self.mode == 'in':
l_p = tf.tensordot(p*q, self.w_p, axes=2) # (None, hidden[0])
else: # out
u = tf.expand_dims(q, 2) # (None, field_num(field_num-1)/2, 1, emb_dim)
v = tf.expand_dims(p, 2) # (None, field_num(field_num-1)/2, 1, emb_dim)
l_p = tf.tensordot(tf.matmul(tf.transpose(u, [0, 1, 3, 2]), v), self.w_p, axes=3) # (None, hidden[0])
l_z = tf.tensordot(sparse_embed, self.w_z, axes=2) # (None, hidden[0])
l_1 = tf.nn.relu(tf.concat([l_z + l_p + self.l_b], axis=-1))
# dnn layer
dnn_x = self.dnn_network(l_1)
outputs = tf.nn.sigmoid(self.dense_final(dnn_x))
return outputs
def summary(self):
sparse_inputs = Input(shape=(len(self.sparse_feature_columns),), dtype=tf.int32)
Model(inputs=sparse_inputs, outputs=self.call(sparse_inputs)).summary() | en | 0.6842 | Created on July 20, 2020 Updated on May 19, 2021 model: Product-based Neural Networks for User Response Prediction @author: <NAME>(<EMAIL>) Product-based Neural Networks :param feature_columns: A list. sparse column feature information. :param hidden_units: A list. Neural network hidden units. :param mode: A string. 'in' IPNN or 'out'OPNN. :param activation: A string. Activation function of dnn. :param dnn_dropout: A scalar. Dropout of dnn. :param embed_reg: A scalar. The regularizer of embedding. :param w_z_reg: A scalar. The regularizer of w_z_ in product layer :param w_p_reg: A scalar. The regularizer of w_p in product layer :param l_b_reg: A scalar. The regularizer of l_b in product layer # inner product or outer product # the number of feature fields # The embedding dimension of each feature field must be the same # parameters # out # dnn # (None, field_num, embed_dim) # product layer # (None, hidden[0]) # out # (None, field_num(field_num-1)/2, 1, emb_dim) # (None, field_num(field_num-1)/2, 1, emb_dim) # (None, hidden[0]) # (None, hidden[0]) # dnn layer | 3.030722 | 3 |
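As a usage sketch for the PNN model above (not from the original repo): it assumes the `DNN` block imported from `modules` is importable, and it uses made-up field sizes and random data; each feature-column dict only needs the `feat_num` and `embed_dim` keys the constructor reads, and every field must share the same `embed_dim`.

import numpy as np
import tensorflow as tf
# Three sparse feature fields with a shared embedding dimension.
feature_columns = [
    {"feat_num": 100, "embed_dim": 8},
    {"feat_num": 50, "embed_dim": 8},
    {"feat_num": 20, "embed_dim": 8},
]
model = PNN(feature_columns, hidden_units=[64, 32, 16], mode="in", dnn_dropout=0.2)
model.compile(loss="binary_crossentropy", optimizer="adam", metrics=[tf.keras.metrics.AUC()])
# Synthetic training data: one integer index per field, binary labels.
X = np.stack([np.random.randint(0, f["feat_num"], size=1024) for f in feature_columns], axis=1)
y = np.random.randint(0, 2, size=(1024,))
model.fit(X, y, batch_size=128, epochs=1)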
exercicio3.py | DrokaGit/-infosatc-lp-avaliativo-02 | 0 | 10655 | nume1 = int(input("Digite um numero"))
nume2 = int(input("Digite um numero"))
nume3 = int(input("Digite um numero"))
nume4 = int(input("Digite um numero"))
nume5 = int(input("Digite um numero"))
table = [nume1,nume2,nume3,nume4,nume5]
tableM = (float((nume1 + nume2 + nume3 + nume4 + nume5)))
print(float(tableM)) | nume1 = int(input("Digite um numero"))
nume2 = int(input("Digite um numero"))
nume3 = int(input("Digite um numero"))
nume4 = int(input("Digite um numero"))
nume5 = int(input("Digite um numero"))
table = [nume1,nume2,nume3,nume4,nume5]
tableM = (float((nume1 + nume2 + nume3 + nume4 + nume5)))
print(float(tableM)) | none | 1 | 3.70969 | 4 |
|
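If the intent of the exercise above is the arithmetic mean of the five numbers (the unused `table` list and the `tableM` name hint at that, though the script as written prints their sum), a list-based sketch would be:

numeros = [int(input("Digite um numero")) for _ in range(5)]  # same prompt text as the original
media = sum(numeros) / len(numeros)
print(float(media))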
platonic/platonic/box/implementation.py | anatoly-scherbakov/platonic | 1 | 10656 | <filename>platonic/platonic/box/implementation.py
from typing import TypeVar
from .abstract import AbstractBox
T = TypeVar('T')
class ValueBox(AbstractBox[T]):
_value: T
@property
def value(self) -> T:
return self._value
@value.setter
def value(self, value: T):
self._value = value
| <filename>platonic/platonic/box/implementation.py
from typing import TypeVar
from .abstract import AbstractBox
T = TypeVar('T')
class ValueBox(AbstractBox[T]):
_value: T
@property
def value(self) -> T:
return self._value
@value.setter
def value(self, value: T):
self._value = value
| none | 1 | 2.999521 | 3 |
|
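A short usage sketch for the `ValueBox` above, assuming `AbstractBox` adds no required constructor arguments beyond what is shown here:

box: ValueBox[int] = ValueBox()
box.value = 42        # stored through the property setter
assert box.value == 42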
Schemas/Subject.py | esot0/jmsa-tutoring-backend | 0 | 10657 | <reponame>esot0/jmsa-tutoring-backend<filename>Schemas/Subject.py
from mongoengine import *
class Subject(Document):
subject = StringField() | from mongoengine import *
class Subject(Document):
subject = StringField() | none | 1 | 1.912659 | 2 |
|
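A minimal sketch of how the `Subject` document above might be used with mongoengine; the database name is an assumption:

from mongoengine import connect
connect("tutoring")                    # assumed database name
algebra = Subject(subject="Algebra")   # build a document
algebra.save()                         # persist it to MongoDB
for s in Subject.objects:              # query every stored subject
    print(s.subject)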
39. Combination Sum.py | MapleLove2014/leetcode | 1 | 10658 | class Solution:
def combinationSum(self, candidates, target):
def lookup(candidates, index, target, combine, result):
if target == 0:
result.append(combine)
return
if index >= len(candidates) and target > 0:
return
if target >= candidates[index]:
lookup(candidates, index, target - candidates[index], list(combine) + [candidates[index]], result)
lookup(candidates, index + 1, target, list(combine), result)
candidates.sort()  # sort in place; a bare sorted() call discards its result
result = []
lookup(candidates, 0, target, [], result)
return result
s = Solution()
print(s.combinationSum([2,3,6,7], 7))
print(s.combinationSum([2,3,5], 8))
| class Solution:
def combinationSum(self, candidates, target):
def lookup(candidates, index, target, combine, result):
if target == 0:
result.append(combine)
return
if index >= len(candidates) and target > 0:
return
if target >= candidates[index]:
lookup(candidates, index, target - candidates[index], list(combine) + [candidates[index]], result)
lookup(candidates, index + 1, target, list(combine), result)
candidates.sort()  # sort in place; a bare sorted() call discards its result
result = []
lookup(candidates, 0, target, [], result)
return result
s = Solution()
print(s.combinationSum([2,3,6,7], 7))
print(s.combinationSum([2,3,5], 8))
| none | 1 | 3.487942 | 3 |
|
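The solution above branches on "take the current candidate (index stays the same)" versus "skip it (index advances)". An equivalent backtracking sketch that sorts first and prunes once a candidate exceeds the remaining target is shown below; it produces the same combinations for the sample calls:

def combination_sum(candidates, target):
    candidates = sorted(candidates)
    result = []
    def backtrack(start, remaining, path):
        if remaining == 0:
            result.append(list(path))
            return
        for i in range(start, len(candidates)):
            c = candidates[i]
            if c > remaining:  # sorted, so no later candidate can fit either
                break
            path.append(c)
            backtrack(i, remaining - c, path)  # i, not i + 1: reuse is allowed
            path.pop()
    backtrack(0, target, [])
    return result
print(combination_sum([2, 3, 6, 7], 7))  # [[2, 2, 3], [7]]
print(combination_sum([2, 3, 5], 8))     # [[2, 2, 2, 2], [2, 3, 3], [3, 5]]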
crawl_comments.py | tosh1ki/NicoCrawler | 1 | 10659 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__doc__ = '''
Crawl comment from nicovideo.jp
Usage:
crawl_comments.py --url <url> --mail <mail> --pass <pass> [--sqlite <sqlite>] [--csv <csv>]
Options:
--url <url>
--mail <mail>
--pass <pass>
--sqlite <sqlite> (optional) path of comment DB [default: comments.sqlite3]
--csv <csv> (optional) path of csv file contains urls of videos [default: crawled.csv]
'''
from docopt import docopt
from nicocrawler.nicocrawler import NicoCrawler
if __name__ == '__main__':
# コマンドライン引数の取得
args = docopt(__doc__)
url_channel_toppage = args['--url']
login_mail = args['--mail']
login_pass = args['--pass']
path_sqlite = args['--sqlite']
path_csv = args['--csv']
ncrawler = NicoCrawler(login_mail, login_pass)
ncrawler.connect_sqlite(path_sqlite)
df = ncrawler.get_all_video_url_of_season(url_channel_toppage)
ncrawler.initialize_csv_from_db(path_csv)
# # Get the videos ranked 1-300 in the daily ranking
# url = 'http://www.nicovideo.jp/ranking/fav/daily/all'
# ncrawler.initialize_csv_from_url(url, path_csv, max_page=3)
# ncrawler.get_all_comments_of_csv(path_csv, max_n_iter=1)
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
__doc__ = '''
Crawl comment from nicovideo.jp
Usage:
crawl_comments.py --url <url> --mail <mail> --pass <pass> [--sqlite <sqlite>] [--csv <csv>]
Options:
--url <url>
--mail <mail>
--pass <pass>
--sqlite <sqlite> (optional) path of comment DB [default: comments.sqlite3]
--csv <csv> (optional) path of csv file contains urls of videos [default: crawled.csv]
'''
from docopt import docopt
from nicocrawler.nicocrawler import NicoCrawler
if __name__ == '__main__':
# コマンドライン引数の取得
args = docopt(__doc__)
url_channel_toppage = args['--url']
login_mail = args['--mail']
login_pass = args['--pass']
path_sqlite = args['--sqlite']
path_csv = args['--csv']
ncrawler = NicoCrawler(login_mail, login_pass)
ncrawler.connect_sqlite(path_sqlite)
df = ncrawler.get_all_video_url_of_season(url_channel_toppage)
ncrawler.initialize_csv_from_db(path_csv)
# # Get the videos ranked 1-300 in the daily ranking
# url = 'http://www.nicovideo.jp/ranking/fav/daily/all'
# ncrawler.initialize_csv_from_url(url, path_csv, max_page=3)
# ncrawler.get_all_comments_of_csv(path_csv, max_n_iter=1)
| en | 0.181409 | #!/usr/bin/env python # -*- coding: utf-8 -*- Crawl comment from nicovideo.jp Usage: crawl_comments.py --url <url> --mail <mail> --pass <pass> [--sqlite <sqlite>] [--csv <csv>] Options: --url <url> --mail <mail> --pass <pass> --sqlite <sqlite> (optional) path of comment DB [default: comments.sqlite3] --csv <csv> (optional) path of csv file contains urls of videos [default: crawled.csv] # Get the command-line arguments # # Get the videos ranked 1-300 in the daily ranking # url = 'http://www.nicovideo.jp/ranking/fav/daily/all' # ncrawler.initialize_csv_from_url(url, path_csv, max_page=3) # ncrawler.get_all_comments_of_csv(path_csv, max_n_iter=1) | 2.58352 | 3 |
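The script above relies on `docopt` turning its module docstring into an argument dictionary keyed by the option names. A standalone sketch of that pattern, independent of NicoCrawler's own classes, might look like:

"""Minimal docopt sketch.
Usage:
  fetch.py --url <url> [--out <out>]
Options:
  --url <url>   Page to download.
  --out <out>   Output path [default: page.html].
"""
from docopt import docopt
if __name__ == "__main__":
    args = docopt(__doc__)
    print(args["--url"], args["--out"])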
sif/greedy_sim_max.py | longland-m/wikigen | 0 | 10660 | # Functions to do the greedy similarity maximisation for article:node assignments
# All code is original
import random
def computeSimSum(G, similarityMatrix, asgn):
""" Compute the total similarity sum for the current node:article assignment """
S = sum([similarityMatrix[asgn[j], asgn[i]]
for j in range(len(G)) for i in list(G[j])])
return S
def greedySimilarityMax(G, similarityMatrix, nrounds=5):
pairList = [(a,b) for a in range(len(G)) for b in range(a)]
maxSimSums = []
asgns = []
for i in range(nrounds):
# get random indices for initial node:article assignment
init_ids = list(range(len(G)))
random.shuffle(init_ids)
# assign articles to nodes and compute initial similarity sum
curAsgn = dict((key, init_ids[key]) for key in range(len(G)))
curSimSum = computeSimSum(G, similarityMatrix, curAsgn)
# maximisation loop - repeats until S can't increase
while True:
# for each node pair, swap the nodes recompute similarity sum
simSums = []
for edge in pairList:
tempAsgn = dict(curAsgn)
tempAsgn[edge[0]] = curAsgn[edge[1]]
tempAsgn[edge[1]] = curAsgn[edge[0]]
# Recompute similarity sum
tempSimSum = computeSimSum(G, similarityMatrix, tempAsgn)
simSums.append(tempSimSum)
# find the max possible new similarity score
# then update curAsgn if the new max score > old score
maxNewSimSum = max(simSums)
if maxNewSimSum > curSimSum:
nodesToSwap = pairList[simSums.index(maxNewSimSum)]
oldAsgn = dict(curAsgn)
curAsgn[nodesToSwap[0]] = oldAsgn[nodesToSwap[1]]
curAsgn[nodesToSwap[1]] = oldAsgn[nodesToSwap[0]]
curSimSum = maxNewSimSum # no need to recompute, know the value already
else:
break
maxSimSums.append(curSimSum)
asgns.append(curAsgn)
bestRound = maxSimSums.index(max(maxSimSums))
bestAsgn = asgns[bestRound]
print('Best S = ' + str(maxSimSums[bestRound]))
return bestAsgn
| # Functions to do the greedy similarity maximisation for article:node assignments
# All code is original
import random
def computeSimSum(G, similarityMatrix, asgn):
""" Compute the total similarity sum for the current node:article assignment """
S = sum([similarityMatrix[asgn[j], asgn[i]]
for j in range(len(G)) for i in list(G[j])])
return S
def greedySimilarityMax(G, similarityMatrix, nrounds=5):
pairList = [(a,b) for a in range(len(G)) for b in range(a)]
maxSimSums = []
asgns = []
for i in range(nrounds):
# get random indices for initial node:article assignment
init_ids = list(range(len(G)))
random.shuffle(init_ids)
# assign articles to nodes and compute initial similarity sum
curAsgn = dict((key, init_ids[key]) for key in range(len(G)))
curSimSum = computeSimSum(G, similarityMatrix, curAsgn)
# maximisation loop - repeats until S can't increase
while True:
# for each node pair, swap the nodes recompute similarity sum
simSums = []
for edge in pairList:
tempAsgn = dict(curAsgn)
tempAsgn[edge[0]] = curAsgn[edge[1]]
tempAsgn[edge[1]] = curAsgn[edge[0]]
# Recompute similarity sum
tempSimSum = computeSimSum(G, similarityMatrix, tempAsgn)
simSums.append(tempSimSum)
# find the max possible new similarity score
# then update curAsgn if the new max score > old score
maxNewSimSum = max(simSums)
if maxNewSimSum > curSimSum:
nodesToSwap = pairList[simSums.index(maxNewSimSum)]
oldAsgn = dict(curAsgn)
curAsgn[nodesToSwap[0]] = oldAsgn[nodesToSwap[1]]
curAsgn[nodesToSwap[1]] = oldAsgn[nodesToSwap[0]]
curSimSum = maxNewSimSum # no need to recompute, know the value already
else:
break
maxSimSums.append(curSimSum)
asgns.append(curAsgn)
bestRound = maxSimSums.index(max(maxSimSums))
bestAsgn = asgns[bestRound]
print('Best S = ' + str(maxSimSums[bestRound]))
return bestAsgn
| en | 0.746283 | # Functions to do the greedy similarity maximisation for article:node assignments # All code is original Compute the total similarity sum for the current node:article assignment # get random indices for initial node:article assignment # assign articles to nodes and compute initial similarity sum # maximisation loop - repeats until S can't increase # for each node pair, swap the nodes recompute similarity sum # Recompute similarity sum # find the max possible new similarity score # then update curAsgn if the new max score > old score # no need to recompute, know the value already | 2.86499 | 3 |
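A small driver sketch for the functions above, assuming `G` is a networkx graph with nodes 0..n-1 (so that `len(G)` and `G[j]` behave as `computeSimSum` expects) and that the similarity matrix is a symmetric numpy array; the graph and similarities here are random stand-ins:

import networkx as nx
import numpy as np
n = 20
G = nx.erdos_renyi_graph(n, 0.2, seed=1)  # nodes are 0..n-1
raw = np.random.rand(n, n)
similarityMatrix = (raw + raw.T) / 2      # symmetric article-article similarities
bestAsgn = greedySimilarityMax(G, similarityMatrix, nrounds=3)
print(bestAsgn)                           # dict mapping node index -> article index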
plab/photon_counters/Idq801.py | joamatab/photonic-coupling-drivers | 0 | 10661 | import sys
import numpy as np
import shutil
import time
import itertools as it
import collections
import ctypes as ct
import os
import copy
sys.path.append(os.path.dirname(__file__))
from ThreadStoppable import ThreadStoppable
class Idq801(object):
def __init__(
self,
deviceId=-1,
timestamp_buffer_size=int(1e6),
integration_time_ms=0.5 * 1e3,
coincidence_window_bins=1000,
max_retry=3,
delay_retry_sec=0.01,
clean_data_directory=False,
data_directory="Idq801Data",
processing="external",
):
self._max_retry = max_retry
self._set_check_delay = delay_retry_sec # Delay in seconds between setting and
# checking that a parameter was set.
self._data_directory = data_directory
self._wait_for_settings = 1
self._processing_dict = {"i": "internal", "e": "external"}
processing = processing.lower()
assert processing in self._processing_dict.values()
self._processing = processing
if not os.path.isdir(data_directory):
os.mkdir(data_directory)
if clean_data_directory:
self.clean_data_directory()
module_path = os.path.dirname(__file__) + "/"
if sys.platform == "linux":
self.idq801Lib = ct.CDLL(module_path + "libtdcbase.so")
elif sys.platform == "win32":
self.idq801Lib = ct.CDLL(module_path + "./tdcbase.dll")
else:
raise OSError("Invalid operating system")
if self.idq801Lib.TDC_init(deviceId):
raise RuntimeError("Could not connect to the ID801 counter.")
# Initial parameters.
self.unset_channel(-1)
self.set_timestamp_buffer_size(timestamp_buffer_size)
self.integration_time_ms = integration_time_ms
if self._processing == self._processing_dict["i"]:
self.set_integration_time(integration_time_ms)
else:
self.set_integration_time(1.0e-3) # 1us integration time.
self.set_coincidence_window_bins(coincidence_window_bins)
self._time_last_get_timestamps = time.time()
self.channel_delays = {
"1": 0,
"2": 0,
"3": 0,
"4": 0,
"5": 0,
"6": 0,
"7": 0,
"8": 0,
}
self.set_channel_delays_ns(self.channel_delays)
self.accidental_delay = 0
def __del__(self):
self.idq801Lib.TDC_deInit()
def _set_value(self, set_value, setter, getter):
"""Sets a value and makes sure it was set."""
attempt = 0
is_set = False
while not is_set and attempt < self._max_retry:
attempt += 1
setter(set_value)
time.sleep(self._set_check_delay)
try:
if list(set_value) == list(getter()):
is_set = True
except TypeError:
if set_value == getter():
is_set = True
if not is_set:
raise RuntimeError(
"Unable to set the value using %s to %s after %i attempts."
% (setter.__name__, str(set_value), self._max_retry)
)
def _get_device_params(self):
cm = ct.c_int32()
cw = ct.c_int32()
ew = ct.c_int32()
self.idq801Lib.TDC_getDeviceParams(ct.byref(cm), ct.byref(cw), ct.byref(ew))
return (cm, cw, ew)
def _set_processing(self, processing):
processing = processing.lower()
assert processing in self._processing_dict.values()
self._processing = processing
if processing == self._processing_dict["i"]:
self.set_integration_time(self.integration_time_ms)
return self._processing
def set_processing_internal(self):
return self._set_processing("internal")
def set_processing_external(self):
return self._set_processing("external")
def clean_data_directory(self):
"""
Deletes all data in the `Idq801Data` directory.
"""
shutil.rmtree(self._data_directory, ignore_errors=True)
os.mkdir(self._data_directory)
def get_timebase(self):
self.idq801Lib.TDC_getTimebase.restype = ct.c_double
tb = self.idq801Lib.TDC_getTimebase()
return tb
def get_mask_channels(self):
cm, _, _ = self._get_device_params()
return cm.value
def get_status_channels(self):
cm, cw, ew = self._get_device_params()
channels_enabled = [bool(int(c)) for c in bin(cm.value)[2:]][::-1]
padLength = 8 - len(channels_enabled)
channels_enabled.extend([False] * padLength)
return tuple(channels_enabled)
def get_enabled_channels(self):
channels_status = self.get_status_channels()
channels_enabled = tuple(
i + 1 for i, v in enumerate(channels_status) if v == True
)
return channels_enabled
def get_disabled_channels(self):
channels_status = self.get_status_channels()
channels_disabled = tuple(
i + 1 for i, v in enumerate(channels_status) if v == False
)
return channels_disabled
def is_channel_enabled(self, channel):
assert 1 <= channel <= 8, "Invalid channel choice; must be in the range 1-8."
channel -= 1
channel_status = self.get_status_channels()[channel]
return channel_status
def _get_channel_mask(self, channel, set_unset):
def channel_mask_from_channel_list(channels_enabled):
channel_mask = 0
for b in channels_enabled:
channel_mask |= 1 << (b - 1)  # set bit (b - 1) for channel b
return channel_mask
set_unset = set_unset.lower()
assert set_unset in ("set", "unset"), (
"Invalid `set_unset` choice %s." % set_unset
)
if isinstance(channel, str):
channel = channel.lower()
if channel == "all" or channel == -1:
channel_mask = 0xFF
elif channel in range(1, 9):
channel_mask = 1 << (channel - 1)  # channels 1-8 map to bits 0-7
elif isinstance(channel, collections.Iterable):
channel_mask = channel_mask_from_channel_list(channel)
else:
raise TypeError("Invalid `channel` choice.")
if set_unset == "unset":
channel_mask ^= 0xFF
return channel_mask
def _set_unset_channel(self, channel, set_unset):
self._channel_mask = self._get_channel_mask(channel, set_unset)
self._set_value(
self._channel_mask,
self.idq801Lib.TDC_enableChannels,
self.get_mask_channels,
)
return self._channel_mask
def set_channel(self, channel):
"""Choose which channels to enable.
Options include:
* -1 or 'all' for (all channels).
* A single number for channel to be enabled.
* An iterable containing the channels
to be enabled. e.g. (1,4,5)
* Default is no channels are enabled.
"""
return self._set_unset_channel(channel, "set")
def unset_channel(self, channel):
"""Choose which channels to disable.
Options include:
* -1 or 'all' for (all channels).
* A single number for channel to be disabled.
* An iterable containing the channels
to be disabled. e.g. (1,4,5)
* Default is no channels are disabled.
"""
return self._set_unset_channel(channel, "unset")
def get_coincidence_window_bins(self):
cm, cw, ew = self._get_device_params()
return cw.value
def get_coincidence_window_ns(self):
bin = self.get_timebase()
return bin * self.get_coincidence_window_bins() * 1e9
def set_coincidence_window_bins(self, coincidence_window_bins):
coincidence_window_bins = int(coincidence_window_bins)
if not 0 < coincidence_window_bins <= 65535:
raise ValueError(
"The chosen number of coincidence \
window bins is not in the range (0,65535]."
)
self._set_value(
coincidence_window_bins,
self.idq801Lib.TDC_setCoincidenceWindow,
self.get_coincidence_window_bins,
)
def set_coincidence_window_ns(self, coincidence_window_ns):
bin = self.get_timebase()
coincidence_window_bins = int(coincidence_window_ns * 1e-9 / bin)
return self.set_coincidence_window_bins(coincidence_window_bins)
def get_integration_time(self):
cm, cw, ew = self._get_device_params()
return ew.value
def freeze_buffers(self):
self.idq801Lib.TDC_freezeBuffers(True)
def unfreeze_buffers(self):
self.idq801Lib.TDC_freezeBuffers(False)
def set_integration_time(self, window_time_ms):
window_time_ms = round(window_time_ms)
if self._processing == self._processing_dict["i"]:
if not 0 < window_time_ms <= 65535:
raise ValueError(
"The chosen exposure window is not \
in the range (0,65535]. Can't do more than 65.5s \
integration time internally."
)
self._set_value(
window_time_ms,
self.idq801Lib.TDC_setExposureTime,
self.get_integration_time,
)
def get_data_lost_status(self):
"""Returns true if data is being lost, and false
if data is not being lost.
"""
# Get the status of the lost latch.
lost = ct.c_int32()
self.idq801Lib.TDC_getDataLost(ct.byref(lost))
latch = lost.value
# Calls the function again to clear the lost latch.
self.idq801Lib.TDC_getDataLost(ct.byref(lost))
return latch
def get_timestamp_buffer_size(self):
size = ct.c_int32()
self.idq801Lib.TDC_getTimestampBufferSize(ct.byref(size))
return size.value
def set_timestamp_buffer_size(self, size):
"""`size` is the amount of timestamps that the
the counter will store. Range is 1->1000000
"""
self._set_value(
size,
self.idq801Lib.TDC_setTimestampBufferSize,
self.get_timestamp_buffer_size,
)
def get_timestamps(self, clear_retrieved_timestamps=True, trim_time_s=None):
"""
Gets all the time stamps in the buffer and returns
a dictionary corresponding to the timestamps in each
channel.
args:
clear_retrieved_timestamps(bool): Clears the timestamp
buffer of the IDQ801 after reading.
trim_time_s(float, None): The span of timestamps to keep, in
seconds, measured from the first timestamp.
If `None`, all timestamps are returned. Multiple
channels are all trimmed starting from the lowest
timestamps of all the channels combined.
returns:
dict: A dictionary containing numpy arrays with the
timestamps of each channel. The time from the
last calling of this function is also returned
in the dictionary.
"""
if self.get_timestamp_buffer_size() == 0:
raise RuntimeError(
"The timestamp buffer size is 0. \
Can't get timestamps. Need to set the timestamp \
buffer."
)
r = ct.c_int32(clear_retrieved_timestamps)
ts = (ct.c_int64 * self.get_timestamp_buffer_size())()
c = (ct.c_int8 * self.get_timestamp_buffer_size())()
v = ct.c_int32()
self.idq801Lib.TDC_getLastTimestamps(r, ts, c, ct.byref(v))
time_read = time.time()
time_diff = time_read - self._time_last_get_timestamps
self._time_last_get_timestamps = time_read
channel = np.frombuffer(c, dtype=np.int8)
channel_masks = [
channel == i for i in range(4) if self._channel_mask & (1 << i)
]
timestamps = np.frombuffer(ts, dtype=np.int64)
timestamps_masked = {
str(c + 1): timestamps[c_m] for c, c_m in enumerate(channel_masks)
}
timestamps_masked.update((k, v[v > 0]) for k, v in timestamps_masked.items())
last_counts = []
if trim_time_s:
for timestamps in timestamps_masked.values():
if timestamps.size:
first_count = timestamps[0]
last_counts.append(
first_count + int(trim_time_s / self.get_timebase() + 0.5)
)
if len(last_counts):
last_count = np.min(last_counts)
for channel, timestamps in timestamps_masked.items():
if timestamps.size:
last_idx = np.searchsorted(timestamps, last_count, "right")
timestamps_masked[channel] = timestamps[: last_idx - 1]
timestamps_masked["time_diff"] = time_diff
return timestamps_masked
def _get_coins(self, timestamps_1, timestamps_2, method="2"):
t2 = np.array(timestamps_2, dtype=np.int64)
assert method in ("1", "2"), "Invalid method chosen."
if method == "1":
t1 = np.empty(len(timestamps_1) + 2, dtype=np.int64)
t1[0] = 0
t1[-1] = np.iinfo(np.int64).max
t1[1:-1] = timestamps_1
t2_pos = np.searchsorted(t1, t2)
t1_pos_forw = t2_pos
t1_pos_back = t2_pos - 1
t1_pos_back[t1_pos_back == -1] = 0
dt_forw = np.abs(t1[t1_pos_forw] - t2) <= self.get_coincidence_window_bins()
dt_back = np.abs(t1[t1_pos_back] - t2) <= self.get_coincidence_window_bins()
coin_forw_args = dt_forw.nonzero()[0]
coin_back_args = dt_back.nonzero()[0]
coins_forw = np.c_[t1_pos_forw[coin_forw_args] - 1, coin_forw_args]
coins_back = np.c_[t1_pos_back[coin_back_args] - 1, coin_back_args]
coins = np.vstack((coins_back, coins_forw))
elif method == "2":
t1 = np.array(timestamps_1, dtype=np.int64)
l = np.searchsorted(t1, t2 - self.get_coincidence_window_bins() / 2)
r = np.searchsorted(t1, t2 + self.get_coincidence_window_bins() / 2)
args = np.where(l != r)[0]
coins = np.c_[r[args], args]
return coins
def get_coin_counts(
self, coin_channels, accidentals_delay_ns=None, trim_time_s=None
):
bin = self.get_timebase()
timestamps = self.get_timestamps(
clear_retrieved_timestamps=True, trim_time_s=trim_time_s
)
time_diff = timestamps["time_diff"]
timestamps.pop("time_diff", None)
coin_counts = {}
acc_counts = {}
# Get singles counts
for c in coin_channels:
if str(c) in timestamps:
coin_counts[str(c)] = len(timestamps[str(c)])
else:
coin_counts[str(c)] = 0
coin_combinations = list(it.combinations(coin_channels, 2))
for c in coin_combinations:
# Get coincidence counts
if str(c[0]) in timestamps and str(c[1]) in timestamps:
coin_counts[str(c[0]) + "/" + str(c[1])] = len(
self._get_coins(timestamps[str(c[0])], timestamps[str(c[1])])
)
else:
coin_counts[str(c[0]) + "/" + str(c[1])] = 0
if accidentals_delay_ns != None:
accidentals_delay_bin = int(accidentals_delay_ns * 1e-9 / bin)
for c in coin_combinations:
# Get accidental counts
if str(c[0]) in timestamps and str(c[1]) in timestamps:
acc_counts[str(c[0]) + "/" + str(c[1])] = len(
self._get_coins(
timestamps[str(c[0])],
timestamps[str(c[1])] + accidentals_delay_bin,
)
)
else:
acc_counts[str(c[0]) + "/" + str(c[1])] = 0
return coin_counts, acc_counts, timestamps
def scan_channel_delay(
self, coin_channels, scan_channel, scan_range_ns, integration_time=1.0
):
"""
Scans channel delay electronically - integrates once then applies delays to the timestamps to find coins
Args:
coin_channels: channels to look at coins
scan_channel: channel to scan
scan_range_ns: +/- range of delay in ns
integration_time: initial integration time
Returns: max coin reading, delay in ns of the max, all coin counts, delay range
"""
current_delays_bins = self.get_channel_delays_bins()
self.set_channel_delays_ns({str(coin_channels[0]): 0, str(coin_channels[1]): 0})
bin = self.get_timebase()
self.get_timestamps()
time.sleep(integration_time)
original_timestamps = self.get_timestamps()
delay_range = range(-scan_range_ns, scan_range_ns + 1)
coin_counts = np.zeros(len(delay_range))
timestamps = copy.deepcopy(original_timestamps)
for idd, d in enumerate(delay_range):
timestamps[str(scan_channel)] = copy.deepcopy(
original_timestamps[str(scan_channel)]
) + int(d * 1e-9 / bin)
coin_counts[idd] = len(
self._get_coins(
timestamps[str(coin_channels[0])], timestamps[str(coin_channels[1])]
)
)
print(
"delay channel = %s, delay = %s ns, coin counts = %s"
% (scan_channel, d, int(coin_counts[idd]))
)
max_coin = np.max(coin_counts)
max_coin_delay = delay_range[np.argmax(coin_counts)]
self.set_channel_delays_bins(current_delays_bins)
return max_coin, max_coin_delay, coin_counts, delay_range
def get_timestamps_continuous(self, seconds=-1):
"""Runs `gets_timestamps` continuously in a separate
thread for `seconds` amount of seconds in a loop.
If seconds == -1, it doesn't timeout. Returns a
thread object that can be stopped and started.
"""
time.sleep(self._wait_for_settings)
clear_retrieved_timestamps = True
t = ThreadStoppable(
self.get_timestamps, seconds, True, args=(clear_retrieved_timestamps,)
)
return t
def write_timestamps_to_file(self):
"""Writes the timestamps in the buffer to a
file.
"""
timestamp_dir = "Timestamps"
if not os.path.isdir(self._data_directory + "/" + timestamp_dir):
os.mkdir(self._data_directory + "/" + timestamp_dir)
filename_prefix = (
self._data_directory + "/" + timestamp_dir + "/" + "timestamp_channel_"
)
filenames = [filename_prefix + str(i) + ".dat" for i in range(1, 9)]
for fn in filenames:
if not os.path.exists(fn):
open(fn, "w").close()
ts = self.get_timestamps(clear_retrieved_timestamps=True)
for i, fn in enumerate(filenames):
with open(fn, "a") as fs:
try:
for t in ts[str(i + 1)]:
fs.write(str(t) + "\n")
except KeyError:
pass
def write_timestamps_to_file_continuous(self, seconds=-1):
"""Runs `write_timestamps_to_file` continuously in a separate
thread for `seconds` amount of seconds in a loop. If
seconds == -1, it doesn't timeout. Returns a thread object
that can be stopped and started.
"""
time.sleep(self._wait_for_settings)
t = ThreadStoppable(self.write_timestamps_to_file, seconds)
return t
def get_counters(self):
"""Returns a list of the most recent value of
of the counters.
"""
counters = (ct.c_int32 * 19)()
self.idq801Lib.TDC_getCoincCounters(counters, None)
return list(counters)
def get_counters_continuous(self, seconds=-1):
"""Runs `get_counters` continuously in a separate thread for
`seconds` amount of seconds in a loop. If seconds == -1,
it doesn't timeout. Returns a thread object that can be
stopped and started.
"""
time.sleep(self._wait_for_settings)
t = ThreadStoppable(self.get_counters, seconds, True)
return t
def write_counters_to_file(self, filename="counters.dat"):
"""Writes the most recent values of the internal
counters and coincidence counters to a file
named `filename`.
"""
fn = self._data_directory + "/" + filename
if not os.path.exists(fn):
with open(fn, "w") as fs:
header = (
"1,2,3,4,5,6,7,8,1/2,1/3,1/4,2/3,2/4,3/4,"
"1/2/3,1/2/4,1/3/4,2/3/4,1/2/3/4"
)
fs.write("#" + header + "\n")
counters = self.get_counters()
counters_str = ",".join([str(c) for c in counters])
with open(fn, "a") as fs:
fs.write(counters_str + "\n")
def write_counters_to_file_continuous(self, seconds=-1, filename="counters.dat"):
"""Runs `write_counters_to_file` continuously in a separate
thread for `seconds` amount of seconds in a loop. If
seconds == -1, it doesn't timeout. Returns a thread
object that can be stopped and started.
"""
time.sleep(self._wait_for_settings)
t = ThreadStoppable(
self.write_counters_to_file, seconds, False, args=(filename,)
)
return t
def _get_channel_delays(self):
channels = range(8)
channels = (ct.c_int32 * len(channels))(*channels)
self.idq801Lib.TDC_getChannelDelays(channels)
return channels
def get_channel_delays_bins(self):
return list(self._get_channel_delays())
def get_channel_delays_ns(self):
bin = self.get_timebase()
delays_bins = list(self._get_channel_delays())
return [d * 1e9 * bin for d in delays_bins]
def set_channel_delays_bins(self, delays_bins):
delays = (ct.c_int * len(delays_bins))(*delays_bins)
return self._set_value(
delays, self.idq801Lib.TDC_setChannelDelays, self._get_channel_delays
)
def set_channel_delays_ns(self, delays_ns_dict):
"""
Set channel delays in ns. The delays are in a dictionary.
Args:
delays_ns_dict:
Returns:
"""
delays_ns = self.get_channel_delays_ns()
for channel in delays_ns_dict.keys():
self.channel_delays[str(channel)] = delays_ns[int(channel) - 1]
delays_ns[int(channel) - 1] = delays_ns_dict[str(channel)]
bin = self.get_timebase()
delays_bins = [int(d * 1e-9 / bin) for d in delays_ns]
return self.set_channel_delays_bins(delays_bins)
def main():
idq801 = Idq801()
idq801.clean_data_directory()
idq801.set_channel((1, 2))
# t1 = idq801.write_counters_to_file_continuous(2)
# t2 = idq801.write_timestamps_to_file_continuous(2)
#
if __name__ == "__main__":
main()
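# --- Illustrative sketch (not part of the original driver) ------------------
# `_get_coins` (method "2") above counts, for every timestamp on the second
# channel, whether any timestamp on the first channel falls within half a
# coincidence window on either side, using two sorted searches. The helper
# below repeats that idea on synthetic integer timestamps so the logic can be
# checked without ID801 hardware; the window size and data are arbitrary.
def _coincidence_count_demo(window_bins=10, n=500, seed=0):
    rng = np.random.default_rng(seed)
    t1 = np.sort(rng.integers(0, 10_000, n)).astype(np.int64)
    t2 = np.sort(rng.integers(0, 10_000, n)).astype(np.int64)
    left = np.searchsorted(t1, t2 - window_bins / 2)
    right = np.searchsorted(t1, t2 + window_bins / 2)
    return int(np.count_nonzero(left != right))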
| import sys
import numpy as np
import shutil
import time
import itertools as it
import collections
import ctypes as ct
import os
import copy
sys.path.append(os.path.dirname(__file__))
from ThreadStoppable import ThreadStoppable
class Idq801(object):
def __init__(
self,
deviceId=-1,
timestamp_buffer_size=int(1e6),
integration_time_ms=0.5 * 1e3,
coincidence_window_bins=1000,
max_retry=3,
delay_retry_sec=0.01,
clean_data_directory=False,
data_directory="Idq801Data",
processing="external",
):
self._max_retry = max_retry
self._set_check_delay = delay_retry_sec # Delay in seconds between setting and
# checking that a parameter was set.
self._data_directory = data_directory
self._wait_for_settings = 1
self._processing_dict = {"i": "internal", "e": "external"}
processing = processing.lower()
assert processing in self._processing_dict.values()
self._processing = processing
if not os.path.isdir(data_directory):
os.mkdir(data_directory)
if clean_data_directory:
self.clean_data_directory()
module_path = os.path.dirname(__file__) + "/"
if sys.platform == "linux":
self.idq801Lib = ct.CDLL(module_path + "libtdcbase.so")
elif sys.platform == "win32":
self.idq801Lib = ct.CDLL(module_path + "./tdcbase.dll")
else:
raise OSError("Invalid operating system")
if self.idq801Lib.TDC_init(deviceId):
raise RuntimeError("Could not connect to the ID801 counter.")
# Initial parameters.
self.unset_channel(-1)
self.set_timestamp_buffer_size(timestamp_buffer_size)
self.integration_time_ms = integration_time_ms
if self._processing == self._processing_dict["i"]:
self.set_integration_time(integration_time_ms)
else:
self.set_integration_time(1.0e-3) # 1us integration time.
self.set_coincidence_window_bins(coincidence_window_bins)
self._time_last_get_timestamps = time.time()
self.channel_delays = {
"1": 0,
"2": 0,
"3": 0,
"4": 0,
"5": 0,
"6": 0,
"7": 0,
"8": 0,
}
self.set_channel_delays_ns(self.channel_delays)
self.accidental_delay = 0
def __del__(self):
self.idq801Lib.TDC_deInit()
def _set_value(self, set_value, setter, getter):
"""Sets a value and makes sure it was set."""
attempt = 0
is_set = False
while not is_set and attempt < self._max_retry:
attempt += 1
setter(set_value)
time.sleep(self._set_check_delay)
try:
if list(set_value) == list(getter()):
is_set = True
except TypeError:
if set_value == getter():
is_set = True
if not is_set:
raise RuntimeError(
"Unable to set the value using %s to %s after %i attempts."
% (setter.__name__, str(set_value), self._max_retry)
)
def _get_device_params(self):
cm = ct.c_int32()
cw = ct.c_int32()
ew = ct.c_int32()
self.idq801Lib.TDC_getDeviceParams(ct.byref(cm), ct.byref(cw), ct.byref(ew))
return (cm, cw, ew)
def _set_processing(self, processing):
processing = processing.lower()
assert processing in self._processing_dict.values()
self._processing = processing
if processing == self._processing_dict["i"]:
self.set_integration_time(self.integration_time_ms)
return self._processing
def set_processing_internal(self):
return self._set_processing("internal")
def set_processing_external(self):
return self._set_processing("external")
def clean_data_directory(self):
"""
Deletes all data in the `Idq801Data` directory.
"""
shutil.rmtree(self._data_directory, ignore_errors=True)
os.mkdir(self._data_directory)
def get_timebase(self):
self.idq801Lib.TDC_getTimebase.restype = ct.c_double
tb = self.idq801Lib.TDC_getTimebase()
return tb
def get_mask_channels(self):
cm, _, _ = self._get_device_params()
return cm.value
def get_status_channels(self):
cm, cw, ew = self._get_device_params()
channels_enabled = [bool(int(c)) for c in bin(cm.value)[2:]][::-1]
padLength = 8 - len(channels_enabled)
channels_enabled.extend([False] * padLength)
return tuple(channels_enabled)
def get_enabled_channels(self):
channels_status = self.get_status_channels()
channels_enabled = tuple(
i + 1 for i, v in enumerate(channels_status) if v == True
)
return channels_enabled
def get_disabled_channels(self):
channels_status = self.get_status_channels()
channels_disabled = tuple(
i + 1 for i, v in enumerate(channels_status) if v == False
)
return channels_disabled
def is_channel_enabled(self, channel):
assert 1 <= channel <= 8, "Invalid channel choice; must be in the range 1-8."
channel -= 1
channel_status = self.get_status_channels()[channel]
return channel_status
def _get_channel_mask(self, channel, set_unset):
def channel_mask_from_channel_list(channels_enabled):
channel_mask = 0
for b in channels_enabled:
channel_mask |= 1 << (b - 1)  # set bit (b - 1) for channel b
return channel_mask
set_unset = set_unset.lower()
assert set_unset in ("set", "unset"), (
"Invalid `set_unset` choice %s." % set_unset
)
if isinstance(channel, str):
channel = channel.lower()
if channel == "all" or channel == -1:
channel_mask = 0xFF
elif channel in range(1, 9):
channel_mask = 1 << (channel - 1)  # channels 1-8 map to bits 0-7
elif isinstance(channel, collections.Iterable):
channel_mask = channel_mask_from_channel_list(channel)
else:
raise TypeError("Invalid `channel` choice.")
if set_unset == "unset":
channel_mask ^= 0xFF
return channel_mask
def _set_unset_channel(self, channel, set_unset):
self._channel_mask = self._get_channel_mask(channel, set_unset)
self._set_value(
self._channel_mask,
self.idq801Lib.TDC_enableChannels,
self.get_mask_channels,
)
return self._channel_mask
def set_channel(self, channel):
"""Choose which channels to enable.
Options include:
* -1 or 'all' for (all channels).
* A single number for channel to be enabled.
* An iterable containing the channels
to be enabled. e.g. (1,4,5)
* Default is no channels are enabled.
"""
return self._set_unset_channel(channel, "set")
def unset_channel(self, channel):
"""Choose which channels to disable.
Options include:
* -1 or 'all' for (all channels).
* A single number for channel to be disabled.
* An iterable containing the channels
to be disabled. e.g. (1,4,5)
* Default is no channels are disabled.
"""
return self._set_unset_channel(channel, "unset")
def get_coincidence_window_bins(self):
cm, cw, ew = self._get_device_params()
return cw.value
def get_coincidence_window_ns(self):
bin = self.get_timebase()
return bin * self.get_coincidence_window_bins() * 1e9
def set_coincidence_window_bins(self, coincidence_window_bins):
coincidence_window_bins = int(coincidence_window_bins)
if not 0 < coincidence_window_bins <= 65535:
raise ValueError(
"The chosen number of coincidence \
window bins is not in the range (0,65535]."
)
self._set_value(
coincidence_window_bins,
self.idq801Lib.TDC_setCoincidenceWindow,
self.get_coincidence_window_bins,
)
def set_coincidence_window_ns(self, coincidence_window_ns):
bin = self.get_timebase()
coincidence_window_bins = int(coincidence_window_ns * 1e-9 / bin)
return self.set_coincidence_window_bins(coincidence_window_bins)
def get_integration_time(self):
cm, cw, ew = self._get_device_params()
return ew.value
def freeze_buffers(self):
self.idq801Lib.TDC_freezeBuffers(True)
def unfreeze_buffers(self):
self.idq801Lib.TDC_freezeBuffers(False)
def set_integration_time(self, window_time_ms):
window_time_ms = round(window_time_ms)
if self._processing == self._processing_dict["i"]:
if not 0 < window_time_ms <= 65535:
raise ValueError(
"The chosen exposure window is not \
in the range (0,65535]. Can't do more than 65.5s \
integration time internally."
)
self._set_value(
window_time_ms,
self.idq801Lib.TDC_setExposureTime,
self.get_integration_time,
)
def get_data_lost_status(self):
"""Returns true if data is being lost, and false
if data is not being lost.
"""
# Get the status of the lost latch.
lost = ct.c_int32()
self.idq801Lib.TDC_getDataLost(ct.byref(lost))
latch = lost.value
# Calls the function again to clear the lost latch.
self.idq801Lib.TDC_getDataLost(ct.byref(lost))
return latch
def get_timestamp_buffer_size(self):
size = ct.c_int32()
self.idq801Lib.TDC_getTimestampBufferSize(ct.byref(size))
return size.value
def set_timestamp_buffer_size(self, size):
"""`size` is the amount of timestamps that the
the counter will store. Range is 1->1000000
"""
self._set_value(
size,
self.idq801Lib.TDC_setTimestampBufferSize,
self.get_timestamp_buffer_size,
)
def get_timestamps(self, clear_retrieved_timestamps=True, trim_time_s=None):
"""
Gets all the time stamps in the buffer and returns
a dictionary corresponding to the timestamps in each
channel.
args:
clear_retrieved_timestamps(bool): Clears the timestamp
buffer of the IDQ801 after reading.
trim_time_s(float, None): The span of timestamps to keep, in
seconds, measured from the first timestamp.
If `None`, all timestamps are returned. Multiple
channels are all trimmed starting from the lowest
timestamps of all the channels combined.
returns:
dict: A dictionary containing numpy arrays with the
timestamps of each channel. The time from the
last calling of this function is also returned
in the dictionary.
"""
if self.get_timestamp_buffer_size() == 0:
raise RuntimeError(
"The timestamp buffer size is 0. \
Can't get timestamps. Need to set the timestamp \
buffer."
)
r = ct.c_int32(clear_retrieved_timestamps)
ts = (ct.c_int64 * self.get_timestamp_buffer_size())()
c = (ct.c_int8 * self.get_timestamp_buffer_size())()
v = ct.c_int32()
self.idq801Lib.TDC_getLastTimestamps(r, ts, c, ct.byref(v))
time_read = time.time()
time_diff = time_read - self._time_last_get_timestamps
self._time_last_get_timestamps = time_read
channel = np.frombuffer(c, dtype=np.int8)
channel_masks = [
channel == i for i in range(4) if self._channel_mask & (1 << i)
]
timestamps = np.frombuffer(ts, dtype=np.int64)
timestamps_masked = {
str(c + 1): timestamps[c_m] for c, c_m in enumerate(channel_masks)
}
timestamps_masked.update((k, v[v > 0]) for k, v in timestamps_masked.items())
last_counts = []
if trim_time_s:
for timestamps in timestamps_masked.values():
if timestamps.size:
first_count = timestamps[0]
last_counts.append(
first_count + int(trim_time_s / self.get_timebase() + 0.5)
)
if len(last_counts):
last_count = np.min(last_counts)
for channel, timestamps in timestamps_masked.items():
if timestamps.size:
last_idx = np.searchsorted(timestamps, last_count, "right")
timestamps_masked[channel] = timestamps[: last_idx - 1]
timestamps_masked["time_diff"] = time_diff
return timestamps_masked
def _get_coins(self, timestamps_1, timestamps_2, method="2"):
t2 = np.array(timestamps_2, dtype=np.int64)
assert method in ("1", "2"), "Invalid method chosen."
if method == "1":
t1 = np.empty(len(timestamps_1) + 2, dtype=np.int64)
t1[0] = 0
t1[-1] = np.iinfo(np.int64).max
t1[1:-1] = timestamps_1
t2_pos = np.searchsorted(t1, t2)
t1_pos_forw = t2_pos
t1_pos_back = t2_pos - 1
t1_pos_back[t1_pos_back == -1] = 0
dt_forw = np.abs(t1[t1_pos_forw] - t2) <= self.get_coincidence_window_bins()
dt_back = np.abs(t1[t1_pos_back] - t2) <= self.get_coincidence_window_bins()
coin_forw_args = dt_forw.nonzero()[0]
coin_back_args = dt_back.nonzero()[0]
coins_forw = np.c_[t1_pos_forw[coin_forw_args] - 1, coin_forw_args]
coins_back = np.c_[t1_pos_back[coin_back_args] - 1, coin_back_args]
coins = np.vstack((coins_back, coins_forw))
elif method == "2":
t1 = np.array(timestamps_1, dtype=np.int64)
l = np.searchsorted(t1, t2 - self.get_coincidence_window_bins() / 2)
r = np.searchsorted(t1, t2 + self.get_coincidence_window_bins() / 2)
args = np.where(l != r)[0]
coins = np.c_[r[args], args]
return coins
def get_coin_counts(
self, coin_channels, accidentals_delay_ns=None, trim_time_s=None
):
bin = self.get_timebase()
timestamps = self.get_timestamps(
clear_retrieved_timestamps=True, trim_time_s=trim_time_s
)
time_diff = timestamps["time_diff"]
timestamps.pop("time_diff", None)
coin_counts = {}
acc_counts = {}
# Get singles counts
for c in coin_channels:
if str(c) in timestamps:
coin_counts[str(c)] = len(timestamps[str(c)])
else:
coin_counts[str(c)] = 0
coin_combinations = list(it.combinations(coin_channels, 2))
for c in coin_combinations:
# Get coincidence counts
if str(c[0]) in timestamps and str(c[1]) in timestamps:
coin_counts[str(c[0]) + "/" + str(c[1])] = len(
self._get_coins(timestamps[str(c[0])], timestamps[str(c[1])])
)
else:
coin_counts[str(c[0]) + "/" + str(c[1])] = 0
if accidentals_delay_ns != None:
accidentals_delay_bin = int(accidentals_delay_ns * 1e-9 / bin)
for c in coin_combinations:
# Get accidental counts
if str(c[0]) in timestamps and str(c[1]) in timestamps:
acc_counts[str(c[0]) + "/" + str(c[1])] = len(
self._get_coins(
timestamps[str(c[0])],
timestamps[str(c[1])] + accidentals_delay_bin,
)
)
else:
acc_counts[str(c[0]) + "/" + str(c[1])] = 0
return coin_counts, acc_counts, timestamps
def scan_channel_delay(
self, coin_channels, scan_channel, scan_range_ns, integration_time=1.0
):
"""
Scans channel delay electronically - integrates once then applies delays to the timestamps to find coins
Args:
coin_channels: channels to look at coins
scan_channel: channel to scan
scan_range_ns: +/- range of delay in ns
integration_time: initial integration time
Returns: max coin reading, delay in ns of the max, all coin counts, delay range
"""
current_delays_bins = self.get_channel_delays_bins()
self.set_channel_delays_ns({str(coin_channels[0]): 0, str(coin_channels[1]): 0})
bin = self.get_timebase()
self.get_timestamps()
time.sleep(integration_time)
original_timestamps = self.get_timestamps()
delay_range = range(-scan_range_ns, scan_range_ns + 1)
coin_counts = np.zeros(len(delay_range))
timestamps = copy.deepcopy(original_timestamps)
for idd, d in enumerate(delay_range):
timestamps[str(scan_channel)] = copy.deepcopy(
original_timestamps[str(scan_channel)]
) + int(d * 1e-9 / bin)
coin_counts[idd] = len(
self._get_coins(
timestamps[str(coin_channels[0])], timestamps[str(coin_channels[1])]
)
)
print(
"delay channel = %s, delay = %s ns, coin counts = %s"
% (scan_channel, d, int(coin_counts[idd]))
)
max_coin = np.max(coin_counts)
max_coin_delay = delay_range[np.argmax(coin_counts)]
self.set_channel_delays_bins(current_delays_bins)
return max_coin, max_coin_delay, coin_counts, delay_range
def get_timestamps_continuous(self, seconds=-1):
"""Runs `gets_timestamps` continuously in a separate
thread for `seconds` amount of seconds in a loop.
If seconds == -1, it doesn't timeout. Returns a
thread object that can be stopped and started.
"""
time.sleep(self._wait_for_settings)
clear_retrieved_timestamps = True
t = ThreadStoppable(
self.get_timestamps, seconds, True, args=(clear_retrieved_timestamps,)
)
return t
def write_timestamps_to_file(self):
"""Writes the timestamps in the buffer to a
file.
"""
timestamp_dir = "Timestamps"
if not os.path.isdir(self._data_directory + "/" + timestamp_dir):
os.mkdir(self._data_directory + "/" + timestamp_dir)
filename_prefix = (
self._data_directory + "/" + timestamp_dir + "/" + "timestamp_channel_"
)
filenames = [filename_prefix + str(i) + ".dat" for i in range(1, 9)]
for fn in filenames:
if not os.path.exists(fn):
open(fn, "w").close()
ts = self.get_timestamps(clear_retrieved_timestamps=True)
for i, fn in enumerate(filenames):
with open(fn, "a") as fs:
try:
for t in ts[str(i + 1)]:
fs.write(str(t) + "\n")
except KeyError:
pass
def write_timestamps_to_file_continuous(self, seconds=-1):
"""Runs `write_timestamps_to_file` continuously in a separate
thread for `seconds` amount of seconds in a loop. If
seconds == -1, it doesn't timeout. Returns a thread object
that can be stopped and started.
"""
time.sleep(self._wait_for_settings)
t = ThreadStoppable(self.write_timestamps_to_file, seconds)
return t
def get_counters(self):
"""Returns a list of the most recent value of
of the counters.
"""
counters = (ct.c_int32 * 19)()
self.idq801Lib.TDC_getCoincCounters(counters, None)
return list(counters)
def get_counters_continuous(self, seconds=-1):
"""Runs `get_counters` continuously in a separate thread for
`seconds` amount of seconds in a loop. If seconds == -1,
it doesn't timeout. Returns a thread object that can be
stopped and started.
"""
time.sleep(self._wait_for_settings)
t = ThreadStoppable(self.get_counters, seconds, True)
return t
def write_counters_to_file(self, filename="counters.dat"):
"""Writes the most recent values of the internal
counters and coincidence counters to a file
named `filename`.
"""
fn = self._data_directory + "/" + filename
if not os.path.exists(fn):
with open(fn, "w") as fs:
header = (
"1,2,3,4,5,6,7,8,1/2,1/3,1/4,2/3,2/4,3/4,"
"1/2/3,1/2/4,1/3/4,2/3/4,1/2/3/4"
)
fs.write("#" + header + "\n")
counters = self.get_counters()
counters_str = ",".join([str(c) for c in counters])
with open(fn, "a") as fs:
fs.write(counters_str + "\n")
def write_counters_to_file_continuous(self, seconds=-1, filename="counters.dat"):
"""Runs `write_counters_to_file` continuously in a separate
thread for `seconds` amount of seconds in a loop. If
seconds == -1, it doesn't timeout. Returns a thread
object that can be stopped and started.
"""
time.sleep(self._wait_for_settings)
t = ThreadStoppable(
self.write_counters_to_file, seconds, False, args=(filename,)
)
return t
def _get_channel_delays(self):
channels = range(8)
channels = (ct.c_int32 * len(channels))(*channels)
self.idq801Lib.TDC_getChannelDelays(channels)
return channels
def get_channel_delays_bins(self):
return list(self._get_channel_delays())
def get_channel_delays_ns(self):
bin = self.get_timebase()
delays_bins = list(self._get_channel_delays())
return [d * 1e9 * bin for d in delays_bins]
def set_channel_delays_bins(self, delays_bins):
delays = (ct.c_int * len(delays_bins))(*delays_bins)
return self._set_value(
delays, self.idq801Lib.TDC_setChannelDelays, self._get_channel_delays
)
def set_channel_delays_ns(self, delays_ns_dict):
"""
Set channel delays in ns. The delays are in a dictionary.
Args:
delays_ns_dict:
Returns:
"""
delays_ns = self.get_channel_delays_ns()
for channel in delays_ns_dict.keys():
self.channel_delays[str(channel)] = delays_ns[int(channel) - 1]
delays_ns[int(channel) - 1] = delays_ns_dict[str(channel)]
bin = self.get_timebase()
delays_bins = [int(d * 1e-9 / bin) for d in delays_ns]
return self.set_channel_delays_bins(delays_bins)
def main():
idq801 = Idq801()
idq801.clean_data_directory()
idq801.set_channel((1, 2))
# t1 = idq801.write_counters_to_file_continuous(2)
# t2 = idq801.write_timestamps_to_file_continuous(2)
#
if __name__ == "__main__":
main()
| en | 0.882109 | # Delay in seconds between setting and # checking that a parameter was set. # Initial parameters. # 1us integration time. Sets a value and makes sure it was set. Deletes all data in the `Idq801Data` directory. Choose which channels to enable. Options include: * -1 or 'all' for (all channels). * A single number for channel to be enabled. * An iterable containing the channels to be enables. e.g. (1,4,5) * Default is no channels are enabled. Choose which channels to disable. Options include: * -1 or 'all' for (all channels). * A single number for channel to be disabled. * An iterable containing the channels to be disables. e.g. (1,4,5) * Default is no channels are disabled. Returns true if data is being lost, and false if data is not being lost. # Get the status of the lost latch. # Calls the function again to clear the lost latch. `size` is the amount of timestamps that the the counter will store. Range is 1->1000000 Gets all the time stamps in the buffer and returns a dictionary corresponding to the timestamps in each channel. args: clear_retrieved_timestamps(bool): Clears the timestamp buffer of the IDQ801 after reading. trim_time_s(float, None): The amount of timestamps, in seconds, from the import first timestamps to keep. If `None`, all timestamps are returned. Multiple channels are all trimmed starting from the lowest timestamps of all the channels combined. returns: dict: A dictionary containing numpy arrays with the timestamps of each channel. The time from the last calling of this function is also returned in the dictionary. # Get singles counts # Get coincidence counts # Get accidental counts Scans channel delay electronically - integrates once then applies delays to the timestamps to find coins Args: coin_channels: channels to look at coins scan_channel: channel to scan scan_range_ns: +/- range of delay in ns integration_time: initial integration time Returns: max coin reading, delay in ns of the max, all coin counts, delay range Runs `gets_timestamps` continuously in a separate thread for `seconds` amount of seconds in a loop. If seconds == -1, it doesn't timeout. Returns a thread object that can be stopped and started. Writes the timestamps in the buffer to a file. Runs `write_timestamps_to_file` continuously in a separate thread for `seconds` amount of seconds in a loop. If seconds == -1, it doesn't timeout. Returns a thread object that can be stopped and started. Returns a list of the most recent value of of the counters. Runs `get_counters` continuously in a separate thread for `seconds` amount of seconds in a loop. If seconds == -1, it doesn't timeout. Returns a thread object that can be stopped and started. Writes the most recent values of the internal counters and coincidence counters to a file named `filename`. Runs `write_counters_to_file` continuously in a separate thread for `seconds` amount of seconds in a loop. If seconds == -1, it doesn't timeout. Returns a thread object that can be stopped and started. Set channel delays in ns. The delays are in a dictionary. Args: delays_ns_dict: Returns: # t1 = idq801.write_counters_to_file_continuous(2) # t2 = idq801.write_timestamps_to_file_continuous(2) # | 2.096775 | 2 |
IRIS/IRIS_formatting.py | Xinglab/IRIS | 7 | 10662 | import sys, numpy, argparse, os
def loadSamplelist(fin_samples, sample_fin_list, sample_header, sample_name_field, sample_size):
for l in open(fin_samples):
ls=l.strip()
sample_fin_list.append(ls)
for r in open(ls):
rs=map(lambda x:x.split('/')[-sample_name_field].split('.bam')[0],r.strip().strip(',').split(','))
#rs=map(lambda x:x.split('/')[-2],r.strip().strip(',').split(','))
if sample_name_field==2:
sn_list=r.strip().strip(',').split(',')
for e,sn in enumerate(rs):
if len(sn)==0:
rs[e]=sn_list[e].split('/')[-1].split('.')[0]
sample_header+=rs
sample_size[ls]=len(r.split(','))
return sample_fin_list, sample_header, sample_size
def mergeEvents(events_fin_list):
total_event_dict={}
for events_fin in events_fin_list:
for index,event_l in enumerate(open(events_fin)):
if index==0:
continue
event_ls=event_l.strip().split('\t')
events_cord=event_ls[1].strip('"')+'\t'+event_ls[2].strip('"')+'\t'+'\t'.join(event_ls[3:7]+event_ls[8:10])
if events_cord in total_event_dict:
continue
total_event_dict[events_cord]=''
return total_event_dict
def writeMergedEvents(events_fin_list, splicing_event_type, cov_cutoff, data_name, fout_path):
total_event_dict=mergeEvents(events_fin_list)
print len(total_event_dict)
total_event_list=sorted(total_event_dict.keys())
fout=open(fout_path+'/prefilter_events.splicing_matrix.'+splicing_event_type+'.cov'+str(cov_cutoff)+'.'+data_name+'.txt','w')
for e in total_event_list:
fout.write(e.strip()+'\n')
fout.close()
return total_event_list
def mergeMatrixInBatch(fin_list, events_fin_list, sample_fin_list, cov_cutoff, data_name, splicing_event_type, sample_header, sample_size, total_event_list, file_batch_list, batch, fout_path):
for b in range(0,len(total_event_list),batch):
Intercep_Matrix={}
print '[INFO] Merging in progress. Working on batch ',b
batch_event_list= total_event_list[b:min(b+batch,len(total_event_list))]
batch_event_dict= dict.fromkeys(batch_event_list, 0)
for n,fin in enumerate(fin_list):
eventID={}
for index,event_l in enumerate(open(events_fin_list[n])):
if index==0:
continue
event_ls=event_l.strip().split('\t')
event_cord=event_ls[1].strip('"')+'\t'+event_ls[2].strip('"')+'\t'+'\t'.join(event_ls[3:7]+event_ls[8:10])
if event_cord in batch_event_dict:
eventID[event_ls[0]]=event_cord
print '[INFO] Merging file: ', fin, len(eventID)
for index,r in enumerate(open(fin)):
if index==0:
continue
rs=r.strip().split('\t')
if rs[0] not in eventID:
continue
Incl=map(float,rs[1].split(','))
Skip=map(float,rs[2].split(','))
Cov=[num+Skip[o] for o,num in enumerate(Incl)]
psi_values=[]
for i,I in enumerate(Incl):
if int(I)+int(Skip[i])==0:
psi_values.append('NaN')
else:
psi_values.append(str(round(I/int(rs[5])/(I/int(rs[5])+Skip[i]/int(rs[6])),4)))
if eventID[rs[0]] not in Intercep_Matrix:
Intercep_Matrix[eventID[rs[0]]]={}
if sample_fin_list[n] not in Intercep_Matrix[eventID[rs[0]]]:
Intercep_Matrix[eventID[rs[0]]][sample_fin_list[n]]=(psi_values,Cov)
if len(psi_values)!=sample_size[sample_fin_list[n]]:
exit('[Abort] Sample number does not match observations in JC file.')
file_batch_list.append(fout_path+'/splicing_matrix/splicing_matrix.'+splicing_event_type+'.cov'+str(cov_cutoff)+'.'+data_name+'.txt.batch_'+str(b)+'.txt')
fout=open(fout_path+'/splicing_matrix/splicing_matrix.'+splicing_event_type+'.cov'+str(cov_cutoff)+'.'+data_name+'.txt.batch_'+str(b)+'.txt','w')
fout.write('AC\tGeneName\tchr\tstrand\texonStart\texonEnd\tupstreamEE\tdownstreamES\t'+'\t'.join(sample_header)+'\n')
for k in sorted(Intercep_Matrix.keys()):
psi_value_all=[]
cov_all=[]
for sample in sample_fin_list:
if sample in Intercep_Matrix[k]:
psi_value_all+=Intercep_Matrix[k][sample][0]
cov_all+=Intercep_Matrix[k][sample][1]
else:
psi_value_all+=['NaN']*sample_size[sample]
mean=numpy.mean(cov_all)
if mean>=cov_cutoff:
fout.write(k+'\t'+'\t'.join(psi_value_all)+'\n')
fout.close()
return file_batch_list
def mergeMatrixInOne(file_batch_list, cov_cutoff, data_name, splicing_event_type, fout_path):
fout_merge=open(fout_path+'/splicing_matrix/splicing_matrix.'+splicing_event_type+'.cov'+str(cov_cutoff)+'.'+data_name+'.txt','w')
header=0
for file_batch in file_batch_list:
for j,l in enumerate(open(file_batch)):
if j==0:
if header==0:
header+=1
fout_merge.write(l)
continue
fout_merge.write(l)
fout_merge.close()
os.system('rm '+fout_path+'/splicing_matrix/splicing_matrix.'+splicing_event_type+'.cov'+str(cov_cutoff)+'.'+data_name+'.txt.batch_*.txt')
return 'splicing_matrix.'+splicing_event_type+'.cov'+str(cov_cutoff)+'.'+data_name+'.txt'
def index_PsiMatrix(fn,outdir,delim):
out_fp = outdir+'/'+fn.split('/')[-1]+'.idx'
line_formatter = "{id}\t{offset}\n"
offset = 0
with open(fn, 'r') as fin:
with open(out_fp, 'w') as fout:
offset += len(fin.readline())
for line in fin:
ele = line.strip().split(delim)
eid = ':'.join([ele[0].split('_')[0].split('.')[0]]+ele[1:8])
fout.write( line_formatter.format(id=eid, offset=offset) )
offset += len(line)
return
def main(args):
cov_cutoff=args.cov_cutoff
data_name=args.data_name
sample_name_field=args.sample_name_field
splicing_event_type=args.splicing_event_type
if sample_name_field==1:
print '[INFO] Sample name parsed from bam file. (alternatively can be parsed from up level folder)'
if sample_name_field==2:
print '[INFO] Sample name parsed from folder name above the bam file. (alternatively can be parsed from bam file)'
db_dir=args.iris_db_path.rstrip('/')
#prepare files/folders in IRIS db directory
os.system('mkdir -p '+db_dir+'/'+data_name+' '+db_dir+'/'+data_name+'/splicing_matrix')
fout_path=db_dir+'/'+data_name
print '[INFO] output path: '+fout_path
fin_list=[]
sample_fin_list=[]
events_fin_list=[]
sample_size={}
sample_header=[]
file_batch_list=[]
#PARSING INPUT FILE LISTS
fin_list=[l.strip().rstrip('/')+'/JC.raw.input.'+splicing_event_type+'.txt' for l in open(args.rmats_mat_path_manifest)]
events_fin_list=[l.strip().rstrip('/')+'/fromGTF.'+splicing_event_type+'.txt' for l in open(args.rmats_mat_path_manifest)]
sample_fin_list, sample_header, sample_size= loadSamplelist(args.rmats_sample_order,sample_fin_list, sample_header,sample_name_field, sample_size)
#MAKING MERGED EVENTS LIST
total_event_list= writeMergedEvents(events_fin_list, splicing_event_type, cov_cutoff, data_name, fout_path)
if args.merge_events_only:
exit('[INFO] Done merging events only.')
print '[INFO] Done loading file dir', len(total_event_list)
#START MERGING MATRICES IN BATCH MODE FOLLOWING EVENTS LIST GENERATED.
batch=20000
file_batch_list=mergeMatrixInBatch(fin_list, events_fin_list, sample_fin_list, cov_cutoff, data_name, splicing_event_type, sample_header, sample_size, total_event_list, file_batch_list, batch, fout_path)
print '[INFO] Done merging matrices by batch.'
merged_file_name=mergeMatrixInOne(file_batch_list, cov_cutoff, data_name, splicing_event_type, fout_path)
print '[INFO] Done merging matrices: '+merged_file_name
#create index in IRIS db directory
index_PsiMatrix(fout_path+'/splicing_matrix/'+merged_file_name,fout_path+'/splicing_matrix','\t')
print '[INFO] Finished. Created matrix: '+fout_path
if __name__ == '__main__':
main()
| import sys, numpy, argparse, os
def loadSamplelist(fin_samples, sample_fin_list, sample_header, sample_name_field, sample_size):
for l in open(fin_samples):
ls=l.strip()
sample_fin_list.append(ls)
for r in open(ls):
rs=map(lambda x:x.split('/')[-sample_name_field].split('.bam')[0],r.strip().strip(',').split(','))
#rs=map(lambda x:x.split('/')[-2],r.strip().strip(',').split(','))
if sample_name_field==2:
sn_list=r.strip().strip(',').split(',')
for e,sn in enumerate(rs):
if len(sn)==0:
rs[e]=sn_list[e].split('/')[-1].split('.')[0]
sample_header+=rs
sample_size[ls]=len(r.split(','))
return sample_fin_list, sample_header, sample_size
def mergeEvents(events_fin_list):
total_event_dict={}
for events_fin in events_fin_list:
for index,event_l in enumerate(open(events_fin)):
if index==0:
continue
event_ls=event_l.strip().split('\t')
events_cord=event_ls[1].strip('"')+'\t'+event_ls[2].strip('"')+'\t'+'\t'.join(event_ls[3:7]+event_ls[8:10])
if events_cord in total_event_dict:
continue
total_event_dict[events_cord]=''
return total_event_dict
def writeMergedEvents(events_fin_list, splicing_event_type, cov_cutoff, data_name, fout_path):
total_event_dict=mergeEvents(events_fin_list)
print len(total_event_dict)
total_event_list=sorted(total_event_dict.keys())
fout=open(fout_path+'/prefilter_events.splicing_matrix.'+splicing_event_type+'.cov'+str(cov_cutoff)+'.'+data_name+'.txt','w')
for e in total_event_list:
fout.write(e.strip()+'\n')
fout.close()
return total_event_list
def mergeMatrixInBatch(fin_list, events_fin_list, sample_fin_list, cov_cutoff, data_name, splicing_event_type, sample_header, sample_size, total_event_list, file_batch_list, batch, fout_path):
for b in range(0,len(total_event_list),batch):
Intercep_Matrix={}
print '[INFO] Merging in progress. Working on batch ',b
batch_event_list= total_event_list[b:min(b+batch,len(total_event_list))]
batch_event_dict= dict.fromkeys(batch_event_list, 0)
for n,fin in enumerate(fin_list):
eventID={}
for index,event_l in enumerate(open(events_fin_list[n])):
if index==0:
continue
event_ls=event_l.strip().split('\t')
event_cord=event_ls[1].strip('"')+'\t'+event_ls[2].strip('"')+'\t'+'\t'.join(event_ls[3:7]+event_ls[8:10])
if event_cord in batch_event_dict:
eventID[event_ls[0]]=event_cord
print '[INFO] Merging file: ', fin, len(eventID)
for index,r in enumerate(open(fin)):
if index==0:
continue
rs=r.strip().split('\t')
if rs[0] not in eventID:
continue
Incl=map(float,rs[1].split(','))
Skip=map(float,rs[2].split(','))
Cov=[num+Skip[o] for o,num in enumerate(Incl)]
psi_values=[]
for i,I in enumerate(Incl):
if int(I)+int(Skip[i])==0:
psi_values.append('NaN')
else:
psi_values.append(str(round(I/int(rs[5])/(I/int(rs[5])+Skip[i]/int(rs[6])),4)))
if eventID[rs[0]] not in Intercep_Matrix:
Intercep_Matrix[eventID[rs[0]]]={}
if sample_fin_list[n] not in Intercep_Matrix[eventID[rs[0]]]:
Intercep_Matrix[eventID[rs[0]]][sample_fin_list[n]]=(psi_values,Cov)
if len(psi_values)!=sample_size[sample_fin_list[n]]:
exit('[Abort] Sample number does not match observations in JC file.')
file_batch_list.append(fout_path+'/splicing_matrix/splicing_matrix.'+splicing_event_type+'.cov'+str(cov_cutoff)+'.'+data_name+'.txt.batch_'+str(b)+'.txt')
fout=open(fout_path+'/splicing_matrix/splicing_matrix.'+splicing_event_type+'.cov'+str(cov_cutoff)+'.'+data_name+'.txt.batch_'+str(b)+'.txt','w')
fout.write('AC\tGeneName\tchr\tstrand\texonStart\texonEnd\tupstreamEE\tdownstreamES\t'+'\t'.join(sample_header)+'\n')
for k in sorted(Intercep_Matrix.keys()):
psi_value_all=[]
cov_all=[]
for sample in sample_fin_list:
if sample in Intercep_Matrix[k]:
psi_value_all+=Intercep_Matrix[k][sample][0]
cov_all+=Intercep_Matrix[k][sample][1]
else:
psi_value_all+=['NaN']*sample_size[sample]
mean=numpy.mean(cov_all)
if mean>=cov_cutoff:
fout.write(k+'\t'+'\t'.join(psi_value_all)+'\n')
fout.close()
return file_batch_list
def mergeMatrixInOne(file_batch_list, cov_cutoff, data_name, splicing_event_type, fout_path):
fout_merge=open(fout_path+'/splicing_matrix/splicing_matrix.'+splicing_event_type+'.cov'+str(cov_cutoff)+'.'+data_name+'.txt','w')
header=0
for file_batch in file_batch_list:
for j,l in enumerate(open(file_batch)):
if j==0:
if header==0:
header+=1
fout_merge.write(l)
continue
fout_merge.write(l)
fout_merge.close()
os.system('rm '+fout_path+'/splicing_matrix/splicing_matrix.'+splicing_event_type+'.cov'+str(cov_cutoff)+'.'+data_name+'.txt.batch_*.txt')
return 'splicing_matrix.'+splicing_event_type+'.cov'+str(cov_cutoff)+'.'+data_name+'.txt'
def index_PsiMatrix(fn,outdir,delim):
out_fp = outdir+'/'+fn.split('/')[-1]+'.idx'
line_formatter = "{id}\t{offset}\n"
offset = 0
with open(fn, 'r') as fin:
with open(out_fp, 'w') as fout:
offset += len(fin.readline())
for line in fin:
ele = line.strip().split(delim)
eid = ':'.join([ele[0].split('_')[0].split('.')[0]]+ele[1:8])
fout.write( line_formatter.format(id=eid, offset=offset) )
offset += len(line)
return
def main(args):
cov_cutoff=args.cov_cutoff
data_name=args.data_name
sample_name_field=args.sample_name_field
splicing_event_type=args.splicing_event_type
if sample_name_field==1:
print '[INFO] Sample name parsed from bam file. (alternatively can be parsed from up level folder)'
if sample_name_field==2:
print '[INFO] Sample name parsed from folder name above the bam file. (alternatively can be parsed from bam file)'
db_dir=args.iris_db_path.rstrip('/')
#prepare files/folders in IRIS db directory
os.system('mkdir -p '+db_dir+'/'+data_name+' '+db_dir+'/'+data_name+'/splicing_matrix')
fout_path=db_dir+'/'+data_name
print '[INFO] output path: '+fout_path
fin_list=[]
sample_fin_list=[]
events_fin_list=[]
sample_size={}
sample_header=[]
file_batch_list=[]
#PARSING INPUT FILE LISTS
fin_list=[l.strip().rstrip('/')+'/JC.raw.input.'+splicing_event_type+'.txt' for l in open(args.rmats_mat_path_manifest)]
events_fin_list=[l.strip().rstrip('/')+'/fromGTF.'+splicing_event_type+'.txt' for l in open(args.rmats_mat_path_manifest)]
sample_fin_list, sample_header, sample_size= loadSamplelist(args.rmats_sample_order,sample_fin_list, sample_header,sample_name_field, sample_size)
#MAKING MERGED EVENTS LIST
total_event_list= writeMergedEvents(events_fin_list, splicing_event_type, cov_cutoff, data_name, fout_path)
if args.merge_events_only:
exit('[INFO] Done merging events only.')
print '[INFO] Done loading file dir', len(total_event_list)
#START MERGING MATRICES IN BATCH MODE FOLLOWING EVENTS LIST GENERATED.
batch=20000
file_batch_list=mergeMatrixInBatch(fin_list, events_fin_list, sample_fin_list, cov_cutoff, data_name, splicing_event_type, sample_header, sample_size, total_event_list, file_batch_list, batch, fout_path)
print '[INFO] Done merging matrices by batch.'
merged_file_name=mergeMatrixInOne(file_batch_list, cov_cutoff, data_name, splicing_event_type, fout_path)
print '[INFO] Done merging matrices: '+merged_file_name
#create index in IRIS db directory
index_PsiMatrix(fout_path+'/splicing_matrix/'+merged_file_name,fout_path+'/splicing_matrix','\t')
print '[INFO] Finished. Created matrix: '+fout_path
if __name__ == '__main__':
main()
| en | 0.285783 | #rs=map(lambda x:x.split('/')[-2],r.strip().strip(',').split(',')) #prepare files/folders in IRIS db directory #PARSING INPUT FILE LISTS #MAKING MERGED EVENTS LIST #START MERGING MATRICES IN BATCH MODE FOLLOWING EVENTS LIST GENERATED. #create index in IRIS db directory | 2.475284 | 2 |
quests/dataflow_python/streaming_event_generator.py | Glairly/introduction_to_tensorflow | 2 | 10663 | # This program reads a file representing web server logs in common log format and streams them into a PubSub topic
# with lag characteristics as determined by command-line arguments
import argparse
from google.cloud import pubsub_v1
import time
from datetime import datetime, timezone
import random
from anytree.importer import DictImporter
import json
from multiprocessing import Process
parser = argparse.ArgumentParser(__file__, description="event_generator")
parser.add_argument("--taxonomy", "-x", dest="taxonomy_fp",
help="A .json file representing a taxonomy of web resources",
default="taxonomy.json")
parser.add_argument("--users_fp", "-u", dest="users_fp",
help="A .csv file of users",
default="users.csv")
parser.add_argument("--off_to_on", "-off", dest="off_to_on_prob", type=float,
help="A float representing the probability that a user who is offline will come online",
default=.25)
parser.add_argument("--on_to_off", "-on", dest="on_to_off_prob", type=float,
help="A float representing the probability that a user who is online will go offline",
default=.1)
parser.add_argument("--max_lag_millis", '-l', dest="max_lag_millis", type=int,
help="An integer representing the maximum amount of lag in millisecond", default=250)
parser.add_argument("--project_id", "-p", type=str, dest="project_id", help="A GCP Project ID", required=True)
parser.add_argument("--topic_name", "-t", dest="topic_name", type=str,
help="The name of the topic where the messages to be published", required=True)
avg_secs_between_events = 5
args = parser.parse_args()
taxonomy_fp = args.taxonomy_fp
users_fp = args.users_fp
online_to_offline_probability = args.on_to_off_prob
offline_to_online_probability = args.off_to_on_prob
max_lag_millis = args.max_lag_millis
project_id = args.project_id
topic_name = args.topic_name
min_file_size_bytes = 100
max_file_size_bytes = 500
verbs = ["GET"]
responses = [200]
log_fields = ["ip", "user_id", "lat", "lng", "timestamp", "http_request",
"http_response", "num_bytes", "user_agent"]
def extract_resources(taxonomy_filepath):
"""
Reads a .json representing a taxonomy and returns
a data structure representing their hierarchical relationship
:param taxonomy_file: a string representing a path to a .json file
:return: Node representing root of taxonomic tree
"""
try:
with open(taxonomy_filepath, 'r') as fp:
json_str = fp.read()
json_data = json.loads(json_str)
root = DictImporter().import_(json_data)
finally:
fp.close()
return root
def read_users(users_fp):
"""
Reads a .csv from @user_fp representing users into a list of dictionaries,
each elt of which represents a user
:param user_fp: a .csv file where each line represents a user
:return: a list of dictionaries
"""
users = []
with open(users_fp, 'r') as fp:
fields = fp.readline().rstrip().split(",")
for line in fp:
user = dict(zip(fields, line.rstrip().split(",")))
users.append(user)
return users
def sleep_then_publish_burst(burst, publisher, topic_path):
"""
:param burst: a list of dictionaries, each representing an event
:param num_events_counter: an instance of Value shared by all processes
to track the number of published events
:param publisher: a PubSub publisher
:param topic_path: a topic path for PubSub
:return:
"""
sleep_secs = random.uniform(0, max_lag_millis/1000)
time.sleep(sleep_secs)
publish_burst(burst, publisher, topic_path)
def publish_burst(burst, publisher, topic_path):
"""
Publishes and prints each event
:param burst: a list of dictionaries, each representing an event
:param num_events_counter: an instance of Value shared by all processes to
track the number of published events
:param publisher: a PubSub publisher
:param topic_path: a topic path for PubSub
:return:
"""
for event_dict in burst:
json_str = json.dumps(event_dict)
data = json_str.encode('utf-8')
publisher.publish(topic_path, data=data, timestamp=event_dict['timestamp'])
def create_user_process(user, root):
"""
Code for continuously-running process representing a user publishing
events to pubsub
:param user: a dictionary representing characteristics of the user
:param root: an instance of AnyNode representing the home page of a website
:param num_events_counter: a variable shared among all processes used to track the number of events published
:return:
"""
publisher = pubsub_v1.PublisherClient()
topic_path = publisher.topic_path(project_id, topic_name)
user['page'] = root
user['is_online'] = True
user['offline_events'] = []
while True:
time_between_events = random.uniform(0, avg_secs_between_events * 2)
time.sleep(time_between_events)
prob = random.random()
event = generate_event(user)
if user['is_online']:
if prob < online_to_offline_probability:
user['is_online'] = False
user['offline_events'] = [event]
else:
sleep_then_publish_burst([event], publisher, topic_path)
else:
user['offline_events'].append(event)
if prob < offline_to_online_probability:
user['is_online'] = True
sleep_then_publish_burst(user['offline_events'], publisher, topic_path)
user['offline_events'] = []
def generate_event(user):
"""
Returns a dictionary representing an event
:param user:
:return:
"""
user['page'] = get_next_page(user)
uri = str(user['page'].name)
event_time = datetime.now(tz=timezone.utc)
current_time_str = event_time.strftime('%Y-%m-%dT%H:%M:%S.%fZ')
file_size_bytes = random.choice(range(min_file_size_bytes, max_file_size_bytes))
http_request = "\"{} {} HTTP/1.0\"".format(random.choice(verbs), uri)
http_response = random.choice(responses)
event_values = [user['ip'], user['id'], float(user['lat']), float(user['lng']), current_time_str, http_request,
http_response, file_size_bytes, user['user_agent']]
return dict(zip(log_fields, event_values))
def get_next_page(user):
"""
Consults the user's representation of the web site taxonomy to determine the next page that they visit
:param user:
:return:
"""
possible_next_pages = [user['page']]
if not user['page'].is_leaf:
possible_next_pages += list(user['page'].children)
if (user['page'].parent != None):
possible_next_pages += [user['page'].parent]
next_page = random.choice(possible_next_pages)
return next_page
if __name__ == '__main__':
users = read_users(users_fp)
root = extract_resources(taxonomy_fp)
processes = [Process(target=create_user_process, args=(user, root))
for user in users]
[process.start() for process in processes]
while True:
time.sleep(1) | # This program reads a file representing web server logs in common log format and streams them into a PubSub topic
# with lag characteristics as determined by command-line arguments
import argparse
from google.cloud import pubsub_v1
import time
from datetime import datetime, timezone
import random
from anytree.importer import DictImporter
import json
from multiprocessing import Process
parser = argparse.ArgumentParser(__file__, description="event_generator")
parser.add_argument("--taxonomy", "-x", dest="taxonomy_fp",
help="A .json file representing a taxonomy of web resources",
default="taxonomy.json")
parser.add_argument("--users_fp", "-u", dest="users_fp",
help="A .csv file of users",
default="users.csv")
parser.add_argument("--off_to_on", "-off", dest="off_to_on_prob", type=float,
help="A float representing the probability that a user who is offline will come online",
default=.25)
parser.add_argument("--on_to_off", "-on", dest="on_to_off_prob", type=float,
help="A float representing the probability that a user who is online will go offline",
default=.1)
parser.add_argument("--max_lag_millis", '-l', dest="max_lag_millis", type=int,
help="An integer representing the maximum amount of lag in millisecond", default=250)
parser.add_argument("--project_id", "-p", type=str, dest="project_id", help="A GCP Project ID", required=True)
parser.add_argument("--topic_name", "-t", dest="topic_name", type=str,
help="The name of the topic where the messages to be published", required=True)
avg_secs_between_events = 5
args = parser.parse_args()
taxonomy_fp = args.taxonomy_fp
users_fp = args.users_fp
online_to_offline_probability = args.on_to_off_prob
offline_to_online_probability = args.off_to_on_prob
max_lag_millis = args.max_lag_millis
project_id = args.project_id
topic_name = args.topic_name
min_file_size_bytes = 100
max_file_size_bytes = 500
verbs = ["GET"]
responses = [200]
log_fields = ["ip", "user_id", "lat", "lng", "timestamp", "http_request",
"http_response", "num_bytes", "user_agent"]
def extract_resources(taxonomy_filepath):
"""
Reads a .json representing a taxonomy and returns
a data structure representing their hierarchical relationship
:param taxonomy_file: a string representing a path to a .json file
:return: Node representing root of taxonomic tree
"""
try:
with open(taxonomy_filepath, 'r') as fp:
json_str = fp.read()
json_data = json.loads(json_str)
root = DictImporter().import_(json_data)
finally:
fp.close()
return root
def read_users(users_fp):
"""
Reads a .csv from @user_fp representing users into a list of dictionaries,
each elt of which represents a user
:param user_fp: a .csv file where each line represents a user
:return: a list of dictionaries
"""
users = []
with open(users_fp, 'r') as fp:
fields = fp.readline().rstrip().split(",")
for line in fp:
user = dict(zip(fields, line.rstrip().split(",")))
users.append(user)
return users
def sleep_then_publish_burst(burst, publisher, topic_path):
"""
:param burst: a list of dictionaries, each representing an event
:param num_events_counter: an instance of Value shared by all processes
to track the number of published events
:param publisher: a PubSub publisher
:param topic_path: a topic path for PubSub
:return:
"""
sleep_secs = random.uniform(0, max_lag_millis/1000)
time.sleep(sleep_secs)
publish_burst(burst, publisher, topic_path)
def publish_burst(burst, publisher, topic_path):
"""
Publishes and prints each event
:param burst: a list of dictionaries, each representing an event
:param num_events_counter: an instance of Value shared by all processes to
track the number of published events
:param publisher: a PubSub publisher
:param topic_path: a topic path for PubSub
:return:
"""
for event_dict in burst:
json_str = json.dumps(event_dict)
data = json_str.encode('utf-8')
publisher.publish(topic_path, data=data, timestamp=event_dict['timestamp'])
def create_user_process(user, root):
"""
Code for continuously-running process representing a user publishing
events to pubsub
:param user: a dictionary representing characteristics of the user
:param root: an instance of AnyNode representing the home page of a website
:param num_events_counter: a variable shared among all processes used to track the number of events published
:return:
"""
publisher = pubsub_v1.PublisherClient()
topic_path = publisher.topic_path(project_id, topic_name)
user['page'] = root
user['is_online'] = True
user['offline_events'] = []
while True:
time_between_events = random.uniform(0, avg_secs_between_events * 2)
time.sleep(time_between_events)
prob = random.random()
event = generate_event(user)
if user['is_online']:
if prob < online_to_offline_probability:
user['is_online'] = False
user['offline_events'] = [event]
else:
sleep_then_publish_burst([event], publisher, topic_path)
else:
user['offline_events'].append(event)
if prob < offline_to_online_probability:
user['is_online'] = True
sleep_then_publish_burst(user['offline_events'], publisher, topic_path)
user['offline_events'] = []
def generate_event(user):
"""
Returns a dictionary representing an event
:param user:
:return:
"""
user['page'] = get_next_page(user)
uri = str(user['page'].name)
event_time = datetime.now(tz=timezone.utc)
current_time_str = event_time.strftime('%Y-%m-%dT%H:%M:%S.%fZ')
file_size_bytes = random.choice(range(min_file_size_bytes, max_file_size_bytes))
http_request = "\"{} {} HTTP/1.0\"".format(random.choice(verbs), uri)
http_response = random.choice(responses)
event_values = [user['ip'], user['id'], float(user['lat']), float(user['lng']), current_time_str, http_request,
http_response, file_size_bytes, user['user_agent']]
return dict(zip(log_fields, event_values))
def get_next_page(user):
"""
Consults the user's representation of the web site taxonomy to determine the next page that they visit
:param user:
:return:
"""
possible_next_pages = [user['page']]
if not user['page'].is_leaf:
possible_next_pages += list(user['page'].children)
if (user['page'].parent != None):
possible_next_pages += [user['page'].parent]
next_page = random.choice(possible_next_pages)
return next_page
if __name__ == '__main__':
users = read_users(users_fp)
root = extract_resources(taxonomy_fp)
processes = [Process(target=create_user_process, args=(user, root))
for user in users]
[process.start() for process in processes]
while True:
time.sleep(1) | en | 0.873355 | # This program reads a file representing web server logs in common log format and streams them into a PubSub topic # with lag characteristics as determined by command-line arguments Reads a .json representing a taxonomy and returns
a data structure representing their hierarchical relationship
:param taxonomy_file: a string representing a path to a .json file
:return: Node representing root of taxonomic tree Reads a .csv from @user_fp representing users into a list of dictionaries,
each elt of which represents a user
:param user_fp: a .csv file where each line represents a user
:return: a list of dictionaries :param burst: a list of dictionaries, each representing an event
:param num_events_counter: an instance of Value shared by all processes
to track the number of published events
:param publisher: a PubSub publisher
:param topic_path: a topic path for PubSub
:return: Publishes and prints each event
:param burst: a list of dictionaries, each representing an event
:param num_events_counter: an instance of Value shared by all processes to
track the number of published events
:param publisher: a PubSub publisher
:param topic_path: a topic path for PubSub
:return: Code for continuously-running process representing a user publishing
events to pubsub
:param user: a dictionary representing characteristics of the user
:param root: an instance of AnyNode representing the home page of a website
:param num_events_counter: a variable shared among all processes used to track the number of events published
:return: Returns a dictionary representing an event
:param user:
:return: Consults the user's representation of the web site taxonomy to determine the next page that they visit
:param user:
:return: | 2.659251 | 3 |
src/models/configs/database.py | Nardri/rbac-service | 0 | 10664 | """Database setup"""
# Third party library
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
# initialization of the database and migration
database = SQLAlchemy()
migrate = Migrate()
| """Database setup"""
# Third party library
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
# initialization of the database and migration
database = SQLAlchemy()
migrate = Migrate()
| en | 0.772135 | Database setup # Third party library # initialization of the database and migration | 1.596766 | 2 |
postreise/plot/plot_heatmap.py | lanesmith/PostREISE | 1 | 10665 | <reponame>lanesmith/PostREISE<gh_stars>1-10
import datetime as dt
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import pandas as pd
from powersimdata.input.check import _check_time_series
from postreise.analyze.time import change_time_zone
def plot_heatmap(
series,
time_zone=None,
time_zone_label=None,
title=None,
cmap="PiYG",
scale=None,
save_filename=None,
origin="upper",
vmin=None,
vmax=None,
cbar_format=None,
cbar_tick_values=None,
cbar_label=None,
cbar_tick_labels=None,
contour_levels=None,
figsize=(16, 8),
):
"""Show time-series values via an imshow where each column is one color-coded day.
:param pandas.Series series: a time-series of values to be color-coded.
:param str time_zone: a time zone to be passed as `tz` kwarg to
:func:`postreise.analyze.time.change_time_zone`.
:param str time_zone_label: a time zone label to be added to the y axis label.
:param str title: a title to be added to the figure.
:param str/matplotlib.colors.Colormap cmap: colormap specification to be passed
as `cmap` kwarg to :func:`matplotlib.pyplot.imshow`.
:param int/float scale: a scaling factor to be applied to the series values.
:param str save_filename: a path to save the figure to.
:param str origin: the vertical location of the origin, either "upper" or "lower".
:param int/float vmin: Minimum value for coloring, to be passed as `vmin` kwarg to
:func:`matplotlib.pyplot.imshow`.
:param int/float vmax: Maximum value for coloring, to be passed as `vmax` kwarg to
:func:`matplotlib.pyplot.imshow`.
:param str/matplotlib.ticker.Formatter cbar_format: a formatter for colorbar labels,
to be passed as `format` kwarg to :func:`matplotlib.pyplot.colorbar`.
:param iterable cbar_tick_values: colorbar tick locations, to be passed as
`ticks` kwarg to :func:`matplotlib.pyplot.colorbar`.
:param str cbar_label: axis label for colorbar.
:param iterable cbar_tick_labels: colorbar tick labels.
:param iterable contour_levels: values at which to draw contours, passed as `levels`
kwarg to :func:`matplotlib.pyplot.contour`.
:param tuple(int/float, int/float) figsize: size of figure.
"""
_check_time_series(series, "series")
df = series.to_frame(name="values").asfreq("H")
year = df.index[0].year
if time_zone is not None:
df = change_time_zone(df, time_zone)
df["date"] = df.index.date
df["hour"] = df.index.hour
df_reshaped = pd.pivot(
df,
index="date",
columns="hour",
values="values",
)
xlims = mdates.date2num([df_reshaped.index[0], df_reshaped.index[-1]])
ylims = mdates.date2num([dt.datetime(year, 1, 1, 0), dt.datetime(year, 1, 1, 23)])
if scale is not None:
df_reshaped *= scale
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot()
# if necessary, flip ylims so labels follow data from top to bottom
extent = [*xlims, *ylims] if origin == "lower" else [*xlims, ylims[1], ylims[0]]
im = plt.imshow(
df_reshaped.T,
cmap=cmap,
aspect="auto",
extent=extent,
origin=origin,
vmin=vmin,
vmax=vmax,
)
if contour_levels is not None:
ax.contour(df_reshaped.T, extent=extent, levels=contour_levels, origin=origin)
date_format = mdates.DateFormatter("%m/%d")
ax.xaxis_date()
ax.xaxis.set_major_formatter(date_format)
ax.set_xlabel("Date")
time_format = mdates.DateFormatter("%H:%M")
ax.yaxis_date()
ax.yaxis.set_major_formatter(time_format)
y_axis_label = "Time" if time_zone_label is None else f"Time {time_zone_label}"
ax.set_ylabel(y_axis_label)
cbar = fig.colorbar(im, format=cbar_format, ticks=cbar_tick_values)
if cbar_label is not None:
cbar.set_label(cbar_label)
if title is not None:
plt.title(title)
if cbar_tick_labels is not None:
cbar.ax.set_yticklabels(cbar_tick_labels)
if save_filename is not None:
plt.savefig(save_filename, bbox_inches="tight")
| import datetime as dt
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import pandas as pd
from powersimdata.input.check import _check_time_series
from postreise.analyze.time import change_time_zone
def plot_heatmap(
series,
time_zone=None,
time_zone_label=None,
title=None,
cmap="PiYG",
scale=None,
save_filename=None,
origin="upper",
vmin=None,
vmax=None,
cbar_format=None,
cbar_tick_values=None,
cbar_label=None,
cbar_tick_labels=None,
contour_levels=None,
figsize=(16, 8),
):
"""Show time-series values via an imshow where each column is one color-coded day.
:param pandas.Series series: a time-series of values to be color-coded.
:param str time_zone: a time zone to be passed as `tz` kwarg to
:func:`postreise.analyze.time.change_time_zone`.
:param str time_zone_label: a time zone label to be added to the y axis label.
:param str title: a title to be added to the figure.
:param str/matplotlib.colors.Colormap cmap: colormap specification to be passed
as `cmap` kwarg to :func:`matplotlib.pyplot.imshow`.
:param int/float scale: a scaling factor to be applied to the series values.
:param str save_filename: a path to save the figure to.
:param str origin: the vertical location of the origin, either "upper" or "lower".
:param int/float vmin: Minimum value for coloring, to be passed as `vmin` kwarg to
:func:`matplotlib.pyplot.imshow`.
:param int/float vmax: Maximum value for coloring, to be passed as `vmax` kwarg to
:func:`matplotlib.pyplot.imshow`.
:param str/matplotlib.ticker.Formatter cbar_format: a formatter for colorbar labels,
to be passed as `format` kwarg to :func:`matplotlib.pyplot.colorbar`.
:param iterable cbar_tick_values: colorbar tick locations, to be passed as
`ticks` kwarg to :func:`matplotlib.pyplot.colorbar`.
:param str cbar_label: axis label for colorbar.
:param iterable cbar_tick_labels: colorbar tick labels.
:param iterable contour_levels: values at which to draw contours, passed as `levels`
kwarg to :func:`matplotlib.pyplot.contour`.
:param tuple(int/float, int/float) figsize: size of figure.
"""
_check_time_series(series, "series")
df = series.to_frame(name="values").asfreq("H")
year = df.index[0].year
if time_zone is not None:
df = change_time_zone(df, time_zone)
df["date"] = df.index.date
df["hour"] = df.index.hour
df_reshaped = pd.pivot(
df,
index="date",
columns="hour",
values="values",
)
xlims = mdates.date2num([df_reshaped.index[0], df_reshaped.index[-1]])
ylims = mdates.date2num([dt.datetime(year, 1, 1, 0), dt.datetime(year, 1, 1, 23)])
if scale is not None:
df_reshaped *= scale
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot()
# if necessary, flip ylims so labels follow data from top to bottom
extent = [*xlims, *ylims] if origin == "lower" else [*xlims, ylims[1], ylims[0]]
im = plt.imshow(
df_reshaped.T,
cmap=cmap,
aspect="auto",
extent=extent,
origin=origin,
vmin=vmin,
vmax=vmax,
)
if contour_levels is not None:
ax.contour(df_reshaped.T, extent=extent, levels=contour_levels, origin=origin)
date_format = mdates.DateFormatter("%m/%d")
ax.xaxis_date()
ax.xaxis.set_major_formatter(date_format)
ax.set_xlabel("Date")
time_format = mdates.DateFormatter("%H:%M")
ax.yaxis_date()
ax.yaxis.set_major_formatter(time_format)
y_axis_label = "Time" if time_zone_label is None else f"Time {time_zone_label}"
ax.set_ylabel(y_axis_label)
cbar = fig.colorbar(im, format=cbar_format, ticks=cbar_tick_values)
if cbar_label is not None:
cbar.set_label(cbar_label)
if title is not None:
plt.title(title)
if cbar_tick_labels is not None:
cbar.ax.set_yticklabels(cbar_tick_labels)
if save_filename is not None:
plt.savefig(save_filename, bbox_inches="tight") | en | 0.558833 | Show time-series values via an imshow where each column is one color-coded day. :param pandas.Series series: a time-series of values to be color-coded. :param str time_zone: a time zone to be passed as `tz` kwarg to :func:`postreise.analyze.time.change_time_zone`. :param str time_zone_label: a time zone label to be added to the y axis label. :param str title: a title to be added to the figure. :param str/matplotlib.colors.Colormap cmap: colormap specification to be passed as `cmap` kwarg to :func:`matplotlib.pyplot.imshow`. :param int/float scale: a scaling factor to be applied to the series values. :param str save_filename: a path to save the figure to. :param str origin: the vertical location of the origin, either "upper" or "lower". :param int/float vmin: Minimum value for coloring, to be passed as `vmin` kwarg to :func:`matplotlib.pyplot.imshow`. :param int/float vmax: Maximum value for coloring, to be passed as `vmax` kwarg to :func:`matplotlib.pyplot.imshow`. :param str/matplotlib.ticker.Formatter cbar_format: a formatter for colorbar labels, to be passed as `format` kwarg to :func:`matplotlib.pyplot.colorbar`. :param iterable cbar_tick_values: colorbar tick locations, to be passed as `ticks` kwarg to :func:`matplotlib.pyplot.colorbar`. :param str cbar_label: axis label for colorbar. :param iterable cbar_tick_labels: colorbar tick labels. :param iterable contour_levels: values at which to draw contours, passed as `levels` kwarg to :func:`matplotlib.pyplot.contour`. :param tuple(int/float, int/float) figsize: size of figure. # if necessary, flip ylims so labels follow data from top to bottom | 2.862601 | 3 |
tensorflow_federated/python/simulation/file_per_user_client_data.py | houcharlie/federated-legacy | 0 | 10666 | # Copyright 2018, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementations of the ClientData abstract base class."""
import collections
import os.path
from typing import Callable, Mapping
import tensorflow as tf
from tensorflow_federated.python import core as tff
from tensorflow_federated.python.common_libs import py_typecheck
from tensorflow_federated.python.simulation import client_data
from tensorflow_federated.python.tensorflow_libs import tensor_utils
class FilePerUserClientData(client_data.ClientData):
"""A `tf.simulation.ClientData` that maps a set of files to a dataset.
This mapping is restricted to one file per user.
"""
def __init__(self, client_ids_to_files: Mapping[str, str],
dataset_fn: Callable[[str], tf.data.Dataset]):
"""Constructs a `tf.simulation.ClientData` object.
Args:
client_ids_to_files: A mapping from string client IDs to filepaths
containing the user's data.
dataset_fn: A factory function that takes a filepath (must accept
both strings and tensors) and returns a `tf.data.Dataset` corresponding
to this path.
"""
py_typecheck.check_type(client_ids_to_files, collections.abc.Mapping)
if not client_ids_to_files:
raise ValueError('`client_ids` must have at least one client ID')
py_typecheck.check_callable(dataset_fn)
self._client_ids = sorted(client_ids_to_files.keys())
def create_dataset_for_filename_fn(client_id):
return dataset_fn(client_ids_to_files[client_id])
@tff.tf_computation(tf.string)
def dataset_computation(client_id):
client_ids_to_path = tf.lookup.StaticHashTable(
tf.lookup.KeyValueTensorInitializer(
list(client_ids_to_files.keys()),
list(client_ids_to_files.values())), '')
client_path = client_ids_to_path.lookup(client_id)
return dataset_fn(client_path)
self._create_tf_dataset_fn = create_dataset_for_filename_fn
self._dataset_computation = dataset_computation
g = tf.Graph()
with g.as_default():
tf_dataset = self._create_tf_dataset_fn(self._client_ids[0])
self._element_type_structure = tf_dataset.element_spec
@property
def client_ids(self):
return self._client_ids
def create_tf_dataset_for_client(self, client_id):
tf_dataset = self._create_tf_dataset_fn(client_id)
tensor_utils.check_nested_equal(tf_dataset.element_spec,
self._element_type_structure)
return tf_dataset
@property
def element_type_structure(self):
return self._element_type_structure
@classmethod
def create_from_dir(cls, path, create_tf_dataset_fn=tf.data.TFRecordDataset):
"""Builds a `tff.simulation.FilePerUserClientData`.
Iterates over all files in `path`, using the filename as the client ID. Does
not recursively search `path`.
Args:
path: A directory path to search for per-client files.
create_tf_dataset_fn: A callable that creates a `tf.data.Datasaet` object
for a given file in the directory specified in `path`.
Returns:
A `tff.simulation.FilePerUserClientData` object.
"""
client_ids_to_paths_dict = {
filename: os.path.join(path, filename)
for filename in tf.io.gfile.listdir(path)
}
return FilePerUserClientData(client_ids_to_paths_dict, create_tf_dataset_fn)
@property
def dataset_computation(self):
return self._dataset_computation
| # Copyright 2018, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementations of the ClientData abstract base class."""
import collections
import os.path
from typing import Callable, Mapping
import tensorflow as tf
from tensorflow_federated.python import core as tff
from tensorflow_federated.python.common_libs import py_typecheck
from tensorflow_federated.python.simulation import client_data
from tensorflow_federated.python.tensorflow_libs import tensor_utils
class FilePerUserClientData(client_data.ClientData):
"""A `tf.simulation.ClientData` that maps a set of files to a dataset.
This mapping is restricted to one file per user.
"""
def __init__(self, client_ids_to_files: Mapping[str, str],
dataset_fn: Callable[[str], tf.data.Dataset]):
"""Constructs a `tf.simulation.ClientData` object.
Args:
client_ids_to_files: A mapping from string client IDs to filepaths
containing the user's data.
dataset_fn: A factory function that takes a filepath (must accept
both strings and tensors) and returns a `tf.data.Dataset` corresponding
to this path.
"""
py_typecheck.check_type(client_ids_to_files, collections.abc.Mapping)
if not client_ids_to_files:
raise ValueError('`client_ids` must have at least one client ID')
py_typecheck.check_callable(dataset_fn)
self._client_ids = sorted(client_ids_to_files.keys())
def create_dataset_for_filename_fn(client_id):
return dataset_fn(client_ids_to_files[client_id])
@tff.tf_computation(tf.string)
def dataset_computation(client_id):
client_ids_to_path = tf.lookup.StaticHashTable(
tf.lookup.KeyValueTensorInitializer(
list(client_ids_to_files.keys()),
list(client_ids_to_files.values())), '')
client_path = client_ids_to_path.lookup(client_id)
return dataset_fn(client_path)
self._create_tf_dataset_fn = create_dataset_for_filename_fn
self._dataset_computation = dataset_computation
g = tf.Graph()
with g.as_default():
tf_dataset = self._create_tf_dataset_fn(self._client_ids[0])
self._element_type_structure = tf_dataset.element_spec
@property
def client_ids(self):
return self._client_ids
def create_tf_dataset_for_client(self, client_id):
tf_dataset = self._create_tf_dataset_fn(client_id)
tensor_utils.check_nested_equal(tf_dataset.element_spec,
self._element_type_structure)
return tf_dataset
@property
def element_type_structure(self):
return self._element_type_structure
@classmethod
def create_from_dir(cls, path, create_tf_dataset_fn=tf.data.TFRecordDataset):
"""Builds a `tff.simulation.FilePerUserClientData`.
Iterates over all files in `path`, using the filename as the client ID. Does
not recursively search `path`.
Args:
path: A directory path to search for per-client files.
create_tf_dataset_fn: A callable that creates a `tf.data.Datasaet` object
for a given file in the directory specified in `path`.
Returns:
A `tff.simulation.FilePerUserClientData` object.
"""
client_ids_to_paths_dict = {
filename: os.path.join(path, filename)
for filename in tf.io.gfile.listdir(path)
}
return FilePerUserClientData(client_ids_to_paths_dict, create_tf_dataset_fn)
@property
def dataset_computation(self):
return self._dataset_computation
| en | 0.755005 | # Copyright 2018, The TensorFlow Federated Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Implementations of the ClientData abstract base class. A `tf.simulation.ClientData` that maps a set of files to a dataset. This mapping is restricted to one file per user. Constructs a `tf.simulation.ClientData` object. Args: client_ids_to_files: A mapping from string client IDs to filepaths containing the user's data. dataset_fn: A factory function that takes a filepath (must accept both strings and tensors) and returns a `tf.data.Dataset` corresponding to this path. Builds a `tff.simulation.FilePerUserClientData`. Iterates over all files in `path`, using the filename as the client ID. Does not recursively search `path`. Args: path: A directory path to search for per-client files. create_tf_dataset_fn: A callable that creates a `tf.data.Datasaet` object for a given file in the directory specified in `path`. Returns: A `tff.simulation.FilePerUserClientData` object. | 2.130845 | 2 |
20-Blog_Clone_Project/blog_project_Practice/blog/admin.py | andy2167565/Django-Bootcamp-Practice | 0 | 10667 | from django.contrib import admin
from blog.models import Post, Comment
# Register your models here.
admin.site.register(Post)
admin.site.register(Comment)
| from django.contrib import admin
from blog.models import Post, Comment
# Register your models here.
admin.site.register(Post)
admin.site.register(Comment)
| en | 0.968259 | # Register your models here. | 1.392694 | 1 |
tests/compilation/request/test_request_compiler.py | ymoch/preacher | 3 | 10668 |
from unittest.mock import NonCallableMock, sentinel
from pytest import mark, raises, fixture
from preacher.compilation.argument import Argument
from preacher.compilation.error import CompilationError, NamedNode, IndexedNode
from preacher.compilation.request.request import RequestCompiler, RequestCompiled
from preacher.compilation.request.request_body import RequestBodyCompiler
from preacher.core.request import Method
PKG = "preacher.compilation.request.request"
@fixture
def body():
body = NonCallableMock(RequestBodyCompiler)
body.of_default.return_value = sentinel.new_body_compiler
return body
@fixture
def default() -> RequestCompiled:
return RequestCompiled(
method=sentinel.default_method,
path=sentinel.default_path,
headers=sentinel.default_headers,
params=sentinel.default_params,
body=sentinel.default_body,
)
@fixture
def compiler(body, default: RequestCompiled) -> RequestCompiler:
return RequestCompiler(body=body, default=default)
@mark.parametrize(
("obj", "expected_path"),
(
([], []),
({"method": 1}, [NamedNode("method")]),
({"method": "invalid"}, [NamedNode("method")]),
({"path": {"key": "value"}}, [NamedNode("path")]),
({"headers": ""}, [NamedNode("headers")]),
({"headers": {"int": 1}}, [NamedNode("headers")]),
({"headers": {1: "not-a-string-key"}}, [NamedNode("headers")]),
),
)
def test_given_an_invalid_obj(compiler: RequestCompiler, obj, expected_path):
with raises(CompilationError) as error_info:
compiler.compile(obj)
assert error_info.value.path == expected_path
def test_given_an_empty_mapping(compiler: RequestCompiler):
compiled = compiler.compile({})
assert compiled.method is sentinel.default_method
assert compiled.path is sentinel.default_path
assert compiled.headers is sentinel.default_headers
assert compiled.params is sentinel.default_params
assert compiled.body is sentinel.default_body
@mark.parametrize(
("method_obj", "expected"),
(
("get", Method.GET),
("POST", Method.POST),
("Put", Method.PUT),
("Delete", Method.DELETE),
),
)
def test_given_a_valid_method(compiler: RequestCompiler, method_obj, expected):
obj = {"method": Argument("method")}
arguments = {"method": method_obj}
compiled = compiler.compile(obj, arguments)
assert compiled.method is expected
@mark.parametrize(
("headers_obj", "expected"),
(
({}, {}),
({"null": None, "empty": ""}, {"empty": ""}),
({"n1": "v1", "n2": "v2"}, {"n1": "v1", "n2": "v2"}),
),
)
def test_given_valid_headers(compiler: RequestCompiler, headers_obj, expected):
obj = {"headers": Argument("headers")}
arguments = {"headers": headers_obj}
compiled = compiler.compile(obj, arguments)
assert compiled.headers == expected
def test_given_an_invalid_params(compiler: RequestCompiler, mocker):
compile_params = mocker.patch(f"{PKG}.compile_url_params")
compile_params.side_effect = CompilationError("msg", node=NamedNode("x"))
with raises(CompilationError) as error_info:
compiler.compile({"params": sentinel.params})
assert error_info.value.path == [NamedNode("params"), NamedNode("x")]
compile_params.assert_called_once_with(sentinel.params, None)
def test_given_valid_params(compiler: RequestCompiler, mocker):
compile_params = mocker.patch(f"{PKG}.compile_url_params")
compile_params.return_value = sentinel.compiled_params
compiled = compiler.compile({"params": sentinel.params}, sentinel.args)
assert compiled.params == sentinel.compiled_params
compile_params.assert_called_once_with(sentinel.params, sentinel.args)
def test_given_invalid_body(compiler: RequestCompiler, body):
body.compile.side_effect = CompilationError("x", node=IndexedNode(1))
with raises(CompilationError) as error_info:
compiler.compile({"body": sentinel.body_obj})
assert error_info.value.path == [NamedNode("body"), IndexedNode(1)]
body.compile.assert_called_once_with(sentinel.body_obj, None)
def test_given_valid_body(compiler: RequestCompiler, body):
body.compile.return_value = sentinel.body
compiled = compiler.compile({"body": sentinel.body_obj}, sentinel.args)
assert compiled.body is sentinel.body
body.compile.assert_called_once_with(sentinel.body_obj, sentinel.args)
def test_given_a_string(compiler: RequestCompiler):
compiled = compiler.compile(Argument("path"), {"path": "/path"})
assert compiled.method is sentinel.default_method
assert compiled.path == "/path"
assert compiled.headers is sentinel.default_headers
assert compiled.params is sentinel.default_params
assert compiled.body is sentinel.default_body
def test_of_default_no_body(compiler: RequestCompiler, body, mocker):
ctor = mocker.patch(f"{PKG}.RequestCompiler")
ctor.return_value = sentinel.compiler_of_default
new_default = RequestCompiled(
method=sentinel.new_default_method,
path=sentinel.new_default_path,
headers=sentinel.new_default_headers,
params=sentinel.new_default_params,
)
new_compiler = compiler.of_default(new_default)
assert new_compiler is sentinel.compiler_of_default
ctor.assert_called_once_with(
body=body,
default=RequestCompiled(
method=sentinel.new_default_method,
path=sentinel.new_default_path,
headers=sentinel.new_default_headers,
params=sentinel.new_default_params,
body=sentinel.default_body,
),
)
body.of_default.assert_not_called()
def test_of_default_body(compiler: RequestCompiler, body, mocker):
ctor = mocker.patch(f"{PKG}.RequestCompiler")
ctor.return_value = sentinel.compiler_of_default
new_default = RequestCompiled(body=sentinel.new_default_body)
new_compiler = compiler.of_default(new_default)
assert new_compiler is sentinel.compiler_of_default
ctor.assert_called_once_with(
body=sentinel.new_body_compiler,
default=RequestCompiled(
method=sentinel.default_method,
path=sentinel.default_path,
headers=sentinel.default_headers,
params=sentinel.default_params,
body=sentinel.new_default_body,
),
)
body.of_default.assert_called_once_with(sentinel.new_default_body)
| from unittest.mock import NonCallableMock, sentinel
from pytest import mark, raises, fixture
from preacher.compilation.argument import Argument
from preacher.compilation.error import CompilationError, NamedNode, IndexedNode
from preacher.compilation.request.request import RequestCompiler, RequestCompiled
from preacher.compilation.request.request_body import RequestBodyCompiler
from preacher.core.request import Method
PKG = "preacher.compilation.request.request"
@fixture
def body():
body = NonCallableMock(RequestBodyCompiler)
body.of_default.return_value = sentinel.new_body_compiler
return body
@fixture
def default() -> RequestCompiled:
return RequestCompiled(
method=sentinel.default_method,
path=sentinel.default_path,
headers=sentinel.default_headers,
params=sentinel.default_params,
body=sentinel.default_body,
)
@fixture
def compiler(body, default: RequestCompiled) -> RequestCompiler:
return RequestCompiler(body=body, default=default)
@mark.parametrize(
("obj", "expected_path"),
(
([], []),
({"method": 1}, [NamedNode("method")]),
({"method": "invalid"}, [NamedNode("method")]),
({"path": {"key": "value"}}, [NamedNode("path")]),
({"headers": ""}, [NamedNode("headers")]),
({"headers": {"int": 1}}, [NamedNode("headers")]),
({"headers": {1: "not-a-string-key"}}, [NamedNode("headers")]),
),
)
def test_given_an_invalid_obj(compiler: RequestCompiler, obj, expected_path):
with raises(CompilationError) as error_info:
compiler.compile(obj)
assert error_info.value.path == expected_path
def test_given_an_empty_mapping(compiler: RequestCompiler):
compiled = compiler.compile({})
assert compiled.method is sentinel.default_method
assert compiled.path is sentinel.default_path
assert compiled.headers is sentinel.default_headers
assert compiled.params is sentinel.default_params
assert compiled.body is sentinel.default_body
@mark.parametrize(
("method_obj", "expected"),
(
("get", Method.GET),
("POST", Method.POST),
("Put", Method.PUT),
("Delete", Method.DELETE),
),
)
def test_given_a_valid_method(compiler: RequestCompiler, method_obj, expected):
obj = {"method": Argument("method")}
arguments = {"method": method_obj}
compiled = compiler.compile(obj, arguments)
assert compiled.method is expected
@mark.parametrize(
("headers_obj", "expected"),
(
({}, {}),
({"null": None, "empty": ""}, {"empty": ""}),
({"n1": "v1", "n2": "v2"}, {"n1": "v1", "n2": "v2"}),
),
)
def test_given_valid_headers(compiler: RequestCompiler, headers_obj, expected):
obj = {"headers": Argument("headers")}
arguments = {"headers": headers_obj}
compiled = compiler.compile(obj, arguments)
assert compiled.headers == expected
def test_given_an_invalid_params(compiler: RequestCompiler, mocker):
compile_params = mocker.patch(f"{PKG}.compile_url_params")
compile_params.side_effect = CompilationError("msg", node=NamedNode("x"))
with raises(CompilationError) as error_info:
compiler.compile({"params": sentinel.params})
assert error_info.value.path == [NamedNode("params"), NamedNode("x")]
compile_params.assert_called_once_with(sentinel.params, None)
def test_given_valid_params(compiler: RequestCompiler, mocker):
compile_params = mocker.patch(f"{PKG}.compile_url_params")
compile_params.return_value = sentinel.compiled_params
compiled = compiler.compile({"params": sentinel.params}, sentinel.args)
assert compiled.params == sentinel.compiled_params
compile_params.assert_called_once_with(sentinel.params, sentinel.args)
def test_given_invalid_body(compiler: RequestCompiler, body):
body.compile.side_effect = CompilationError("x", node=IndexedNode(1))
with raises(CompilationError) as error_info:
compiler.compile({"body": sentinel.body_obj})
assert error_info.value.path == [NamedNode("body"), IndexedNode(1)]
body.compile.assert_called_once_with(sentinel.body_obj, None)
def test_given_valid_body(compiler: RequestCompiler, body):
body.compile.return_value = sentinel.body
compiled = compiler.compile({"body": sentinel.body_obj}, sentinel.args)
assert compiled.body is sentinel.body
body.compile.assert_called_once_with(sentinel.body_obj, sentinel.args)
def test_given_a_string(compiler: RequestCompiler):
compiled = compiler.compile(Argument("path"), {"path": "/path"})
assert compiled.method is sentinel.default_method
assert compiled.path == "/path"
assert compiled.headers is sentinel.default_headers
assert compiled.params is sentinel.default_params
assert compiled.body is sentinel.default_body
def test_of_default_no_body(compiler: RequestCompiler, body, mocker):
ctor = mocker.patch(f"{PKG}.RequestCompiler")
ctor.return_value = sentinel.compiler_of_default
new_default = RequestCompiled(
method=sentinel.new_default_method,
path=sentinel.new_default_path,
headers=sentinel.new_default_headers,
params=sentinel.new_default_params,
)
new_compiler = compiler.of_default(new_default)
assert new_compiler is sentinel.compiler_of_default
ctor.assert_called_once_with(
body=body,
default=RequestCompiled(
method=sentinel.new_default_method,
path=sentinel.new_default_path,
headers=sentinel.new_default_headers,
params=sentinel.new_default_params,
body=sentinel.default_body,
),
)
body.of_default.assert_not_called()
def test_of_default_body(compiler: RequestCompiler, body, mocker):
ctor = mocker.patch(f"{PKG}.RequestCompiler")
ctor.return_value = sentinel.compiler_of_default
new_default = RequestCompiled(body=sentinel.new_default_body)
new_compiler = compiler.of_default(new_default)
assert new_compiler is sentinel.compiler_of_default
ctor.assert_called_once_with(
body=sentinel.new_body_compiler,
default=RequestCompiled(
method=sentinel.default_method,
path=sentinel.default_path,
headers=sentinel.default_headers,
params=sentinel.default_params,
body=sentinel.new_default_body,
),
)
body.of_default.assert_called_once_with(sentinel.new_default_body) | none | 1 | 2.264091 | 2 |
|
bot/conversation_handlers/stage01.py | gerbigtim/coaching_bot | 0 | 10669 | # imports
from telegram.ext import (
CommandHandler,
MessageHandler,
Filters,
ConversationHandler,
)
from handler_functions.start import start
from handler_functions.bio import bio
from handler_functions.gender import gender
from handler_functions.photo import photo, skip_photo
from handler_functions.location import location, skip_location
from handler_functions.cancel import cancel
from conversation_handlers.stage_constants import *
# Adds conversation handler with the states GENDER, PHOTO, LOCATION and BIO for stage 1 of the sign up
conv_handler = ConversationHandler(
entry_points=[CommandHandler('start', start)],
states={
GENDER: [MessageHandler(Filters.regex('^(Gentleman|Lady|I am a unicorn.)$'), gender)],
PHOTO: [MessageHandler(Filters.photo, photo), CommandHandler('skip', skip_photo)],
LOCATION: [
MessageHandler(Filters.location, location),
CommandHandler('skip', skip_location),
],
BIO: [MessageHandler(Filters.text & ~Filters.command, bio)],
},
fallbacks=[CommandHandler('cancel', cancel)],
) | # imports
from telegram.ext import (
CommandHandler,
MessageHandler,
Filters,
ConversationHandler,
)
from handler_functions.start import start
from handler_functions.bio import bio
from handler_functions.gender import gender
from handler_functions.photo import photo, skip_photo
from handler_functions.location import location, skip_location
from handler_functions.cancel import cancel
from conversation_handlers.stage_constants import *
# Adds conversation handler with the states GENDER, PHOTO, LOCATION and BIO for stage 1 of the sign up
conv_handler = ConversationHandler(
entry_points=[CommandHandler('start', start)],
states={
GENDER: [MessageHandler(Filters.regex('^(Gentleman|Lady|I am a unicorn.)$'), gender)],
PHOTO: [MessageHandler(Filters.photo, photo), CommandHandler('skip', skip_photo)],
LOCATION: [
MessageHandler(Filters.location, location),
CommandHandler('skip', skip_location),
],
BIO: [MessageHandler(Filters.text & ~Filters.command, bio)],
},
fallbacks=[CommandHandler('cancel', cancel)],
) | en | 0.789791 | # imports # Adds conversation handler with the states GENDER, PHOTO, LOCATION and BIO for stage 1 of the sign up | 2.080321 | 2 |
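# --- Usage sketch (not part of the original module) -------------------------
# How the conv_handler above would typically be wired into a
# python-telegram-bot v13 style Updater; the BOT_TOKEN environment variable
# name is an assumption.
import os
from telegram.ext import Updater
from conversation_handlers.stage01 import conv_handler

updater = Updater(token=os.environ['BOT_TOKEN'], use_context=True)
updater.dispatcher.add_handler(conv_handler)
updater.start_polling()
updater.idle()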
python100days/day03/conversion.py | lanSeFangZhou/pythonbase | 0 | 10670 |
# Convert between imperial inches and metric centimeters
value =float(input('请输入长度:'))
unit =input('请输入单位:')
if unit == 'in' or unit == '英寸':
print('%f英寸 = %f厘米' % (value, value * 2.54))
elif unit == '厘米' or unit == 'cm':
print('%f 厘米 = %f英寸' % (value, value / 2.54))
else:
print('请输入有效的单位') | # Convert between imperial inches and metric centimeters
value =float(input('请输入长度:'))
unit =input('请输入单位:')
if unit == 'in' or unit == '英寸':
print('%f英寸 = %f厘米' % (value, value * 2.54))
elif unit == '厘米' or unit == 'cm':
print('%f 厘米 = %f英寸' % (value, value / 2.54))
else:
print('请输入有效的单位') | zh | 0.973563 | # 英制单位英寸和公制单位厘米互换 | 3.973188 | 4 |
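# Worked example for the converter above (1 inch = 2.54 cm exactly):
#   length 12, unit 'in'    -> 12 * 2.54 = 30.48, printed as "12.000000英寸 = 30.480000厘米"
#   length 30.48, unit 'cm' -> 30.48 / 2.54 = 12.00, printed as "30.480000 厘米 = 12.000000英寸"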
tests/models/test_dtfactory.py | surajsjain/ocean.py | 4 | 10671 | from ocean_lib.models.data_token import DataToken
from ocean_lib.models.dtfactory import DTFactory
from ocean_lib.ocean.util import to_base_18
def test1(network, alice_wallet, dtfactory_address):
dtfactory = DTFactory(dtfactory_address)
dt_address = dtfactory.createToken('foo_blob', 'DT1', 'DT1', to_base_18(1000), from_wallet=alice_wallet)
dt = DataToken(dtfactory.get_token_address(dt_address))
assert isinstance(dt, DataToken)
assert dt.blob() == 'foo_blob'
| from ocean_lib.models.data_token import DataToken
from ocean_lib.models.dtfactory import DTFactory
from ocean_lib.ocean.util import to_base_18
def test1(network, alice_wallet, dtfactory_address):
dtfactory = DTFactory(dtfactory_address)
dt_address = dtfactory.createToken('foo_blob', 'DT1', 'DT1', to_base_18(1000), from_wallet=alice_wallet)
dt = DataToken(dtfactory.get_token_address(dt_address))
assert isinstance(dt, DataToken)
assert dt.blob() == 'foo_blob'
| none | 1 | 2.152338 | 2 |
|
methods/unilm_based/unilm/src/pytorch_pretrained_bert/optimization_fp16.py | Guaguago/CommonGen | 100 | 10672 | # coding=utf-8
"""PyTorch optimization for BERT model."""
from apex.contrib.optimizers import FP16_Optimizer
class FP16_Optimizer_State(FP16_Optimizer):
def __init__(self,
init_optimizer,
static_loss_scale=1.0,
dynamic_loss_scale=False,
dynamic_loss_args=None,
verbose=True):
super(FP16_Optimizer_State, self).__init__(init_optimizer,
static_loss_scale, dynamic_loss_scale, dynamic_loss_args, verbose)
def state_dict(self):
"""
Returns a dict containing the current state of this :class:`FP16_Optimizer` instance.
This dict contains attributes of :class:`FP16_Optimizer`, as well as the state_dict
of the contained Pytorch optimizer.
Example::
checkpoint = {}
checkpoint['model'] = model.state_dict()
checkpoint['optimizer'] = optimizer.state_dict()
torch.save(checkpoint, "saved.pth")
"""
state_dict = {}
state_dict['dynamic_loss_scale'] = self.dynamic_loss_scale
state_dict['cur_scale'] = self.cur_scale
state_dict['cur_iter'] = self.cur_iter
if state_dict['dynamic_loss_scale']:
state_dict['last_overflow_iter'] = self.last_overflow_iter
state_dict['scale_factor'] = self.scale_factor
state_dict['scale_window'] = self.scale_window
state_dict['optimizer_state_dict'] = self.optimizer.state_dict()
state_dict['fp32_groups_flat'] = self.fp32_groups_flat
return state_dict
def load_state_dict(self, state_dict):
"""
Loads a state_dict created by an earlier call to state_dict().
If ``fp16_optimizer_instance`` was constructed from some ``init_optimizer``,
whose parameters in turn came from ``model``, it is expected that the user
will call ``model.load_state_dict()`` before
``fp16_optimizer_instance.load_state_dict()`` is called.
Example::
model = torch.nn.Linear(D_in, D_out).cuda().half()
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
optimizer = FP16_Optimizer(optimizer, static_loss_scale = 128.0)
...
checkpoint = torch.load("saved.pth")
model.load_state_dict(checkpoint['model'])
optimizer.load_state_dict(checkpoint['optimizer'])
"""
# I think it should actually be ok to reload the optimizer before the model.
self.dynamic_loss_scale = state_dict['dynamic_loss_scale']
self.cur_scale = state_dict['cur_scale']
self.cur_iter = state_dict['cur_iter']
if state_dict['dynamic_loss_scale']:
self.last_overflow_iter = state_dict['last_overflow_iter']
self.scale_factor = state_dict['scale_factor']
self.scale_window = state_dict['scale_window']
self.optimizer.load_state_dict(state_dict['optimizer_state_dict'])
# At this point, the optimizer's references to the model's fp32 parameters are up to date.
# The optimizer's hyperparameters and internal buffers are also up to date.
# However, the fp32 master copies of the model's fp16 params stored by the optimizer are still
# out of date. There are two options.
# 1: Refresh the master params from the model's fp16 params.
# This requires less storage but incurs precision loss.
# 2: Save and restore the fp32 master copies separately.
# We choose option 2.
#
# Pytorch Optimizer.load_state_dict casts saved buffers (e.g. momentum) to the type and device
# of their associated parameters, because it's possible those buffers might not exist yet in
# the current optimizer instance. In our case, as long as the current FP16_Optimizer has been
# constructed in the same way as the one whose state_dict we are loading, the same master params
# are guaranteed to exist, so we can just copy_() from the saved master params.
for current, saved in zip(self.fp32_groups_flat, state_dict['fp32_groups_flat']):
current.data.copy_(saved.data)
| # coding=utf-8
"""PyTorch optimization for BERT model."""
from apex.contrib.optimizers import FP16_Optimizer
class FP16_Optimizer_State(FP16_Optimizer):
def __init__(self,
init_optimizer,
static_loss_scale=1.0,
dynamic_loss_scale=False,
dynamic_loss_args=None,
verbose=True):
super(FP16_Optimizer_State, self).__init__(init_optimizer,
static_loss_scale, dynamic_loss_scale, dynamic_loss_args, verbose)
def state_dict(self):
"""
Returns a dict containing the current state of this :class:`FP16_Optimizer` instance.
This dict contains attributes of :class:`FP16_Optimizer`, as well as the state_dict
of the contained Pytorch optimizer.
Example::
checkpoint = {}
checkpoint['model'] = model.state_dict()
checkpoint['optimizer'] = optimizer.state_dict()
torch.save(checkpoint, "saved.pth")
"""
state_dict = {}
state_dict['dynamic_loss_scale'] = self.dynamic_loss_scale
state_dict['cur_scale'] = self.cur_scale
state_dict['cur_iter'] = self.cur_iter
if state_dict['dynamic_loss_scale']:
state_dict['last_overflow_iter'] = self.last_overflow_iter
state_dict['scale_factor'] = self.scale_factor
state_dict['scale_window'] = self.scale_window
state_dict['optimizer_state_dict'] = self.optimizer.state_dict()
state_dict['fp32_groups_flat'] = self.fp32_groups_flat
return state_dict
def load_state_dict(self, state_dict):
"""
Loads a state_dict created by an earlier call to state_dict().
If ``fp16_optimizer_instance`` was constructed from some ``init_optimizer``,
whose parameters in turn came from ``model``, it is expected that the user
will call ``model.load_state_dict()`` before
``fp16_optimizer_instance.load_state_dict()`` is called.
Example::
model = torch.nn.Linear(D_in, D_out).cuda().half()
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
optimizer = FP16_Optimizer(optimizer, static_loss_scale = 128.0)
...
checkpoint = torch.load("saved.pth")
model.load_state_dict(checkpoint['model'])
optimizer.load_state_dict(checkpoint['optimizer'])
"""
# I think it should actually be ok to reload the optimizer before the model.
self.dynamic_loss_scale = state_dict['dynamic_loss_scale']
self.cur_scale = state_dict['cur_scale']
self.cur_iter = state_dict['cur_iter']
if state_dict['dynamic_loss_scale']:
self.last_overflow_iter = state_dict['last_overflow_iter']
self.scale_factor = state_dict['scale_factor']
self.scale_window = state_dict['scale_window']
self.optimizer.load_state_dict(state_dict['optimizer_state_dict'])
# At this point, the optimizer's references to the model's fp32 parameters are up to date.
# The optimizer's hyperparameters and internal buffers are also up to date.
# However, the fp32 master copies of the model's fp16 params stored by the optimizer are still
# out of date. There are two options.
# 1: Refresh the master params from the model's fp16 params.
# This requires less storage but incurs precision loss.
# 2: Save and restore the fp32 master copies separately.
# We choose option 2.
#
# Pytorch Optimizer.load_state_dict casts saved buffers (e.g. momentum) to the type and device
# of their associated parameters, because it's possible those buffers might not exist yet in
# the current optimizer instance. In our case, as long as the current FP16_Optimizer has been
# constructed in the same way as the one whose state_dict we are loading, the same master params
# are guaranteed to exist, so we can just copy_() from the saved master params.
for current, saved in zip(self.fp32_groups_flat, state_dict['fp32_groups_flat']):
current.data.copy_(saved.data)
| en | 0.819276 | # coding=utf-8 PyTorch optimization for BERT model. Returns a dict containing the current state of this :class:`FP16_Optimizer` instance. This dict contains attributes of :class:`FP16_Optimizer`, as well as the state_dict of the contained Pytorch optimizer. Example:: checkpoint = {} checkpoint['model'] = model.state_dict() checkpoint['optimizer'] = optimizer.state_dict() torch.save(checkpoint, "saved.pth") Loads a state_dict created by an earlier call to state_dict(). If ``fp16_optimizer_instance`` was constructed from some ``init_optimizer``, whose parameters in turn came from ``model``, it is expected that the user will call ``model.load_state_dict()`` before ``fp16_optimizer_instance.load_state_dict()`` is called. Example:: model = torch.nn.Linear(D_in, D_out).cuda().half() optimizer = torch.optim.SGD(model.parameters(), lr=1e-3) optimizer = FP16_Optimizer(optimizer, static_loss_scale = 128.0) ... checkpoint = torch.load("saved.pth") model.load_state_dict(checkpoint['model']) optimizer.load_state_dict(checkpoint['optimizer']) # I think it should actually be ok to reload the optimizer before the model. # At this point, the optimizer's references to the model's fp32 parameters are up to date. # The optimizer's hyperparameters and internal buffers are also up to date. # However, the fp32 master copies of the model's fp16 params stored by the optimizer are still # out of date. There are two options. # 1: Refresh the master params from the model's fp16 params. # This requires less storage but incurs precision loss. # 2: Save and restore the fp32 master copies separately. # We choose option 2. # # Pytorch Optimizer.load_state_dict casts saved buffers (e.g. momentum) to the type and device # of their associated parameters, because it's possible those buffers might not exist yet in # the current optimizer instance. In our case, as long as the current FP16_Optimizer has been # constructed in the same way as the one whose state_dict we are loading, the same master params # are guaranteed to exist, so we can just copy_() from the saved master params. | 2.504011 | 3 |
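# --- Checkpoint round-trip sketch (not part of the original file) -----------
# Mirrors the docstring examples above for FP16_Optimizer_State; `model`,
# `optimizer` and the file name are placeholders supplied by the caller.
import torch

def save_checkpoint(model, optimizer, path='saved.pth'):
    torch.save({'model': model.state_dict(),
                'optimizer': optimizer.state_dict()}, path)

def load_checkpoint(model, optimizer, path='saved.pth'):
    checkpoint = torch.load(path)
    model.load_state_dict(checkpoint['model'])          # restore fp16 model weights first
    optimizer.load_state_dict(checkpoint['optimizer'])  # then the fp32 master copies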
ermaket/api/generation/__init__.py | SqrtMinusOne/ERMaket_Experiment | 0 | 10673 | from .generator import *
from .types import *
| from .generator import *
from .types import *
| none | 1 | 1.047746 | 1 |
|
Source/stack0verf10w.py | IRIDIUM-SUB/Software-Security-Course-Design | 0 | 10674 |
import Bugdetectionuniversalframe
import os
import re
class overflowdetection(Bugdetectionuniversalframe.uniframe):
def __init__(self):
Bugdetectionuniversalframe.uniframe.__init__(self)
def deploy(self):#Re-write deploy method
flag=0
self.filesort()
if self.path != "":
command=" splint +weak +bounds -hints -varuse +posixlib "+self.path
os.system(command)
r= os.popen(command)
textlist=r.readlines()
final=""
for text in textlist:
#print(text) # print the cmd output
final=final+text
if re.search(r"out-of-bounds|buffer overflow",text):
flag=1
if flag:
final=final+"\n Looks like there is a stack overflow vulnerability."
else:
final="Seems no overflow vulnerability."
self.toolbox.textupdate(self.tokentext,final)
| import Bugdetectionuniversalframe
import os
import re
class overflowdetection(Bugdetectionuniversalframe.uniframe):
def __init__(self):
Bugdetectionuniversalframe.uniframe.__init__(self)
def deploy(self):#Re-write deploy method
flag=0
self.filesort()
if self.path != "":
command=" splint +weak +bounds -hints -varuse +posixlib "+self.path
os.system(command)
r= os.popen(command)
textlist=r.readlines()
final=""
for text in textlist:
#print(text) # print the cmd output
final=final+text
if re.search(r"out-of-bounds|buffer overflow",text):
flag=1
if flag:
final=final+"\n Looks like there is a stack overflow vulnerability."
else:
final="Seems no overflow vulnerability."
self.toolbox.textupdate(self.tokentext,final) | en | 0.537403 | #Re-write deploy method #print(text) # 打印cmd输出结果 | 2.741443 | 3 |
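# --- Standalone sketch of the detection step in deploy() above --------------
# Runs splint on a single C file and flags bounds/overflow warnings; assumes
# splint is installed and on PATH, and the sample filename is hypothetical.
import os
import re

def has_overflow_warning(c_file):
    command = "splint +weak +bounds -hints -varuse +posixlib " + c_file
    output = os.popen(command).read()
    return bool(re.search(r"out-of-bounds|buffer overflow", output))

# has_overflow_warning("vulnerable.c")  # True if splint reports a bounds issue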
ndbc/station.py | ppokhrel1/ndbc | 0 | 10675 |
"""
station.py
"""
from datetime import datetime, timedelta
import gzip
import numpy as np
import requests
import urllib
_BASEURL = 'http://www.ndbc.noaa.gov/data'
_SENSOR_URL = _BASEURL+'/stations/buoyht.txt'
_REALTIME_URL = _BASEURL+'/realtime2/'
_RECENT_URL = _BASEURL+'/stdmet/'
_HISTORICAL_URL = _BASEURL+'/historical/stdmet/'
_STATION_URL = _BASEURL+'/stations/station_table.txt'
class Station(object):
"""NDBC Station class."""
def __init__(self, station_id, starttime=None, endtime=None):
self.id = str(station_id)
self.time = []; self.wspd = []; self.wdir = []; self.gst = []
self.wvht = []; self.dpd = []; self.apd = []; self.mwd = []
self.pres = []; self.atmp = []; self.wtmp = []; self.dewp = []
self.vis = []; self.ptdy = []; self.tide = []
self._get_info()
if starttime and endtime:
self.get_stdmet(starttime, endtime)
def _get_info(self):
"""Collects station metadata."""
r = requests.get(_STATION_URL)
if not r.status_code == 200:
raise RuntimeError('Received response status '
+str(r.status_code)+' from '+_STATION_URL)
lines = r.text.split('\n')
try:
data = [line for line in lines if self.id == line[:5]].pop()
except IndexError:
raise ValueError('Station '+self.id+' not found in '+_STATION_URL)
station_id, self.owner, self.ttype, self.hull, self.name, self.payload,\
self.location, self.timezone, self.forecast, self.note = data.split('|')
loc = self.location.split()
self.lat, self.lon = float(loc[0]), float(loc[2])
if loc[1] == 'S':
self.lat = -self.lat
if loc[3] == 'W':
self.lon = -self.lon
def get_stdmet(self, starttime, endtime):
"""Gets the standard meteorological data given start and end times."""
# re-initialize if we are to overwrite data
#if self.time != [] and self.time != None :
self.__init__(self.id)
if starttime.year < datetime.utcnow().year:
datatype = 'historical'
elif starttime > datetime.utcnow() - timedelta(days=45):
self._get_stdmet_realtime()
return
elif starttime.year == datetime.utcnow().year:
datatype = 'recent'
else:
raise ValueError('starttime cannot be in the future')
time = starttime
while True:
if datatype == 'historical':
filename = self.id+'h'+str(time.year)+'.txt.gz'
fileurl = _HISTORICAL_URL+filename
elif datatype == 'recent':
filename = self.id+str(time.month)+str(time.year)+'.txt.gz'
fileurl = _RECENT_URL+time.strftime('%b')+'/'+filename
f = gzip.open(urllib.request.urlopen(fileurl))
if time.year >= 2007:
datastart = 2
else:
datastart = 1
lines = [line.decode().strip() for line in f.readlines()]
for line in lines[datastart:]:
line = line.split()
try:
self.time.append(datetime.strptime(''.join(line[:5]), '%Y%m%d%H%M'))
nn = 5
except ValueError:
self.time.append(datetime.strptime(''.join(line[:4]), '%Y%m%d%H'))
nn = 4
self.wdir.append(np.nan if line[nn] == '999' else float(line[nn]))
self.wspd.append(np.nan if line[nn+1] == '99.0' else float(line[nn+1]))
self.gst.append(np.nan if line[nn+2] == '99.0' else float(line[nn+2]))
self.wvht.append(np.nan if line[nn+3] == '99.0' else float(line[nn+3]))
self.dpd.append(np.nan if line[nn+4] == '99.0' else float(line[nn+4]))
self.apd.append(np.nan if line[nn+5] == '99.0' else float(line[nn+5]))
self.mwd.append(np.nan if line[nn+6] == '999' else float(line[nn+6]))
self.pres.append(np.nan if line[nn+7] == '9999.0' else float(line[nn+7]))
self.atmp.append(np.nan if line[nn+8] == '99.0' else float(line[nn+8]))
self.wtmp.append(np.nan if line[nn+9] == '99.0' else float(line[nn+9]))
self.dewp.append(np.nan if line[nn+10] == '99.0' else float(line[nn+10]))
if self.time[-1] > endtime:
break
year = time.year
month = time.month
if datatype == 'historical':
year += 1
time = datetime(year, month, 1)
continue
elif datatype == 'recent':
month += 1
if month > 12:
break
else:
continue
self.time = np.array(self.time)
self.wdir = np.array(self.wdir)
self.wspd = np.array(self.wspd)
self.gst = np.array(self.gst)
self.wvht = np.array(self.wvht)
self.dpd = np.array(self.dpd)
self.apd = np.array(self.apd)
self.mwd = np.array(self.mwd)
self.pres = np.array(self.pres)
self.atmp = np.array(self.atmp)
self.wtmp = np.array(self.wtmp)
self.dewp = np.array(self.dewp)
def _get_stdmet_realtime(self):
"""
Reads the full realtime data feed (last 45 days) from the NDBC server.
"""
fileurl = _REALTIME_URL+self.id+'.txt'
r = requests.get(fileurl)
if not r.status_code == 200:
raise RuntimeError('Received response status '
+str(r.status_code)+' from '+fileurl)
lines = r.text.split('\n')
for line in lines[-2:1:-1]:
line = line.split()
self.time.append(datetime.strptime(''.join(line[:5]), '%Y%m%d%H%M'))
self.wdir.append(np.nan if line[5] == 'MM' else float(line[5]))
self.wspd.append(np.nan if line[6] == 'MM' else float(line[6]))
self.gst.append(np.nan if line[7] == 'MM' else float(line[7]))
self.wvht.append(np.nan if line[8] == 'MM' else float(line[8]))
self.dpd.append(np.nan if line[9] == 'MM' else float(line[9]))
self.apd.append(np.nan if line[10] == 'MM' else float(line[10]))
self.mwd.append(np.nan if line[11] == 'MM' else float(line[11]))
self.pres.append(np.nan if line[12] == 'MM' else float(line[12]))
self.atmp.append(np.nan if line[13] == 'MM' else float(line[13]))
self.wtmp.append(np.nan if line[14] == 'MM' else float(line[14]))
self.dewp.append(np.nan if line[15] == 'MM' else float(line[15]))
self.vis.append(np.nan if line[16] == 'MM' else float(line[16]))
self.ptdy.append(np.nan if line[17] == 'MM' else float(line[17]))
self.tide.append(np.nan if line[18] == 'MM' else float(line[18]))
self.time = np.array(self.time)
self.wdir = np.array(self.wdir)
self.wspd = np.array(self.wspd)
self.gst = np.array(self.gst)
self.wvht = np.array(self.wvht)
self.dpd = np.array(self.dpd)
self.apd = np.array(self.apd)
self.mwd = np.array(self.mwd)
self.pres = np.array(self.pres)
self.atmp = np.array(self.atmp)
self.wtmp = np.array(self.wtmp)
self.dewp = np.array(self.dewp)
self.vis = np.array(self.vis)
self.ptdy = np.array(self.ptdy)
self.tide = np.array(self.tide)
| """
station.py
"""
from datetime import datetime, timedelta
import gzip
import numpy as np
import requests
import urllib
_BASEURL = 'http://www.ndbc.noaa.gov/data'
_SENSOR_URL = _BASEURL+'/stations/buoyht.txt'
_REALTIME_URL = _BASEURL+'/realtime2/'
_RECENT_URL = _BASEURL+'/stdmet/'
_HISTORICAL_URL = _BASEURL+'/historical/stdmet/'
_STATION_URL = _BASEURL+'/stations/station_table.txt'
class Station(object):
"""NDBC Station class."""
def __init__(self, station_id, starttime=None, endtime=None):
self.id = str(station_id)
self.time = []; self.wspd = []; self.wdir = []; self.gst = []
self.wvht = []; self.dpd = []; self.apd = []; self.mwd = []
self.pres = []; self.atmp = []; self.wtmp = []; self.dewp = []
self.vis = []; self.ptdy = []; self.tide = []
self._get_info()
if starttime and endtime:
self.get_stdmet(starttime, endtime)
def _get_info(self):
"""Collects station metadata."""
r = requests.get(_STATION_URL)
if not r.status_code == 200:
raise RuntimeError('Received response status '
+str(r.status_code)+' from '+_STATION_URL)
lines = r.text.split('\n')
try:
data = [line for line in lines if self.id == line[:5]].pop()
except IndexError:
raise ValueError('Station '+self.id+' not found in '+_STATION_URL)
station_id, self.owner, self.ttype, self.hull, self.name, self.payload,\
self.location, self.timezone, self.forecast, self.note = data.split('|')
loc = self.location.split()
self.lat, self.lon = float(loc[0]), float(loc[2])
if loc[1] == 'S':
self.lat = -self.lat
if loc[3] == 'W':
self.lon = -self.lon
def get_stdmet(self, starttime, endtime):
"""Gets the standard meteorological data given start and end times."""
# re-initialize if we are to overwrite data
#if self.time != [] and self.time != None :
self.__init__(self.id)
if starttime.year < datetime.utcnow().year:
datatype = 'historical'
elif starttime > datetime.utcnow() - timedelta(days=45):
self._get_stdmet_realtime()
return
elif starttime.year == datetime.utcnow().year:
datatype = 'recent'
else:
raise ValueError('starttime cannot be in the future')
time = starttime
while True:
if datatype == 'historical':
filename = self.id+'h'+str(time.year)+'.txt.gz'
fileurl = _HISTORICAL_URL+filename
elif datatype == 'recent':
filename = self.id+str(time.month)+str(time.year)+'.txt.gz'
fileurl = _RECENT_URL+time.strftime('%b')+'/'+filename
f = gzip.open(urllib.request.urlopen(fileurl))
if time.year >= 2007:
datastart = 2
else:
datastart = 1
lines = [line.decode().strip() for line in f.readlines()]
for line in lines[datastart:]:
line = line.split()
try:
self.time.append(datetime.strptime(''.join(line[:5]), '%Y%m%d%H%M'))
nn = 5
except ValueError:
self.time.append(datetime.strptime(''.join(line[:4]), '%Y%m%d%H'))
nn = 4
self.wdir.append(np.nan if line[nn] == '999' else float(line[nn]))
self.wspd.append(np.nan if line[nn+1] == '99.0' else float(line[nn+1]))
self.gst.append(np.nan if line[nn+2] == '99.0' else float(line[nn+2]))
self.wvht.append(np.nan if line[nn+3] == '99.0' else float(line[nn+3]))
self.dpd.append(np.nan if line[nn+4] == '99.0' else float(line[nn+4]))
self.apd.append(np.nan if line[nn+5] == '99.0' else float(line[nn+5]))
self.mwd.append(np.nan if line[nn+6] == '999' else float(line[nn+6]))
self.pres.append(np.nan if line[nn+7] == '9999.0' else float(line[nn+7]))
self.atmp.append(np.nan if line[nn+8] == '99.0' else float(line[nn+8]))
self.wtmp.append(np.nan if line[nn+9] == '99.0' else float(line[nn+9]))
self.dewp.append(np.nan if line[nn+10] == '99.0' else float(line[nn+10]))
if self.time[-1] > endtime:
break
year = time.year
month = time.month
if datatype == 'historical':
year += 1
time = datetime(year, month, 1)
continue
elif datatype == 'recent':
month += 1
if month > 12:
break
else:
continue
self.time = np.array(self.time)
self.wdir = np.array(self.wdir)
self.wspd = np.array(self.wspd)
self.gst = np.array(self.gst)
self.wvht = np.array(self.wvht)
self.dpd = np.array(self.dpd)
self.apd = np.array(self.apd)
self.mwd = np.array(self.mwd)
self.pres = np.array(self.pres)
self.atmp = np.array(self.atmp)
self.wtmp = np.array(self.wtmp)
self.dewp = np.array(self.dewp)
def _get_stdmet_realtime(self):
"""
Reads the full realtime data feed (last 45 days) from the NDBC server.
"""
fileurl = _REALTIME_URL+self.id+'.txt'
r = requests.get(fileurl)
if not r.status_code == 200:
raise RuntimeError('Received response status '
+str(r.status_code)+' from '+fileurl)
lines = r.text.split('\n')
for line in lines[-2:1:-1]:
line = line.split()
self.time.append(datetime.strptime(''.join(line[:5]), '%Y%m%d%H%M'))
self.wdir.append(np.nan if line[5] == 'MM' else float(line[5]))
self.wspd.append(np.nan if line[6] == 'MM' else float(line[6]))
self.gst.append(np.nan if line[7] == 'MM' else float(line[7]))
self.wvht.append(np.nan if line[8] == 'MM' else float(line[8]))
self.dpd.append(np.nan if line[9] == 'MM' else float(line[9]))
self.apd.append(np.nan if line[10] == 'MM' else float(line[10]))
self.mwd.append(np.nan if line[11] == 'MM' else float(line[11]))
self.pres.append(np.nan if line[12] == 'MM' else float(line[12]))
self.atmp.append(np.nan if line[13] == 'MM' else float(line[13]))
self.wtmp.append(np.nan if line[14] == 'MM' else float(line[14]))
self.dewp.append(np.nan if line[15] == 'MM' else float(line[15]))
self.vis.append(np.nan if line[16] == 'MM' else float(line[16]))
self.ptdy.append(np.nan if line[17] == 'MM' else float(line[17]))
self.tide.append(np.nan if line[18] == 'MM' else float(line[18]))
self.time = np.array(self.time)
self.wdir = np.array(self.wdir)
self.wspd = np.array(self.wspd)
self.gst = np.array(self.gst)
self.wvht = np.array(self.wvht)
self.dpd = np.array(self.dpd)
self.apd = np.array(self.apd)
self.mwd = np.array(self.mwd)
self.pres = np.array(self.pres)
self.atmp = np.array(self.atmp)
self.wtmp = np.array(self.wtmp)
self.dewp = np.array(self.dewp)
self.vis = np.array(self.vis)
self.ptdy = np.array(self.ptdy)
self.tide = np.array(self.tide) | en | 0.797104 | station.py NDBC Station class. Collects station metadata. Gets the standard meteorological data given start and end times. # re-initialize if we are to overwrite data #if self.time != [] and self.time != None : Reads the full realtime data feed (last 45 days) from the NDBC server. | 2.53855 | 3 |
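# --- Usage sketch (not part of the original module) -------------------------
# Fetches live data from the NDBC servers, so it needs network access; buoy
# id '41009' is just an example station.
from datetime import datetime, timedelta

station = Station('41009')
print(station.name, station.lat, station.lon)

# Last week falls inside the 45-day window, so the realtime feed is used.
station.get_stdmet(starttime=datetime.utcnow() - timedelta(days=7),
                   endtime=datetime.utcnow())
print(station.time[-1], station.wspd[-1], station.wvht[-1])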
main.py | kajuna0amendez/Cython_Machine_Learning_Models | 0 | 10676 | # -*- coding: utf-8 -*-
#!/usr/bin/env python
__author__ = "<NAME>"
__copyright__ = "Copyright 2018"
__credits__ = ["<NAME>"]
__license__ = "Apache"
__version__ = "v1.0.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
from data_model.load_data import create_connection, select_all_tasks
from tools.data_frames import dframe_t_db
def main():
database = "/Cython_Code/database/heart.db"
# create a database connection
conn = create_connection(database)
with conn:
print("2. Query all tasks")
rows, name = select_all_tasks(conn, 'heart_table')
return dframe_t_db(rows, name)
if __name__ == '__main__':
df = main()
print(df)
| # -*- coding: utf-8 -*-
#!/usr/bin/env python
__author__ = "<NAME>"
__copyright__ = "Copyright 2018"
__credits__ = ["<NAME>"]
__license__ = "Apache"
__version__ = "v1.0.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
from data_model.load_data import create_connection, select_all_tasks
from tools.data_frames import dframe_t_db
def main():
database = "/Cython_Code/database/heart.db"
# create a database connection
conn = create_connection(database)
with conn:
print("2. Query all tasks")
rows, name = select_all_tasks(conn, 'heart_table')
return dframe_t_db(rows, name)
if __name__ == '__main__':
df = main()
print(df)
| en | 0.400805 | # -*- coding: utf-8 -*- #!/usr/bin/env python # create a database connection | 2.795511 | 3 |
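# --- Sketch of what the helpers above presumably wrap -----------------------
# create_connection/select_all_tasks live in data_model.load_data (not shown
# here); a plain sqlite3 equivalent would look roughly like this. Column-name
# handling is an assumption.
import sqlite3

def create_connection_sketch(db_file):
    return sqlite3.connect(db_file)

def select_all_rows_sketch(conn, table):
    cur = conn.cursor()
    cur.execute("SELECT * FROM " + table)  # table name must come from trusted code
    names = [d[0] for d in cur.description]
    return cur.fetchall(), names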
graw/__init__.py | iamsajjad/graw | 0 | 10677 |
# version of the graw package
__version__ = "0.1.0"
|
# version of the graw package
__version__ = "0.1.0"
| en | 0.790388 | # version of the graw package | 1.113284 | 1 |
sdk/python/pulumi_aws/acm/get_certificate.py | mehd-io/pulumi-aws | 0 | 10678 |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
__all__ = [
'GetCertificateResult',
'AwaitableGetCertificateResult',
'get_certificate',
]
@pulumi.output_type
class GetCertificateResult:
"""
A collection of values returned by getCertificate.
"""
def __init__(__self__, arn=None, domain=None, id=None, key_types=None, most_recent=None, statuses=None, tags=None, types=None):
if arn and not isinstance(arn, str):
raise TypeError("Expected argument 'arn' to be a str")
pulumi.set(__self__, "arn", arn)
if domain and not isinstance(domain, str):
raise TypeError("Expected argument 'domain' to be a str")
pulumi.set(__self__, "domain", domain)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if key_types and not isinstance(key_types, list):
raise TypeError("Expected argument 'key_types' to be a list")
pulumi.set(__self__, "key_types", key_types)
if most_recent and not isinstance(most_recent, bool):
raise TypeError("Expected argument 'most_recent' to be a bool")
pulumi.set(__self__, "most_recent", most_recent)
if statuses and not isinstance(statuses, list):
raise TypeError("Expected argument 'statuses' to be a list")
pulumi.set(__self__, "statuses", statuses)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if types and not isinstance(types, list):
raise TypeError("Expected argument 'types' to be a list")
pulumi.set(__self__, "types", types)
@property
@pulumi.getter
def arn(self) -> str:
"""
Set to the ARN of the found certificate, suitable for referencing in other resources that support ACM certificates.
"""
return pulumi.get(self, "arn")
@property
@pulumi.getter
def domain(self) -> str:
return pulumi.get(self, "domain")
@property
@pulumi.getter
def id(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="keyTypes")
def key_types(self) -> Optional[Sequence[str]]:
return pulumi.get(self, "key_types")
@property
@pulumi.getter(name="mostRecent")
def most_recent(self) -> Optional[bool]:
return pulumi.get(self, "most_recent")
@property
@pulumi.getter
def statuses(self) -> Optional[Sequence[str]]:
return pulumi.get(self, "statuses")
@property
@pulumi.getter
def tags(self) -> Mapping[str, str]:
"""
A mapping of tags for the resource.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def types(self) -> Optional[Sequence[str]]:
return pulumi.get(self, "types")
class AwaitableGetCertificateResult(GetCertificateResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetCertificateResult(
arn=self.arn,
domain=self.domain,
id=self.id,
key_types=self.key_types,
most_recent=self.most_recent,
statuses=self.statuses,
tags=self.tags,
types=self.types)
def get_certificate(domain: Optional[str] = None,
key_types: Optional[Sequence[str]] = None,
most_recent: Optional[bool] = None,
statuses: Optional[Sequence[str]] = None,
tags: Optional[Mapping[str, str]] = None,
types: Optional[Sequence[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetCertificateResult:
"""
Use this data source to get the ARN of a certificate in AWS Certificate
Manager (ACM), you can reference
it by domain without having to hard code the ARNs as input.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
issued = aws.acm.get_certificate(domain="tf.example.com",
statuses=["ISSUED"])
amazon_issued = aws.acm.get_certificate(domain="tf.example.com",
most_recent=True,
types=["AMAZON_ISSUED"])
rsa4096 = aws.acm.get_certificate(domain="tf.example.com",
key_types=["RSA_4096"])
```
:param str domain: The domain of the certificate to look up. If no certificate is found with this name, an error will be returned.
:param Sequence[str] key_types: A list of key algorithms to filter certificates. By default, ACM does not return all certificate types when searching. Valid values are `RSA_1024`, `RSA_2048`, `RSA_4096`, `EC_prime256v1`, `EC_secp384r1`, and `EC_secp521r1`.
:param bool most_recent: If set to true, it sorts the certificates matched by previous criteria by the NotBefore field, returning only the most recent one. If set to false, it returns an error if more than one certificate is found. Defaults to false.
:param Sequence[str] statuses: A list of statuses on which to filter the returned list. Valid values are `PENDING_VALIDATION`, `ISSUED`,
`INACTIVE`, `EXPIRED`, `VALIDATION_TIMED_OUT`, `REVOKED` and `FAILED`. If no value is specified, only certificates in the `ISSUED` state
are returned.
:param Mapping[str, str] tags: A mapping of tags for the resource.
:param Sequence[str] types: A list of types on which to filter the returned list. Valid values are `AMAZON_ISSUED` and `IMPORTED`.
"""
__args__ = dict()
__args__['domain'] = domain
__args__['keyTypes'] = key_types
__args__['mostRecent'] = most_recent
__args__['statuses'] = statuses
__args__['tags'] = tags
__args__['types'] = types
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('aws:acm/getCertificate:getCertificate', __args__, opts=opts, typ=GetCertificateResult).value
return AwaitableGetCertificateResult(
arn=__ret__.arn,
domain=__ret__.domain,
id=__ret__.id,
key_types=__ret__.key_types,
most_recent=__ret__.most_recent,
statuses=__ret__.statuses,
tags=__ret__.tags,
types=__ret__.types)
| # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
__all__ = [
'GetCertificateResult',
'AwaitableGetCertificateResult',
'get_certificate',
]
@pulumi.output_type
class GetCertificateResult:
"""
A collection of values returned by getCertificate.
"""
def __init__(__self__, arn=None, domain=None, id=None, key_types=None, most_recent=None, statuses=None, tags=None, types=None):
if arn and not isinstance(arn, str):
raise TypeError("Expected argument 'arn' to be a str")
pulumi.set(__self__, "arn", arn)
if domain and not isinstance(domain, str):
raise TypeError("Expected argument 'domain' to be a str")
pulumi.set(__self__, "domain", domain)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if key_types and not isinstance(key_types, list):
raise TypeError("Expected argument 'key_types' to be a list")
pulumi.set(__self__, "key_types", key_types)
if most_recent and not isinstance(most_recent, bool):
raise TypeError("Expected argument 'most_recent' to be a bool")
pulumi.set(__self__, "most_recent", most_recent)
if statuses and not isinstance(statuses, list):
raise TypeError("Expected argument 'statuses' to be a list")
pulumi.set(__self__, "statuses", statuses)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if types and not isinstance(types, list):
raise TypeError("Expected argument 'types' to be a list")
pulumi.set(__self__, "types", types)
@property
@pulumi.getter
def arn(self) -> str:
"""
Set to the ARN of the found certificate, suitable for referencing in other resources that support ACM certificates.
"""
return pulumi.get(self, "arn")
@property
@pulumi.getter
def domain(self) -> str:
return pulumi.get(self, "domain")
@property
@pulumi.getter
def id(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="keyTypes")
def key_types(self) -> Optional[Sequence[str]]:
return pulumi.get(self, "key_types")
@property
@pulumi.getter(name="mostRecent")
def most_recent(self) -> Optional[bool]:
return pulumi.get(self, "most_recent")
@property
@pulumi.getter
def statuses(self) -> Optional[Sequence[str]]:
return pulumi.get(self, "statuses")
@property
@pulumi.getter
def tags(self) -> Mapping[str, str]:
"""
A mapping of tags for the resource.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def types(self) -> Optional[Sequence[str]]:
return pulumi.get(self, "types")
class AwaitableGetCertificateResult(GetCertificateResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetCertificateResult(
arn=self.arn,
domain=self.domain,
id=self.id,
key_types=self.key_types,
most_recent=self.most_recent,
statuses=self.statuses,
tags=self.tags,
types=self.types)
def get_certificate(domain: Optional[str] = None,
key_types: Optional[Sequence[str]] = None,
most_recent: Optional[bool] = None,
statuses: Optional[Sequence[str]] = None,
tags: Optional[Mapping[str, str]] = None,
types: Optional[Sequence[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetCertificateResult:
"""
Use this data source to get the ARN of a certificate in AWS Certificate
Manager (ACM), you can reference
it by domain without having to hard code the ARNs as input.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
issued = aws.acm.get_certificate(domain="tf.example.com",
statuses=["ISSUED"])
amazon_issued = aws.acm.get_certificate(domain="tf.example.com",
most_recent=True,
types=["AMAZON_ISSUED"])
rsa4096 = aws.acm.get_certificate(domain="tf.example.com",
key_types=["RSA_4096"])
```
:param str domain: The domain of the certificate to look up. If no certificate is found with this name, an error will be returned.
:param Sequence[str] key_types: A list of key algorithms to filter certificates. By default, ACM does not return all certificate types when searching. Valid values are `RSA_1024`, `RSA_2048`, `RSA_4096`, `EC_prime256v1`, `EC_secp384r1`, and `EC_secp521r1`.
:param bool most_recent: If set to true, it sorts the certificates matched by previous criteria by the NotBefore field, returning only the most recent one. If set to false, it returns an error if more than one certificate is found. Defaults to false.
:param Sequence[str] statuses: A list of statuses on which to filter the returned list. Valid values are `PENDING_VALIDATION`, `ISSUED`,
`INACTIVE`, `EXPIRED`, `VALIDATION_TIMED_OUT`, `REVOKED` and `FAILED`. If no value is specified, only certificates in the `ISSUED` state
are returned.
:param Mapping[str, str] tags: A mapping of tags for the resource.
:param Sequence[str] types: A list of types on which to filter the returned list. Valid values are `AMAZON_ISSUED` and `IMPORTED`.
"""
__args__ = dict()
__args__['domain'] = domain
__args__['keyTypes'] = key_types
__args__['mostRecent'] = most_recent
__args__['statuses'] = statuses
__args__['tags'] = tags
__args__['types'] = types
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('aws:acm/getCertificate:getCertificate', __args__, opts=opts, typ=GetCertificateResult).value
return AwaitableGetCertificateResult(
arn=__ret__.arn,
domain=__ret__.domain,
id=__ret__.id,
key_types=__ret__.key_types,
most_recent=__ret__.most_recent,
statuses=__ret__.statuses,
tags=__ret__.tags,
types=__ret__.types) | en | 0.763646 | # coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** A collection of values returned by getCertificate. Set to the ARN of the found certificate, suitable for referencing in other resources that support ACM certificates. The provider-assigned unique ID for this managed resource. A mapping of tags for the resource. # pylint: disable=using-constant-test Use this data source to get the ARN of a certificate in AWS Certificate Manager (ACM), you can reference it by domain without having to hard code the ARNs as input. ## Example Usage ```python import pulumi import pulumi_aws as aws issued = aws.acm.get_certificate(domain="tf.example.com", statuses=["ISSUED"]) amazon_issued = aws.acm.get_certificate(domain="tf.example.com", most_recent=True, types=["AMAZON_ISSUED"]) rsa4096 = aws.acm.get_certificate(domain="tf.example.com", key_types=["RSA_4096"]) ``` :param str domain: The domain of the certificate to look up. If no certificate is found with this name, an error will be returned. :param Sequence[str] key_types: A list of key algorithms to filter certificates. By default, ACM does not return all certificate types when searching. Valid values are `RSA_1024`, `RSA_2048`, `RSA_4096`, `EC_prime256v1`, `EC_secp384r1`, and `EC_secp521r1`. :param bool most_recent: If set to true, it sorts the certificates matched by previous criteria by the NotBefore field, returning only the most recent one. If set to false, it returns an error if more than one certificate is found. Defaults to false. :param Sequence[str] statuses: A list of statuses on which to filter the returned list. Valid values are `PENDING_VALIDATION`, `ISSUED`, `INACTIVE`, `EXPIRED`, `VALIDATION_TIMED_OUT`, `REVOKED` and `FAILED`. If no value is specified, only certificates in the `ISSUED` state are returned. :param Mapping[str, str] tags: A mapping of tags for the resource. :param Sequence[str] types: A list of types on which to filter the returned list. Valid values are `AMAZON_ISSUED` and `IMPORTED`. | 1.885338 | 2 |
Part 3/batch_VS_stochastic.py | m9psy/neural_network_habr_guide | 20 | 10679 | import numpy as np
import matplotlib.pyplot as plt
TOTAL = 200
STEP = 0.25
EPS = 0.1
INITIAL_THETA = [9, 14]
def func(x):
return 0.2 * x + 3
def generate_sample(total=TOTAL):
x = 0
while x < total * STEP:
yield func(x) + np.random.uniform(-1, 1) * np.random.uniform(2, 8)
x += STEP
def cost_function(A, Y, theta):
return (Y - A@theta).T@(Y - A@theta)
def batch_descent(A, Y, speed=0.001):
theta = np.array(INITIAL_THETA.copy(), dtype=np.float32)
theta.reshape((len(theta), 1))
previous_cost = 10 ** 6
current_cost = cost_function(A, Y, theta)
while np.abs(previous_cost - current_cost) > EPS:
previous_cost = current_cost
derivatives = [0] * len(theta)
# ---------------------------------------------
for j in range(len(theta)):
summ = 0
for i in range(len(Y)):
summ += (Y[i] - A[i]@theta) * A[i][j]
derivatives[j] = summ
# Simultaneous update of both theta components (simultaneity requirement of gradient descent)
theta[0] += speed * derivatives[0]
theta[1] += speed * derivatives[1]
# ---------------------------------------------
current_cost = cost_function(A, Y, theta)
print("Batch cost:", current_cost)
plt.plot(theta[0], theta[1], 'ro')
return theta
def stochastic_descent(A, Y, speed=0.1):
theta = np.array(INITIAL_THETA.copy(), dtype=np.float32)
previous_cost = 10 ** 6
current_cost = cost_function(A, Y, theta)
while np.abs(previous_cost - current_cost) > EPS:
previous_cost = current_cost
# --------------------------------------
# for i in range(len(Y)):
i = np.random.randint(0, len(Y))
derivatives = [0] * len(theta)
for j in range(len(theta)):
derivatives[j] = (Y[i] - A[i]@theta) * A[i][j]
theta[0] += speed * derivatives[0]
theta[1] += speed * derivatives[1]
# --------------------------------------
current_cost = cost_function(A, Y, theta)
print("Stochastic cost:", current_cost)
plt.plot(theta[0], theta[1], 'ro')
return theta
X = np.arange(0, TOTAL * STEP, STEP)
Y = np.array([y for y in generate_sample(TOTAL)])
# Normalization added so that the cost-function paraboloid looks nice
X = (X - X.min()) / (X.max() - X.min())
A = np.empty((TOTAL, 2))
A[:, 0] = 1
A[:, 1] = X
theta = np.linalg.pinv(A).dot(Y)
print(theta, cost_function(A, Y, theta))
import time
start = time.clock()
theta_stochastic = stochastic_descent(A, Y, 0.1)
print("St:", time.clock() - start, theta_stochastic)
start = time.clock()
theta_batch = batch_descent(A, Y, 0.001)
print("Btch:", time.clock() - start, theta_batch)
castle.py | codyd51/castle | 2 | 10680 | import castle
from typing import Tuple
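# Command-line front end for the castle engine: choose who plays, drive the game loop,
# and provide perft/FEN helpers used when debugging move generation.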
def select_player_types() -> Tuple[castle.PlayerType, castle.PlayerType]:
player1, player2 = None, None
while True:
print(f'1) Play a person')
print(f'2) Play the computer')
print(f'3) Play the computer against itself')
choice_str = input(f'Select an option: ')
try:
choice = int(choice_str)
if choice not in [1, 2, 3]:
raise ValueError
except ValueError:
print('Invalid option.\n')
continue
if choice == 1:
player1 = castle.PlayerType.HUMAN
player2 = castle.PlayerType.HUMAN
elif choice == 2:
player1 = castle.PlayerType.HUMAN
player2 = castle.PlayerType.COMPUTER
elif choice == 3:
player1 = castle.PlayerType.COMPUTER
player2 = castle.PlayerType.COMPUTER
break
return player1, player2
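# Turn loop for an already-constructed Game: prints the four castling rights every turn,
# then reports checkmate or stalemate once the game is over.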
def play_constructed_game(g: castle.Game):
g.board.pretty_print()
while not g.finished:
print(f'white short {g.can_castle(castle.Color.WHITE, True)}')
print(f'white long {g.can_castle(castle.Color.WHITE, False)}')
print(f'black short {g.can_castle(castle.Color.BLACK, True)}')
print(f'black long {g.can_castle(castle.Color.BLACK, False)}')
g.play_turn()
winning_prefix = f'Game over by '
if g.winner == castle.Winner.DRAW:
winning_prefix += 'stalemate'
else:
winning_prefix += 'checkmate'
winning_text = f'{winning_prefix}. Winner: {g.winner.name.title()}'
print(winning_text)
def play_game():
player1, player2 = select_player_types()
g = castle.Game(player1, player2)
play_constructed_game(g)
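# perft(n) counts the leaf nodes of the legal-move tree to depth n, a standard sanity
# check for move generation.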
def test_perft():
g = castle.Game(castle.PlayerType.HUMAN, castle.PlayerType.HUMAN)
g.board.pretty_print()
for i in range(10):
print(f'perft({i}) = {g.perft(i)}')
def test_perft2():
g = castle.Game(castle.PlayerType.HUMAN, castle.PlayerType.HUMAN)
g.board.clear()
# https://sites.google.com/site/numptychess/perft/position-3
g.board.place_piece(castle.Piece(castle.PieceType.ROOK, castle.Color.BLACK), 'a8')
g.board.place_piece(castle.Piece(castle.PieceType.KING, castle.Color.BLACK), 'e8')
g.board.place_piece(castle.Piece(castle.PieceType.ROOK, castle.Color.BLACK), 'h8')
g.board.place_piece(castle.Piece(castle.PieceType.PAWN, castle.Color.BLACK), 'a7')
g.board.place_piece(castle.Piece(castle.PieceType.PAWN, castle.Color.BLACK), 'h7')
g.board.place_piece(castle.Piece(castle.PieceType.BISHOP, castle.Color.WHITE), 'a5')
g.board.place_piece(castle.Piece(castle.PieceType.PAWN, castle.Color.BLACK), 'b4')
g.board.place_piece(castle.Piece(castle.PieceType.PAWN, castle.Color.BLACK), 'c4')
g.board.place_piece(castle.Piece(castle.PieceType.PAWN, castle.Color.BLACK), 'e4')
g.board.place_piece(castle.Piece(castle.PieceType.BISHOP, castle.Color.BLACK), 'd3')
g.board.place_piece(castle.Piece(castle.PieceType.PAWN, castle.Color.WHITE), 'a2')
g.board.place_piece(castle.Piece(castle.PieceType.PAWN, castle.Color.WHITE), 'h2')
g.board.place_piece(castle.Piece(castle.PieceType.ROOK, castle.Color.WHITE), 'a1')
g.board.place_piece(castle.Piece(castle.PieceType.KING, castle.Color.WHITE), 'e1')
g.board.place_piece(castle.Piece(castle.PieceType.ROOK, castle.Color.WHITE), 'h1')
g.board.pretty_print()
for i in range(2):
print(f'perft({i}) = {g.perft(i)}')
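# Builds a Game from a FEN string; the successive `game = ...` assignments override one
# another, so only the last position is actually used.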
def fen():
# f = castle.FenGameConstructor('rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1')
game = 'rnbqkbnr/pppppppp/8/8/4P3/8/PPPP1PPP/RNBQKBNR b KQkq e3 0 1'
game = 'r3k2r/p6p/8/B7/1pp1p3/3b4/P6P/R3K2R w KQkq - 0 1'
game = '8/5p2/8/2k3P1/p3K3/8/1P6/8 b - - 0 1'
f = castle.FenGameConstructor(game)
return f.game
def main():
print(f'Welcome to castle, a little chess engine.\n')
# test_perft()
g = fen()
print('returned')
g.print_perft(5)
play_constructed_game(g)
# play_game()
if __name__ == '__main__':
main()
pyfos/utils/configure/switch_configuration_show.py | madhavinaiduprathap/pyfosbrocade | 44 | 10681 | <gh_stars>10-100
#!/usr/bin/env python3
# Copyright 2018 Brocade Communications Systems LLC. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may also obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:mod:`switch_configuration_show` - PyFOS util for configuring switch operation
********************************************************************************
The :mod:`switch_configuration_show` util provides for configuring switch \
operation.
This module is a stand-alone script that can be used to display switch
attributes.
* Input:
* -L=<login>: The login ID. If not provided, an interactive
prompt will request one.
* -P=<password>: The password. If not provided, an interactive
prompt will request one.
* -i=<IP address>: The IP address.
* -f=<VFID>: The VFID or -1 if VF is disabled. If unspecified,
a VFID of 128 is assumed.
* Output:
* The switch attributes in JSON format.
.. function:: show_switch_conf(session)
Example Usage of the Method::
ret = switch_configuration_show.show_switch_conf(session)
print (ret)
Details::
switch_conf_obj = switch_configuration()
result = switch_conf_obj.get(session)
return result
* Input:
:param session: The session returned by login.
* Output:
:rtype: A dictionary of return status matching the REST response.
*Use Cases*
1. Retrieve the configuration parameters of the switch.
"""
import sys
from pyfos import pyfos_auth
import pyfos.pyfos_brocade_fibrechannel_configuration as py_fc
from pyfos import pyfos_util
from pyfos.utils import brcd_util
switch = py_fc.switch_configuration
def show_switch_conf(session):
switch_conf_obj = switch()
result = switch_conf_obj.get(session)
return result
def main(argv):
filters = []
inputs = brcd_util.parse(argv, switch, filters)
session = brcd_util.getsession(inputs)
result = show_switch_conf(inputs['session'])
pyfos_util.response_print(result)
pyfos_auth.logout(session)
if __name__ == "__main__":
main(sys.argv[1:])
vehicle/views.py | BernardAli/vehicle-service-mgt | 105 | 10682 | <filename>vehicle/views.py
from django.shortcuts import render,redirect,reverse
from . import forms,models
from django.db.models import Sum
from django.contrib.auth.models import Group
from django.http import HttpResponseRedirect
from django.contrib.auth.decorators import login_required,user_passes_test
from django.conf import settings
from django.db.models import Q
from django.core.mail import send_mail  # used by contactus_view below; missing from the original imports
def home_view(request):
if request.user.is_authenticated:
return HttpResponseRedirect('afterlogin')
return render(request,'vehicle/index.html')
#for showing signup/login button for customer
def customerclick_view(request):
if request.user.is_authenticated:
return HttpResponseRedirect('afterlogin')
return render(request,'vehicle/customerclick.html')
#for showing signup/login button for mechanics
def mechanicsclick_view(request):
if request.user.is_authenticated:
return HttpResponseRedirect('afterlogin')
return render(request,'vehicle/mechanicsclick.html')
#for showing signup/login button for ADMIN(by sumit)
def adminclick_view(request):
if request.user.is_authenticated:
return HttpResponseRedirect('afterlogin')
return HttpResponseRedirect('adminlogin')
def customer_signup_view(request):
userForm=forms.CustomerUserForm()
customerForm=forms.CustomerForm()
mydict={'userForm':userForm,'customerForm':customerForm}
if request.method=='POST':
userForm=forms.CustomerUserForm(request.POST)
customerForm=forms.CustomerForm(request.POST,request.FILES)
if userForm.is_valid() and customerForm.is_valid():
user=userForm.save()
user.set_password(<PASSWORD>)
user.save()
customer=customerForm.save(commit=False)
customer.user=user
customer.save()
my_customer_group = Group.objects.get_or_create(name='CUSTOMER')
my_customer_group[0].user_set.add(user)
return HttpResponseRedirect('customerlogin')
return render(request,'vehicle/customersignup.html',context=mydict)
def mechanic_signup_view(request):
userForm=forms.MechanicUserForm()
mechanicForm=forms.MechanicForm()
mydict={'userForm':userForm,'mechanicForm':mechanicForm}
if request.method=='POST':
userForm=forms.MechanicUserForm(request.POST)
mechanicForm=forms.MechanicForm(request.POST,request.FILES)
if userForm.is_valid() and mechanicForm.is_valid():
user=userForm.save()
user.set_password(<PASSWORD>)
user.save()
mechanic=mechanicForm.save(commit=False)
mechanic.user=user
mechanic.save()
my_mechanic_group = Group.objects.get_or_create(name='MECHANIC')
my_mechanic_group[0].user_set.add(user)
return HttpResponseRedirect('mechaniclogin')
return render(request,'vehicle/mechanicsignup.html',context=mydict)
#for checking user customer, mechanic or admin(by sumit)
def is_customer(user):
return user.groups.filter(name='CUSTOMER').exists()
def is_mechanic(user):
return user.groups.filter(name='MECHANIC').exists()
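# Post-login router: customers and approved mechanics go to their dashboards, mechanics
# awaiting approval see a waiting page, and any other authenticated user is treated as admin.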
def afterlogin_view(request):
if is_customer(request.user):
return redirect('customer-dashboard')
elif is_mechanic(request.user):
accountapproval=models.Mechanic.objects.all().filter(user_id=request.user.id,status=True)
if accountapproval:
return redirect('mechanic-dashboard')
else:
return render(request,'vehicle/mechanic_wait_for_approval.html')
else:
return redirect('admin-dashboard')
#============================================================================================
# ADMIN RELATED views start
#============================================================================================
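# admin_dashboard_view pairs each service request with its customer via zip(), so the
# customers list built below must stay index-aligned with the enquiry queryset.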
@login_required(login_url='adminlogin')
def admin_dashboard_view(request):
enquiry=models.Request.objects.all().order_by('-id')
customers=[]
for enq in enquiry:
customer=models.Customer.objects.get(id=enq.customer_id)
customers.append(customer)
dict={
'total_customer':models.Customer.objects.all().count(),
'total_mechanic':models.Mechanic.objects.all().count(),
'total_request':models.Request.objects.all().count(),
'total_feedback':models.Feedback.objects.all().count(),
'data':zip(customers,enquiry),
}
return render(request,'vehicle/admin_dashboard.html',context=dict)
@login_required(login_url='adminlogin')
def admin_customer_view(request):
return render(request,'vehicle/admin_customer.html')
@login_required(login_url='adminlogin')
def admin_view_customer_view(request):
customers=models.Customer.objects.all()
return render(request,'vehicle/admin_view_customer.html',{'customers':customers})
@login_required(login_url='adminlogin')
def delete_customer_view(request,pk):
customer=models.Customer.objects.get(id=pk)
user=models.User.objects.get(id=customer.user_id)
user.delete()
customer.delete()
return redirect('admin-view-customer')
@login_required(login_url='adminlogin')
def update_customer_view(request,pk):
customer=models.Customer.objects.get(id=pk)
user=models.User.objects.get(id=customer.user_id)
userForm=forms.CustomerUserForm(instance=user)
customerForm=forms.CustomerForm(request.FILES,instance=customer)
mydict={'userForm':userForm,'customerForm':customerForm}
if request.method=='POST':
userForm=forms.CustomerUserForm(request.POST,instance=user)
customerForm=forms.CustomerForm(request.POST,request.FILES,instance=customer)
if userForm.is_valid() and customerForm.is_valid():
user=userForm.save()
user.set_password(<PASSWORD>)
user.save()
customerForm.save()
return redirect('admin-view-customer')
return render(request,'vehicle/update_customer.html',context=mydict)
@login_required(login_url='adminlogin')
def admin_add_customer_view(request):
userForm=forms.CustomerUserForm()
customerForm=forms.CustomerForm()
mydict={'userForm':userForm,'customerForm':customerForm}
if request.method=='POST':
userForm=forms.CustomerUserForm(request.POST)
customerForm=forms.CustomerForm(request.POST,request.FILES)
if userForm.is_valid() and customerForm.is_valid():
user=userForm.save()
user.set_password(user.password)
user.save()
customer=customerForm.save(commit=False)
customer.user=user
customer.save()
my_customer_group = Group.objects.get_or_create(name='CUSTOMER')
my_customer_group[0].user_set.add(user)
return HttpResponseRedirect('/admin-view-customer')
return render(request,'vehicle/admin_add_customer.html',context=mydict)
@login_required(login_url='adminlogin')
def admin_view_customer_enquiry_view(request):
enquiry=models.Request.objects.all().order_by('-id')
customers=[]
for enq in enquiry:
customer=models.Customer.objects.get(id=enq.customer_id)
customers.append(customer)
return render(request,'vehicle/admin_view_customer_enquiry.html',{'data':zip(customers,enquiry)})
@login_required(login_url='adminlogin')
def admin_view_customer_invoice_view(request):
enquiry=models.Request.objects.values('customer_id').annotate(Sum('cost'))
print(enquiry)
customers=[]
for enq in enquiry:
print(enq)
customer=models.Customer.objects.get(id=enq['customer_id'])
customers.append(customer)
return render(request,'vehicle/admin_view_customer_invoice.html',{'data':zip(customers,enquiry)})
@login_required(login_url='adminlogin')
def admin_mechanic_view(request):
return render(request,'vehicle/admin_mechanic.html')
@login_required(login_url='adminlogin')
def admin_approve_mechanic_view(request):
mechanics=models.Mechanic.objects.all().filter(status=False)
return render(request,'vehicle/admin_approve_mechanic.html',{'mechanics':mechanics})
@login_required(login_url='adminlogin')
def approve_mechanic_view(request,pk):
mechanicSalary=forms.MechanicSalaryForm()
if request.method=='POST':
mechanicSalary=forms.MechanicSalaryForm(request.POST)
if mechanicSalary.is_valid():
mechanic=models.Mechanic.objects.get(id=pk)
mechanic.salary=mechanicSalary.cleaned_data['salary']
mechanic.status=True
mechanic.save()
else:
print("form is invalid")
return HttpResponseRedirect('/admin-approve-mechanic')
return render(request,'vehicle/admin_approve_mechanic_details.html',{'mechanicSalary':mechanicSalary})
@login_required(login_url='adminlogin')
def delete_mechanic_view(request,pk):
mechanic=models.Mechanic.objects.get(id=pk)
user=models.User.objects.get(id=mechanic.user_id)
user.delete()
mechanic.delete()
return redirect('admin-approve-mechanic')
@login_required(login_url='adminlogin')
def admin_add_mechanic_view(request):
userForm=forms.MechanicUserForm()
mechanicForm=forms.MechanicForm()
mechanicSalary=forms.MechanicSalaryForm()
mydict={'userForm':userForm,'mechanicForm':mechanicForm,'mechanicSalary':mechanicSalary}
if request.method=='POST':
userForm=forms.MechanicUserForm(request.POST)
mechanicForm=forms.MechanicForm(request.POST,request.FILES)
mechanicSalary=forms.MechanicSalaryForm(request.POST)
if userForm.is_valid() and mechanicForm.is_valid() and mechanicSalary.is_valid():
user=userForm.save()
user.set_password(user.password)
user.save()
mechanic=mechanicForm.save(commit=False)
mechanic.user=user
mechanic.status=True
mechanic.salary=mechanicSalary.cleaned_data['salary']
mechanic.save()
my_mechanic_group = Group.objects.get_or_create(name='MECHANIC')
my_mechanic_group[0].user_set.add(user)
return HttpResponseRedirect('admin-view-mechanic')
else:
print('problem in form')
return render(request,'vehicle/admin_add_mechanic.html',context=mydict)
@login_required(login_url='adminlogin')
def admin_view_mechanic_view(request):
mechanics=models.Mechanic.objects.all()
return render(request,'vehicle/admin_view_mechanic.html',{'mechanics':mechanics})
@login_required(login_url='adminlogin')
def delete_mechanic_view(request,pk):
mechanic=models.Mechanic.objects.get(id=pk)
user=models.User.objects.get(id=mechanic.user_id)
user.delete()
mechanic.delete()
return redirect('admin-view-mechanic')
@login_required(login_url='adminlogin')
def update_mechanic_view(request,pk):
mechanic=models.Mechanic.objects.get(id=pk)
user=models.User.objects.get(id=mechanic.user_id)
userForm=forms.MechanicUserForm(instance=user)
mechanicForm=forms.MechanicForm(request.FILES,instance=mechanic)
mydict={'userForm':userForm,'mechanicForm':mechanicForm}
if request.method=='POST':
userForm=forms.MechanicUserForm(request.POST,instance=user)
mechanicForm=forms.MechanicForm(request.POST,request.FILES,instance=mechanic)
if userForm.is_valid() and mechanicForm.is_valid():
user=userForm.save()
user.set_password(<PASSWORD>)
user.save()
mechanicForm.save()
return redirect('admin-view-mechanic')
return render(request,'vehicle/update_mechanic.html',context=mydict)
@login_required(login_url='adminlogin')
def admin_view_mechanic_salary_view(request):
mechanics=models.Mechanic.objects.all()
return render(request,'vehicle/admin_view_mechanic_salary.html',{'mechanics':mechanics})
@login_required(login_url='adminlogin')
def update_salary_view(request,pk):
mechanicSalary=forms.MechanicSalaryForm()
if request.method=='POST':
mechanicSalary=forms.MechanicSalaryForm(request.POST)
if mechanicSalary.is_valid():
mechanic=models.Mechanic.objects.get(id=pk)
mechanic.salary=mechanicSalary.cleaned_data['salary']
mechanic.save()
else:
print("form is invalid")
return HttpResponseRedirect('/admin-view-mechanic-salary')
return render(request,'vehicle/admin_approve_mechanic_details.html',{'mechanicSalary':mechanicSalary})
@login_required(login_url='adminlogin')
def admin_request_view(request):
return render(request,'vehicle/admin_request.html')
@login_required(login_url='adminlogin')
def admin_view_request_view(request):
enquiry=models.Request.objects.all().order_by('-id')
customers=[]
for enq in enquiry:
customer=models.Customer.objects.get(id=enq.customer_id)
customers.append(customer)
return render(request,'vehicle/admin_view_request.html',{'data':zip(customers,enquiry)})
@login_required(login_url='adminlogin')
def change_status_view(request,pk):
adminenquiry=forms.AdminApproveRequestForm()
if request.method=='POST':
adminenquiry=forms.AdminApproveRequestForm(request.POST)
if adminenquiry.is_valid():
enquiry_x=models.Request.objects.get(id=pk)
enquiry_x.mechanic=adminenquiry.cleaned_data['mechanic']
enquiry_x.cost=adminenquiry.cleaned_data['cost']
enquiry_x.status=adminenquiry.cleaned_data['status']
enquiry_x.save()
else:
print("form is invalid")
return HttpResponseRedirect('/admin-view-request')
return render(request,'vehicle/admin_approve_request_details.html',{'adminenquiry':adminenquiry})
@login_required(login_url='adminlogin')
def admin_delete_request_view(request,pk):
requests=models.Request.objects.get(id=pk)
requests.delete()
return redirect('admin-view-request')
@login_required(login_url='adminlogin')
def admin_add_request_view(request):
enquiry=forms.RequestForm()
adminenquiry=forms.AdminRequestForm()
mydict={'enquiry':enquiry,'adminenquiry':adminenquiry}
if request.method=='POST':
enquiry=forms.RequestForm(request.POST)
adminenquiry=forms.AdminRequestForm(request.POST)
if enquiry.is_valid() and adminenquiry.is_valid():
enquiry_x=enquiry.save(commit=False)
enquiry_x.customer=adminenquiry.cleaned_data['customer']
enquiry_x.mechanic=adminenquiry.cleaned_data['mechanic']
enquiry_x.cost=adminenquiry.cleaned_data['cost']
enquiry_x.status='Approved'
enquiry_x.save()
else:
print("form is invalid")
return HttpResponseRedirect('admin-view-request')
return render(request,'vehicle/admin_add_request.html',context=mydict)
@login_required(login_url='adminlogin')
def admin_approve_request_view(request):
enquiry=models.Request.objects.all().filter(status='Pending')
return render(request,'vehicle/admin_approve_request.html',{'enquiry':enquiry})
@login_required(login_url='adminlogin')
def approve_request_view(request,pk):
adminenquiry=forms.AdminApproveRequestForm()
if request.method=='POST':
adminenquiry=forms.AdminApproveRequestForm(request.POST)
if adminenquiry.is_valid():
enquiry_x=models.Request.objects.get(id=pk)
enquiry_x.mechanic=adminenquiry.cleaned_data['mechanic']
enquiry_x.cost=adminenquiry.cleaned_data['cost']
enquiry_x.status=adminenquiry.cleaned_data['status']
enquiry_x.save()
else:
print("form is invalid")
return HttpResponseRedirect('/admin-approve-request')
return render(request,'vehicle/admin_approve_request_details.html',{'adminenquiry':adminenquiry})
@login_required(login_url='adminlogin')
def admin_view_service_cost_view(request):
enquiry=models.Request.objects.all().order_by('-id')
customers=[]
for enq in enquiry:
customer=models.Customer.objects.get(id=enq.customer_id)
customers.append(customer)
print(customers)
return render(request,'vehicle/admin_view_service_cost.html',{'data':zip(customers,enquiry)})
@login_required(login_url='adminlogin')
def update_cost_view(request,pk):
updateCostForm=forms.UpdateCostForm()
if request.method=='POST':
updateCostForm=forms.UpdateCostForm(request.POST)
if updateCostForm.is_valid():
enquiry_x=models.Request.objects.get(id=pk)
enquiry_x.cost=updateCostForm.cleaned_data['cost']
enquiry_x.save()
else:
print("form is invalid")
return HttpResponseRedirect('/admin-view-service-cost')
return render(request,'vehicle/update_cost.html',{'updateCostForm':updateCostForm})
@login_required(login_url='adminlogin')
def admin_mechanic_attendance_view(request):
return render(request,'vehicle/admin_mechanic_attendance.html')
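# Attendance is stored as one row per approved mechanic per date; the take-attendance form
# posts a parallel list of present_status values that is matched to mechanics by position.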
@login_required(login_url='adminlogin')
def admin_take_attendance_view(request):
mechanics=models.Mechanic.objects.all().filter(status=True)
aform=forms.AttendanceForm()
if request.method=='POST':
form=forms.AttendanceForm(request.POST)
if form.is_valid():
Attendances=request.POST.getlist('present_status')
date=form.cleaned_data['date']
for i in range(len(Attendances)):
AttendanceModel=models.Attendance()
AttendanceModel.date=date
AttendanceModel.present_status=Attendances[i]
print(mechanics[i].id)
print(int(mechanics[i].id))
mechanic=models.Mechanic.objects.get(id=int(mechanics[i].id))
AttendanceModel.mechanic=mechanic
AttendanceModel.save()
return redirect('admin-view-attendance')
else:
print('form invalid')
return render(request,'vehicle/admin_take_attendance.html',{'mechanics':mechanics,'aform':aform})
@login_required(login_url='adminlogin')
def admin_view_attendance_view(request):
form=forms.AskDateForm()
if request.method=='POST':
form=forms.AskDateForm(request.POST)
if form.is_valid():
date=form.cleaned_data['date']
attendancedata=models.Attendance.objects.all().filter(date=date)
mechanicdata=models.Mechanic.objects.all().filter(status=True)
mylist=zip(attendancedata,mechanicdata)
return render(request,'vehicle/admin_view_attendance_page.html',{'mylist':mylist,'date':date})
else:
print('form invalid')
return render(request,'vehicle/admin_view_attendance_ask_date.html',{'form':form})
@login_required(login_url='adminlogin')
def admin_report_view(request):
reports=models.Request.objects.all().filter(Q(status="Repairing Done") | Q(status="Released"))
dict={
'reports':reports,
}
return render(request,'vehicle/admin_report.html',context=dict)
@login_required(login_url='adminlogin')
def admin_feedback_view(request):
feedback=models.Feedback.objects.all().order_by('-id')
return render(request,'vehicle/admin_feedback.html',{'feedback':feedback})
#============================================================================================
# ADMIN RELATED views END
#============================================================================================
#============================================================================================
# CUSTOMER RELATED views start
#============================================================================================
@login_required(login_url='customerlogin')
@user_passes_test(is_customer)
def customer_dashboard_view(request):
customer=models.Customer.objects.get(user_id=request.user.id)
work_in_progress=models.Request.objects.all().filter(customer_id=customer.id,status='Repairing').count()
work_completed=models.Request.objects.all().filter(customer_id=customer.id).filter(Q(status="Repairing Done") | Q(status="Released")).count()
new_request_made=models.Request.objects.all().filter(customer_id=customer.id).filter(Q(status="Pending") | Q(status="Approved")).count()
bill=models.Request.objects.all().filter(customer_id=customer.id).filter(Q(status="Repairing Done") | Q(status="Released")).aggregate(Sum('cost'))
print(bill)
dict={
'work_in_progress':work_in_progress,
'work_completed':work_completed,
'new_request_made':new_request_made,
'bill':bill['cost__sum'],
'customer':customer,
}
return render(request,'vehicle/customer_dashboard.html',context=dict)
@login_required(login_url='customerlogin')
@user_passes_test(is_customer)
def customer_request_view(request):
customer=models.Customer.objects.get(user_id=request.user.id)
return render(request,'vehicle/customer_request.html',{'customer':customer})
@login_required(login_url='customerlogin')
@user_passes_test(is_customer)
def customer_view_request_view(request):
customer=models.Customer.objects.get(user_id=request.user.id)
enquiries=models.Request.objects.all().filter(customer_id=customer.id , status="Pending")
return render(request,'vehicle/customer_view_request.html',{'customer':customer,'enquiries':enquiries})
@login_required(login_url='customerlogin')
@user_passes_test(is_customer)
def customer_delete_request_view(request,pk):
customer=models.Customer.objects.get(user_id=request.user.id)
enquiry=models.Request.objects.get(id=pk)
enquiry.delete()
return redirect('customer-view-request')
@login_required(login_url='customerlogin')
@user_passes_test(is_customer)
def customer_view_approved_request_view(request):
customer=models.Customer.objects.get(user_id=request.user.id)
enquiries=models.Request.objects.all().filter(customer_id=customer.id).exclude(status='Pending')
return render(request,'vehicle/customer_view_approved_request.html',{'customer':customer,'enquiries':enquiries})
@login_required(login_url='customerlogin')
@user_passes_test(is_customer)
def customer_view_approved_request_invoice_view(request):
customer=models.Customer.objects.get(user_id=request.user.id)
enquiries=models.Request.objects.all().filter(customer_id=customer.id).exclude(status='Pending')
return render(request,'vehicle/customer_view_approved_request_invoice.html',{'customer':customer,'enquiries':enquiries})
@login_required(login_url='customerlogin')
@user_passes_test(is_customer)
def customer_add_request_view(request):
customer=models.Customer.objects.get(user_id=request.user.id)
enquiry=forms.RequestForm()
if request.method=='POST':
enquiry=forms.RequestForm(request.POST)
if enquiry.is_valid():
customer=models.Customer.objects.get(user_id=request.user.id)
enquiry_x=enquiry.save(commit=False)
enquiry_x.customer=customer
enquiry_x.save()
else:
print("form is invalid")
return HttpResponseRedirect('customer-dashboard')
return render(request,'vehicle/customer_add_request.html',{'enquiry':enquiry,'customer':customer})
@login_required(login_url='customerlogin')
@user_passes_test(is_customer)
def customer_profile_view(request):
customer=models.Customer.objects.get(user_id=request.user.id)
return render(request,'vehicle/customer_profile.html',{'customer':customer})
@login_required(login_url='customerlogin')
@user_passes_test(is_customer)
def edit_customer_profile_view(request):
customer=models.Customer.objects.get(user_id=request.user.id)
user=models.User.objects.get(id=customer.user_id)
userForm=forms.CustomerUserForm(instance=user)
customerForm=forms.CustomerForm(request.FILES,instance=customer)
mydict={'userForm':userForm,'customerForm':customerForm,'customer':customer}
if request.method=='POST':
userForm=forms.CustomerUserForm(request.POST,instance=user)
customerForm=forms.CustomerForm(request.POST,instance=customer)
if userForm.is_valid() and customerForm.is_valid():
user=userForm.save()
user.set_password(<PASSWORD>)
user.save()
customerForm.save()
return HttpResponseRedirect('customer-profile')
return render(request,'vehicle/edit_customer_profile.html',context=mydict)
@login_required(login_url='customerlogin')
@user_passes_test(is_customer)
def customer_invoice_view(request):
customer=models.Customer.objects.get(user_id=request.user.id)
enquiries=models.Request.objects.all().filter(customer_id=customer.id).exclude(status='Pending')
return render(request,'vehicle/customer_invoice.html',{'customer':customer,'enquiries':enquiries})
@login_required(login_url='customerlogin')
@user_passes_test(is_customer)
def customer_feedback_view(request):
customer=models.Customer.objects.get(user_id=request.user.id)
feedback=forms.FeedbackForm()
if request.method=='POST':
feedback=forms.FeedbackForm(request.POST)
if feedback.is_valid():
feedback.save()
else:
print("form is invalid")
return render(request,'vehicle/feedback_sent_by_customer.html',{'customer':customer})
return render(request,'vehicle/customer_feedback.html',{'feedback':feedback,'customer':customer})
#============================================================================================
# CUSTOMER RELATED views END
#============================================================================================
#============================================================================================
# MECHANIC RELATED views start
#============================================================================================
@login_required(login_url='mechaniclogin')
@user_passes_test(is_mechanic)
def mechanic_dashboard_view(request):
mechanic=models.Mechanic.objects.get(user_id=request.user.id)
work_in_progress=models.Request.objects.all().filter(mechanic_id=mechanic.id,status='Repairing').count()
work_completed=models.Request.objects.all().filter(mechanic_id=mechanic.id,status='Repairing Done').count()
new_work_assigned=models.Request.objects.all().filter(mechanic_id=mechanic.id,status='Approved').count()
dict={
'work_in_progress':work_in_progress,
'work_completed':work_completed,
'new_work_assigned':new_work_assigned,
'salary':mechanic.salary,
'mechanic':mechanic,
}
return render(request,'vehicle/mechanic_dashboard.html',context=dict)
@login_required(login_url='mechaniclogin')
@user_passes_test(is_mechanic)
def mechanic_work_assigned_view(request):
mechanic=models.Mechanic.objects.get(user_id=request.user.id)
works=models.Request.objects.all().filter(mechanic_id=mechanic.id)
return render(request,'vehicle/mechanic_work_assigned.html',{'works':works,'mechanic':mechanic})
@login_required(login_url='mechaniclogin')
@user_passes_test(is_mechanic)
def mechanic_update_status_view(request,pk):
mechanic=models.Mechanic.objects.get(user_id=request.user.id)
updateStatus=forms.MechanicUpdateStatusForm()
if request.method=='POST':
updateStatus=forms.MechanicUpdateStatusForm(request.POST)
if updateStatus.is_valid():
enquiry_x=models.Request.objects.get(id=pk)
enquiry_x.status=updateStatus.cleaned_data['status']
enquiry_x.save()
else:
print("form is invalid")
return HttpResponseRedirect('/mechanic-work-assigned')
return render(request,'vehicle/mechanic_update_status.html',{'updateStatus':updateStatus,'mechanic':mechanic})
@login_required(login_url='mechaniclogin')
@user_passes_test(is_mechanic)
def mechanic_attendance_view(request):
mechanic=models.Mechanic.objects.get(user_id=request.user.id)
attendaces=models.Attendance.objects.all().filter(mechanic=mechanic)
return render(request,'vehicle/mechanic_view_attendance.html',{'attendaces':attendaces,'mechanic':mechanic})
@login_required(login_url='mechaniclogin')
@user_passes_test(is_mechanic)
def mechanic_feedback_view(request):
mechanic=models.Mechanic.objects.get(user_id=request.user.id)
feedback=forms.FeedbackForm()
if request.method=='POST':
feedback=forms.FeedbackForm(request.POST)
if feedback.is_valid():
feedback.save()
else:
print("form is invalid")
return render(request,'vehicle/feedback_sent.html',{'mechanic':mechanic})
return render(request,'vehicle/mechanic_feedback.html',{'feedback':feedback,'mechanic':mechanic})
@login_required(login_url='mechaniclogin')
@user_passes_test(is_mechanic)
def mechanic_salary_view(request):
mechanic=models.Mechanic.objects.get(user_id=request.user.id)
workdone=models.Request.objects.all().filter(mechanic_id=mechanic.id).filter(Q(status="Repairing Done") | Q(status="Released"))
return render(request,'vehicle/mechanic_salary.html',{'workdone':workdone,'mechanic':mechanic})
@login_required(login_url='mechaniclogin')
@user_passes_test(is_mechanic)
def mechanic_profile_view(request):
mechanic=models.Mechanic.objects.get(user_id=request.user.id)
return render(request,'vehicle/mechanic_profile.html',{'mechanic':mechanic})
@login_required(login_url='mechaniclogin')
@user_passes_test(is_mechanic)
def edit_mechanic_profile_view(request):
mechanic=models.Mechanic.objects.get(user_id=request.user.id)
user=models.User.objects.get(id=mechanic.user_id)
userForm=forms.MechanicUserForm(instance=user)
mechanicForm=forms.MechanicForm(request.FILES,instance=mechanic)
mydict={'userForm':userForm,'mechanicForm':mechanicForm,'mechanic':mechanic}
if request.method=='POST':
userForm=forms.MechanicUserForm(request.POST,instance=user)
mechanicForm=forms.MechanicForm(request.POST,request.FILES,instance=mechanic)
if userForm.is_valid() and mechanicForm.is_valid():
user=userForm.save()
user.set_password(<PASSWORD>)
user.save()
mechanicForm.save()
return redirect('mechanic-profile')
return render(request,'vehicle/edit_mechanic_profile.html',context=mydict)
#============================================================================================
# MECHANIC RELATED views start
#============================================================================================
# for aboutus and contact
def aboutus_view(request):
return render(request,'vehicle/aboutus.html')
def contactus_view(request):
sub = forms.ContactusForm()
if request.method == 'POST':
sub = forms.ContactusForm(request.POST)
if sub.is_valid():
email = sub.cleaned_data['Email']
name=sub.cleaned_data['Name']
message = sub.cleaned_data['Message']
send_mail(str(name)+' || '+str(email),message,settings.EMAIL_HOST_USER, settings.EMAIL_RECEIVING_USER, fail_silently = False)
return render(request, 'vehicle/contactussuccess.html')
return render(request, 'vehicle/contactus.html', {'form':sub})
| <filename>vehicle/views.py
from django.shortcuts import render,redirect,reverse
from . import forms,models
from django.db.models import Sum
from django.contrib.auth.models import Group
from django.http import HttpResponseRedirect
from django.contrib.auth.decorators import login_required,user_passes_test
from django.conf import settings
from django.db.models import Q
def home_view(request):
if request.user.is_authenticated:
return HttpResponseRedirect('afterlogin')
return render(request,'vehicle/index.html')
#for showing signup/login button for customer
def customerclick_view(request):
if request.user.is_authenticated:
return HttpResponseRedirect('afterlogin')
return render(request,'vehicle/customerclick.html')
#for showing signup/login button for mechanics
def mechanicsclick_view(request):
if request.user.is_authenticated:
return HttpResponseRedirect('afterlogin')
return render(request,'vehicle/mechanicsclick.html')
#for showing signup/login button for ADMIN(by sumit)
def adminclick_view(request):
if request.user.is_authenticated:
return HttpResponseRedirect('afterlogin')
return HttpResponseRedirect('adminlogin')
def customer_signup_view(request):
userForm=forms.CustomerUserForm()
customerForm=forms.CustomerForm()
mydict={'userForm':userForm,'customerForm':customerForm}
if request.method=='POST':
userForm=forms.CustomerUserForm(request.POST)
customerForm=forms.CustomerForm(request.POST,request.FILES)
if userForm.is_valid() and customerForm.is_valid():
user=userForm.save()
user.set_password(<PASSWORD>)
user.save()
customer=customerForm.save(commit=False)
customer.user=user
customer.save()
my_customer_group = Group.objects.get_or_create(name='CUSTOMER')
my_customer_group[0].user_set.add(user)
return HttpResponseRedirect('customerlogin')
return render(request,'vehicle/customersignup.html',context=mydict)
def mechanic_signup_view(request):
userForm=forms.MechanicUserForm()
mechanicForm=forms.MechanicForm()
mydict={'userForm':userForm,'mechanicForm':mechanicForm}
if request.method=='POST':
userForm=forms.MechanicUserForm(request.POST)
mechanicForm=forms.MechanicForm(request.POST,request.FILES)
if userForm.is_valid() and mechanicForm.is_valid():
user=userForm.save()
user.set_password(<PASSWORD>)
user.save()
mechanic=mechanicForm.save(commit=False)
mechanic.user=user
mechanic.save()
my_mechanic_group = Group.objects.get_or_create(name='MECHANIC')
my_mechanic_group[0].user_set.add(user)
return HttpResponseRedirect('mechaniclogin')
return render(request,'vehicle/mechanicsignup.html',context=mydict)
#for checking user customer, mechanic or admin(by sumit)
def is_customer(user):
return user.groups.filter(name='CUSTOMER').exists()
def is_mechanic(user):
return user.groups.filter(name='MECHANIC').exists()
def afterlogin_view(request):
if is_customer(request.user):
return redirect('customer-dashboard')
elif is_mechanic(request.user):
accountapproval=models.Mechanic.objects.all().filter(user_id=request.user.id,status=True)
if accountapproval:
return redirect('mechanic-dashboard')
else:
return render(request,'vehicle/mechanic_wait_for_approval.html')
else:
return redirect('admin-dashboard')
#============================================================================================
# ADMIN RELATED views start
#============================================================================================
@login_required(login_url='adminlogin')
def admin_dashboard_view(request):
enquiry=models.Request.objects.all().order_by('-id')
customers=[]
for enq in enquiry:
customer=models.Customer.objects.get(id=enq.customer_id)
customers.append(customer)
dict={
'total_customer':models.Customer.objects.all().count(),
'total_mechanic':models.Mechanic.objects.all().count(),
'total_request':models.Request.objects.all().count(),
'total_feedback':models.Feedback.objects.all().count(),
'data':zip(customers,enquiry),
}
return render(request,'vehicle/admin_dashboard.html',context=dict)
@login_required(login_url='adminlogin')
def admin_customer_view(request):
return render(request,'vehicle/admin_customer.html')
@login_required(login_url='adminlogin')
def admin_view_customer_view(request):
customers=models.Customer.objects.all()
return render(request,'vehicle/admin_view_customer.html',{'customers':customers})
@login_required(login_url='adminlogin')
def delete_customer_view(request,pk):
customer=models.Customer.objects.get(id=pk)
user=models.User.objects.get(id=customer.user_id)
user.delete()
customer.delete()
return redirect('admin-view-customer')
@login_required(login_url='adminlogin')
def update_customer_view(request,pk):
customer=models.Customer.objects.get(id=pk)
user=models.User.objects.get(id=customer.user_id)
userForm=forms.CustomerUserForm(instance=user)
customerForm=forms.CustomerForm(request.FILES,instance=customer)
mydict={'userForm':userForm,'customerForm':customerForm}
if request.method=='POST':
userForm=forms.CustomerUserForm(request.POST,instance=user)
customerForm=forms.CustomerForm(request.POST,request.FILES,instance=customer)
if userForm.is_valid() and customerForm.is_valid():
user=userForm.save()
user.set_password(<PASSWORD>)
user.save()
customerForm.save()
return redirect('admin-view-customer')
return render(request,'vehicle/update_customer.html',context=mydict)
@login_required(login_url='adminlogin')
def admin_add_customer_view(request):
userForm=forms.CustomerUserForm()
customerForm=forms.CustomerForm()
mydict={'userForm':userForm,'customerForm':customerForm}
if request.method=='POST':
userForm=forms.CustomerUserForm(request.POST)
customerForm=forms.CustomerForm(request.POST,request.FILES)
if userForm.is_valid() and customerForm.is_valid():
user=userForm.save()
user.set_password(user.password)
user.save()
customer=customerForm.save(commit=False)
customer.user=user
customer.save()
my_customer_group = Group.objects.get_or_create(name='CUSTOMER')
my_customer_group[0].user_set.add(user)
return HttpResponseRedirect('/admin-view-customer')
return render(request,'vehicle/admin_add_customer.html',context=mydict)
@login_required(login_url='adminlogin')
def admin_view_customer_enquiry_view(request):
enquiry=models.Request.objects.all().order_by('-id')
customers=[]
for enq in enquiry:
customer=models.Customer.objects.get(id=enq.customer_id)
customers.append(customer)
return render(request,'vehicle/admin_view_customer_enquiry.html',{'data':zip(customers,enquiry)})
@login_required(login_url='adminlogin')
def admin_view_customer_invoice_view(request):
enquiry=models.Request.objects.values('customer_id').annotate(Sum('cost'))
print(enquiry)
customers=[]
for enq in enquiry:
print(enq)
customer=models.Customer.objects.get(id=enq['customer_id'])
customers.append(customer)
return render(request,'vehicle/admin_view_customer_invoice.html',{'data':zip(customers,enquiry)})
@login_required(login_url='adminlogin')
def admin_mechanic_view(request):
return render(request,'vehicle/admin_mechanic.html')
@login_required(login_url='adminlogin')
def admin_approve_mechanic_view(request):
mechanics=models.Mechanic.objects.all().filter(status=False)
return render(request,'vehicle/admin_approve_mechanic.html',{'mechanics':mechanics})
@login_required(login_url='adminlogin')
def approve_mechanic_view(request,pk):
mechanicSalary=forms.MechanicSalaryForm()
if request.method=='POST':
mechanicSalary=forms.MechanicSalaryForm(request.POST)
if mechanicSalary.is_valid():
mechanic=models.Mechanic.objects.get(id=pk)
mechanic.salary=mechanicSalary.cleaned_data['salary']
mechanic.status=True
mechanic.save()
else:
print("form is invalid")
return HttpResponseRedirect('/admin-approve-mechanic')
return render(request,'vehicle/admin_approve_mechanic_details.html',{'mechanicSalary':mechanicSalary})
@login_required(login_url='adminlogin')
def delete_mechanic_view(request,pk):
mechanic=models.Mechanic.objects.get(id=pk)
user=models.User.objects.get(id=mechanic.user_id)
user.delete()
mechanic.delete()
return redirect('admin-approve-mechanic')
@login_required(login_url='adminlogin')
def admin_add_mechanic_view(request):
userForm=forms.MechanicUserForm()
mechanicForm=forms.MechanicForm()
mechanicSalary=forms.MechanicSalaryForm()
mydict={'userForm':userForm,'mechanicForm':mechanicForm,'mechanicSalary':mechanicSalary}
if request.method=='POST':
userForm=forms.MechanicUserForm(request.POST)
mechanicForm=forms.MechanicForm(request.POST,request.FILES)
mechanicSalary=forms.MechanicSalaryForm(request.POST)
if userForm.is_valid() and mechanicForm.is_valid() and mechanicSalary.is_valid():
user=userForm.save()
user.set_password(user.password)
user.save()
mechanic=mechanicForm.save(commit=False)
mechanic.user=user
mechanic.status=True
mechanic.salary=mechanicSalary.cleaned_data['salary']
mechanic.save()
my_mechanic_group = Group.objects.get_or_create(name='MECHANIC')
my_mechanic_group[0].user_set.add(user)
return HttpResponseRedirect('admin-view-mechanic')
else:
print('problem in form')
return render(request,'vehicle/admin_add_mechanic.html',context=mydict)
@login_required(login_url='adminlogin')
def admin_view_mechanic_view(request):
mechanics=models.Mechanic.objects.all()
return render(request,'vehicle/admin_view_mechanic.html',{'mechanics':mechanics})
@login_required(login_url='adminlogin')
def delete_mechanic_view(request,pk):
mechanic=models.Mechanic.objects.get(id=pk)
user=models.User.objects.get(id=mechanic.user_id)
user.delete()
mechanic.delete()
return redirect('admin-view-mechanic')
@login_required(login_url='adminlogin')
def update_mechanic_view(request,pk):
mechanic=models.Mechanic.objects.get(id=pk)
user=models.User.objects.get(id=mechanic.user_id)
userForm=forms.MechanicUserForm(instance=user)
mechanicForm=forms.MechanicForm(request.FILES,instance=mechanic)
mydict={'userForm':userForm,'mechanicForm':mechanicForm}
if request.method=='POST':
userForm=forms.MechanicUserForm(request.POST,instance=user)
mechanicForm=forms.MechanicForm(request.POST,request.FILES,instance=mechanic)
if userForm.is_valid() and mechanicForm.is_valid():
user=userForm.save()
user.set_password(<PASSWORD>)
user.save()
mechanicForm.save()
return redirect('admin-view-mechanic')
return render(request,'vehicle/update_mechanic.html',context=mydict)
@login_required(login_url='adminlogin')
def admin_view_mechanic_salary_view(request):
mechanics=models.Mechanic.objects.all()
return render(request,'vehicle/admin_view_mechanic_salary.html',{'mechanics':mechanics})
@login_required(login_url='adminlogin')
def update_salary_view(request,pk):
mechanicSalary=forms.MechanicSalaryForm()
if request.method=='POST':
mechanicSalary=forms.MechanicSalaryForm(request.POST)
if mechanicSalary.is_valid():
mechanic=models.Mechanic.objects.get(id=pk)
mechanic.salary=mechanicSalary.cleaned_data['salary']
mechanic.save()
else:
print("form is invalid")
return HttpResponseRedirect('/admin-view-mechanic-salary')
return render(request,'vehicle/admin_approve_mechanic_details.html',{'mechanicSalary':mechanicSalary})
@login_required(login_url='adminlogin')
def admin_request_view(request):
return render(request,'vehicle/admin_request.html')
@login_required(login_url='adminlogin')
def admin_view_request_view(request):
enquiry=models.Request.objects.all().order_by('-id')
customers=[]
for enq in enquiry:
customer=models.Customer.objects.get(id=enq.customer_id)
customers.append(customer)
return render(request,'vehicle/admin_view_request.html',{'data':zip(customers,enquiry)})
@login_required(login_url='adminlogin')
def change_status_view(request,pk):
adminenquiry=forms.AdminApproveRequestForm()
if request.method=='POST':
adminenquiry=forms.AdminApproveRequestForm(request.POST)
if adminenquiry.is_valid():
enquiry_x=models.Request.objects.get(id=pk)
enquiry_x.mechanic=adminenquiry.cleaned_data['mechanic']
enquiry_x.cost=adminenquiry.cleaned_data['cost']
enquiry_x.status=adminenquiry.cleaned_data['status']
enquiry_x.save()
else:
print("form is invalid")
return HttpResponseRedirect('/admin-view-request')
return render(request,'vehicle/admin_approve_request_details.html',{'adminenquiry':adminenquiry})
@login_required(login_url='adminlogin')
def admin_delete_request_view(request,pk):
requests=models.Request.objects.get(id=pk)
requests.delete()
return redirect('admin-view-request')
@login_required(login_url='adminlogin')
def admin_add_request_view(request):
enquiry=forms.RequestForm()
adminenquiry=forms.AdminRequestForm()
mydict={'enquiry':enquiry,'adminenquiry':adminenquiry}
if request.method=='POST':
enquiry=forms.RequestForm(request.POST)
adminenquiry=forms.AdminRequestForm(request.POST)
if enquiry.is_valid() and adminenquiry.is_valid():
enquiry_x=enquiry.save(commit=False)
enquiry_x.customer=adminenquiry.cleaned_data['customer']
enquiry_x.mechanic=adminenquiry.cleaned_data['mechanic']
enquiry_x.cost=adminenquiry.cleaned_data['cost']
enquiry_x.status='Approved'
enquiry_x.save()
else:
print("form is invalid")
return HttpResponseRedirect('admin-view-request')
return render(request,'vehicle/admin_add_request.html',context=mydict)
@login_required(login_url='adminlogin')
def admin_approve_request_view(request):
enquiry=models.Request.objects.all().filter(status='Pending')
return render(request,'vehicle/admin_approve_request.html',{'enquiry':enquiry})
@login_required(login_url='adminlogin')
def approve_request_view(request,pk):
adminenquiry=forms.AdminApproveRequestForm()
if request.method=='POST':
adminenquiry=forms.AdminApproveRequestForm(request.POST)
if adminenquiry.is_valid():
enquiry_x=models.Request.objects.get(id=pk)
enquiry_x.mechanic=adminenquiry.cleaned_data['mechanic']
enquiry_x.cost=adminenquiry.cleaned_data['cost']
enquiry_x.status=adminenquiry.cleaned_data['status']
enquiry_x.save()
else:
print("form is invalid")
return HttpResponseRedirect('/admin-approve-request')
return render(request,'vehicle/admin_approve_request_details.html',{'adminenquiry':adminenquiry})
@login_required(login_url='adminlogin')
def admin_view_service_cost_view(request):
enquiry=models.Request.objects.all().order_by('-id')
customers=[]
for enq in enquiry:
customer=models.Customer.objects.get(id=enq.customer_id)
customers.append(customer)
print(customers)
return render(request,'vehicle/admin_view_service_cost.html',{'data':zip(customers,enquiry)})
@login_required(login_url='adminlogin')
def update_cost_view(request,pk):
updateCostForm=forms.UpdateCostForm()
if request.method=='POST':
updateCostForm=forms.UpdateCostForm(request.POST)
if updateCostForm.is_valid():
enquiry_x=models.Request.objects.get(id=pk)
enquiry_x.cost=updateCostForm.cleaned_data['cost']
enquiry_x.save()
else:
print("form is invalid")
return HttpResponseRedirect('/admin-view-service-cost')
return render(request,'vehicle/update_cost.html',{'updateCostForm':updateCostForm})
@login_required(login_url='adminlogin')
def admin_mechanic_attendance_view(request):
return render(request,'vehicle/admin_mechanic_attendance.html')
@login_required(login_url='adminlogin')
def admin_take_attendance_view(request):
mechanics=models.Mechanic.objects.all().filter(status=True)
aform=forms.AttendanceForm()
if request.method=='POST':
form=forms.AttendanceForm(request.POST)
if form.is_valid():
Attendances=request.POST.getlist('present_status')
date=form.cleaned_data['date']
for i in range(len(Attendances)):
AttendanceModel=models.Attendance()
AttendanceModel.date=date
AttendanceModel.present_status=Attendances[i]
print(mechanics[i].id)
print(int(mechanics[i].id))
mechanic=models.Mechanic.objects.get(id=int(mechanics[i].id))
AttendanceModel.mechanic=mechanic
AttendanceModel.save()
return redirect('admin-view-attendance')
else:
print('form invalid')
return render(request,'vehicle/admin_take_attendance.html',{'mechanics':mechanics,'aform':aform})
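# Note on admin_take_attendance_view above (an observation added for clarity,
# not part of the original code): request.POST.getlist('present_status') is
# paired with the active mechanics purely by list position, so this appears to
# assume the template renders exactly one 'present_status' input per mechanic,
# in the same order as the Mechanic queryset filtered on status=True.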
@login_required(login_url='adminlogin')
def admin_view_attendance_view(request):
form=forms.AskDateForm()
if request.method=='POST':
form=forms.AskDateForm(request.POST)
if form.is_valid():
date=form.cleaned_data['date']
attendancedata=models.Attendance.objects.all().filter(date=date)
mechanicdata=models.Mechanic.objects.all().filter(status=True)
mylist=zip(attendancedata,mechanicdata)
return render(request,'vehicle/admin_view_attendance_page.html',{'mylist':mylist,'date':date})
else:
print('form invalid')
return render(request,'vehicle/admin_view_attendance_ask_date.html',{'form':form})
@login_required(login_url='adminlogin')
def admin_report_view(request):
reports=models.Request.objects.all().filter(Q(status="Repairing Done") | Q(status="Released"))
dict={
'reports':reports,
}
return render(request,'vehicle/admin_report.html',context=dict)
@login_required(login_url='adminlogin')
def admin_feedback_view(request):
feedback=models.Feedback.objects.all().order_by('-id')
return render(request,'vehicle/admin_feedback.html',{'feedback':feedback})
#============================================================================================
# ADMIN RELATED views END
#============================================================================================
#============================================================================================
# CUSTOMER RELATED views start
#============================================================================================
@login_required(login_url='customerlogin')
@user_passes_test(is_customer)
def customer_dashboard_view(request):
customer=models.Customer.objects.get(user_id=request.user.id)
work_in_progress=models.Request.objects.all().filter(customer_id=customer.id,status='Repairing').count()
work_completed=models.Request.objects.all().filter(customer_id=customer.id).filter(Q(status="Repairing Done") | Q(status="Released")).count()
new_request_made=models.Request.objects.all().filter(customer_id=customer.id).filter(Q(status="Pending") | Q(status="Approved")).count()
bill=models.Request.objects.all().filter(customer_id=customer.id).filter(Q(status="Repairing Done") | Q(status="Released")).aggregate(Sum('cost'))
print(bill)
dict={
'work_in_progress':work_in_progress,
'work_completed':work_completed,
'new_request_made':new_request_made,
'bill':bill['cost__sum'],
'customer':customer,
}
return render(request,'vehicle/customer_dashboard.html',context=dict)
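# Note on customer_dashboard_view above (added for clarity, not part of the
# original code): aggregate(Sum('cost')) returns a dict of the form
# {'cost__sum': <total>}, and the total is None when the customer has no
# completed or released requests, so the template presumably handles a None
# bill value.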
@login_required(login_url='customerlogin')
@user_passes_test(is_customer)
def customer_request_view(request):
customer=models.Customer.objects.get(user_id=request.user.id)
return render(request,'vehicle/customer_request.html',{'customer':customer})
@login_required(login_url='customerlogin')
@user_passes_test(is_customer)
def customer_view_request_view(request):
customer=models.Customer.objects.get(user_id=request.user.id)
enquiries=models.Request.objects.all().filter(customer_id=customer.id , status="Pending")
return render(request,'vehicle/customer_view_request.html',{'customer':customer,'enquiries':enquiries})
@login_required(login_url='customerlogin')
@user_passes_test(is_customer)
def customer_delete_request_view(request,pk):
customer=models.Customer.objects.get(user_id=request.user.id)
enquiry=models.Request.objects.get(id=pk)
enquiry.delete()
return redirect('customer-view-request')
@login_required(login_url='customerlogin')
@user_passes_test(is_customer)
def customer_view_approved_request_view(request):
customer=models.Customer.objects.get(user_id=request.user.id)
enquiries=models.Request.objects.all().filter(customer_id=customer.id).exclude(status='Pending')
return render(request,'vehicle/customer_view_approved_request.html',{'customer':customer,'enquiries':enquiries})
@login_required(login_url='customerlogin')
@user_passes_test(is_customer)
def customer_view_approved_request_invoice_view(request):
customer=models.Customer.objects.get(user_id=request.user.id)
enquiries=models.Request.objects.all().filter(customer_id=customer.id).exclude(status='Pending')
return render(request,'vehicle/customer_view_approved_request_invoice.html',{'customer':customer,'enquiries':enquiries})
@login_required(login_url='customerlogin')
@user_passes_test(is_customer)
def customer_add_request_view(request):
customer=models.Customer.objects.get(user_id=request.user.id)
enquiry=forms.RequestForm()
if request.method=='POST':
enquiry=forms.RequestForm(request.POST)
if enquiry.is_valid():
customer=models.Customer.objects.get(user_id=request.user.id)
enquiry_x=enquiry.save(commit=False)
enquiry_x.customer=customer
enquiry_x.save()
else:
print("form is invalid")
return HttpResponseRedirect('customer-dashboard')
return render(request,'vehicle/customer_add_request.html',{'enquiry':enquiry,'customer':customer})
@login_required(login_url='customerlogin')
@user_passes_test(is_customer)
def customer_profile_view(request):
customer=models.Customer.objects.get(user_id=request.user.id)
return render(request,'vehicle/customer_profile.html',{'customer':customer})
@login_required(login_url='customerlogin')
@user_passes_test(is_customer)
def edit_customer_profile_view(request):
customer=models.Customer.objects.get(user_id=request.user.id)
user=models.User.objects.get(id=customer.user_id)
userForm=forms.CustomerUserForm(instance=user)
customerForm=forms.CustomerForm(instance=customer)
mydict={'userForm':userForm,'customerForm':customerForm,'customer':customer}
if request.method=='POST':
userForm=forms.CustomerUserForm(request.POST,instance=user)
customerForm=forms.CustomerForm(request.POST,request.FILES,instance=customer)
if userForm.is_valid() and customerForm.is_valid():
user=userForm.save()
user.set_password(user.password)
user.save()
customerForm.save()
return HttpResponseRedirect('customer-profile')
return render(request,'vehicle/edit_customer_profile.html',context=mydict)
@login_required(login_url='customerlogin')
@user_passes_test(is_customer)
def customer_invoice_view(request):
customer=models.Customer.objects.get(user_id=request.user.id)
enquiries=models.Request.objects.all().filter(customer_id=customer.id).exclude(status='Pending')
return render(request,'vehicle/customer_invoice.html',{'customer':customer,'enquiries':enquiries})
@login_required(login_url='customerlogin')
@user_passes_test(is_customer)
def customer_feedback_view(request):
customer=models.Customer.objects.get(user_id=request.user.id)
feedback=forms.FeedbackForm()
if request.method=='POST':
feedback=forms.FeedbackForm(request.POST)
if feedback.is_valid():
feedback.save()
else:
print("form is invalid")
return render(request,'vehicle/feedback_sent_by_customer.html',{'customer':customer})
return render(request,'vehicle/customer_feedback.html',{'feedback':feedback,'customer':customer})
#============================================================================================
# CUSTOMER RELATED views END
#============================================================================================
#============================================================================================
# MECHANIC RELATED views start
#============================================================================================
@login_required(login_url='mechaniclogin')
@user_passes_test(is_mechanic)
def mechanic_dashboard_view(request):
mechanic=models.Mechanic.objects.get(user_id=request.user.id)
work_in_progress=models.Request.objects.all().filter(mechanic_id=mechanic.id,status='Repairing').count()
work_completed=models.Request.objects.all().filter(mechanic_id=mechanic.id,status='Repairing Done').count()
new_work_assigned=models.Request.objects.all().filter(mechanic_id=mechanic.id,status='Approved').count()
dict={
'work_in_progress':work_in_progress,
'work_completed':work_completed,
'new_work_assigned':new_work_assigned,
'salary':mechanic.salary,
'mechanic':mechanic,
}
return render(request,'vehicle/mechanic_dashboard.html',context=dict)
@login_required(login_url='mechaniclogin')
@user_passes_test(is_mechanic)
def mechanic_work_assigned_view(request):
mechanic=models.Mechanic.objects.get(user_id=request.user.id)
works=models.Request.objects.all().filter(mechanic_id=mechanic.id)
return render(request,'vehicle/mechanic_work_assigned.html',{'works':works,'mechanic':mechanic})
@login_required(login_url='mechaniclogin')
@user_passes_test(is_mechanic)
def mechanic_update_status_view(request,pk):
mechanic=models.Mechanic.objects.get(user_id=request.user.id)
updateStatus=forms.MechanicUpdateStatusForm()
if request.method=='POST':
updateStatus=forms.MechanicUpdateStatusForm(request.POST)
if updateStatus.is_valid():
enquiry_x=models.Request.objects.get(id=pk)
enquiry_x.status=updateStatus.cleaned_data['status']
enquiry_x.save()
else:
print("form is invalid")
return HttpResponseRedirect('/mechanic-work-assigned')
return render(request,'vehicle/mechanic_update_status.html',{'updateStatus':updateStatus,'mechanic':mechanic})
@login_required(login_url='mechaniclogin')
@user_passes_test(is_mechanic)
def mechanic_attendance_view(request):
mechanic=models.Mechanic.objects.get(user_id=request.user.id)
attendaces=models.Attendance.objects.all().filter(mechanic=mechanic)
return render(request,'vehicle/mechanic_view_attendance.html',{'attendaces':attendaces,'mechanic':mechanic})
@login_required(login_url='mechaniclogin')
@user_passes_test(is_mechanic)
def mechanic_feedback_view(request):
mechanic=models.Mechanic.objects.get(user_id=request.user.id)
feedback=forms.FeedbackForm()
if request.method=='POST':
feedback=forms.FeedbackForm(request.POST)
if feedback.is_valid():
feedback.save()
else:
print("form is invalid")
return render(request,'vehicle/feedback_sent.html',{'mechanic':mechanic})
return render(request,'vehicle/mechanic_feedback.html',{'feedback':feedback,'mechanic':mechanic})
@login_required(login_url='mechaniclogin')
@user_passes_test(is_mechanic)
def mechanic_salary_view(request):
mechanic=models.Mechanic.objects.get(user_id=request.user.id)
workdone=models.Request.objects.all().filter(mechanic_id=mechanic.id).filter(Q(status="Repairing Done") | Q(status="Released"))
return render(request,'vehicle/mechanic_salary.html',{'workdone':workdone,'mechanic':mechanic})
@login_required(login_url='mechaniclogin')
@user_passes_test(is_mechanic)
def mechanic_profile_view(request):
mechanic=models.Mechanic.objects.get(user_id=request.user.id)
return render(request,'vehicle/mechanic_profile.html',{'mechanic':mechanic})
@login_required(login_url='mechaniclogin')
@user_passes_test(is_mechanic)
def edit_mechanic_profile_view(request):
mechanic=models.Mechanic.objects.get(user_id=request.user.id)
user=models.User.objects.get(id=mechanic.user_id)
userForm=forms.MechanicUserForm(instance=user)
mechanicForm=forms.MechanicForm(instance=mechanic)
mydict={'userForm':userForm,'mechanicForm':mechanicForm,'mechanic':mechanic}
if request.method=='POST':
userForm=forms.MechanicUserForm(request.POST,instance=user)
mechanicForm=forms.MechanicForm(request.POST,request.FILES,instance=mechanic)
if userForm.is_valid() and mechanicForm.is_valid():
user=userForm.save()
user.set_password(user.password)
user.save()
mechanicForm.save()
return redirect('mechanic-profile')
return render(request,'vehicle/edit_mechanic_profile.html',context=mydict)
#============================================================================================
# MECHANIC RELATED views start
#============================================================================================
# for aboutus and contact
def aboutus_view(request):
return render(request,'vehicle/aboutus.html')
def contactus_view(request):
sub = forms.ContactusForm()
if request.method == 'POST':
sub = forms.ContactusForm(request.POST)
if sub.is_valid():
email = sub.cleaned_data['Email']
name=sub.cleaned_data['Name']
message = sub.cleaned_data['Message']
send_mail(str(name)+' || '+str(email),message,settings.EMAIL_HOST_USER, settings.EMAIL_RECEIVING_USER, fail_silently = False)
return render(request, 'vehicle/contactussuccess.html')
return render(request, 'vehicle/contactus.html', {'form':sub})
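# Note on contactus_view above (added for clarity, not part of the original
# code): send_mail relies on the email backend being configured in settings
# (EMAIL_HOST_USER for the sender and the project-specific
# EMAIL_RECEIVING_USER list for the recipients, both referenced here); with
# fail_silently=False any SMTP misconfiguration will raise on submit.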
| en | 0.374225 | #for showing signup/login button for customer #for showing signup/login button for mechanics #for showing signup/login button for ADMIN(by sumit) #for checking user customer, mechanic or admin(by sumit) #============================================================================================ # ADMIN RELATED views start #============================================================================================ #============================================================================================ # ADMIN RELATED views END #============================================================================================ #============================================================================================ # CUSTOMER RELATED views start #============================================================================================ #============================================================================================ # CUSTOMER RELATED views END #============================================================================================ #============================================================================================ # MECHANIC RELATED views start #============================================================================================ #============================================================================================ # MECHANIC RELATED views start #============================================================================================ # for aboutus and contact | 2.135892 | 2 |
deep_disfluency/utils/tools.py | treena908/deep_disfluency | 0 | 10683 | import random
import numpy as np
import itertools
import re
from collections import defaultdict
import os
def get_tags(s, open_delim='<', close_delim='/>'):
"""Iterator to spit out the xml style disfluency tags in a given string.
Keyword arguments:
s -- input string
"""
while True:
# Search for the next two delimiters in the source text
start = s.find(open_delim)
end = s.find(close_delim)
# We found a non-empty match
if -1 < start < end:
# Skip the length of the open delimiter
start += len(open_delim)
# Spit out the tag
yield open_delim + s[start:end].strip() + close_delim
# Truncate string to start from last match
s = s[end+len(close_delim):]
else:
return
def remove_uttseg_tag(tag):
tags = get_tags(tag)
final_tag = ""
for t in tags:
m = re.search(r'<[ct]*/>', t)
if m:
continue
final_tag += t
return final_tag
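# Illustrative example for remove_uttseg_tag above (a sketch based on the tag
# conventions in this file, not part of the original code):
# remove_uttseg_tag('<e/><ct/>') strips the utterance-segmentation tag <ct/>
# and returns '<e/>', keeping the disfluency tag itself.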
def convert_to_simple_label(tag, rep="disf1_uttseg"):
"""Takes the complex tag set and gives back the simple,
smaller version with ten tags:
"""
disftag = "<f/>"
if "<rm-" in tag:
disftag = "<rm-0/>"
elif "<e" in tag:
disftag = "<e/>"
if "uttseg" in rep: # if combined task with TTO
m = re.search(r'<[ct]*/>', tag)
if m:
return disftag + m.group(0)
else:
print("WARNING NO TAG", +tag)
return ""
return disftag # if not TT0
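# Illustrative example for convert_to_simple_label above (added for clarity,
# not part of the original code): with the default rep="disf1_uttseg",
# convert_to_simple_label('<rm-2/><rpEndSub/><cc/>') collapses the repair
# detail to '<rm-0/>' and keeps the utterance-segmentation tag, giving
# '<rm-0/><cc/>'.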
def convert_to_simple_idx(tag, rep='1_trp'):
tag = convert_to_simple_label(tag, rep)
simple_tags = """<e/><cc/>
<e/><ct/>
<e/><tc/>
<e/><tt/>
<f/><cc/>
<f/><ct/>
<f/><tc/>
<f/><tt/>
<rm-0/><cc/>
<rm-0/><ct/>""".split("\n")
simple_tag_dict = {}
for s in range(0, len(simple_tags)):
simple_tag_dict[simple_tags[s].strip()] = s
return simple_tag_dict[tag]
def convert_from_full_tag_set_to_idx(tag, rep, idx_to_label):
"""Maps from the full tag set of trp repairs to the new dictionary"""
if "simple" in rep:
tag = convert_to_simple_label(tag)
for k, v in idx_to_label.items():
if v in tag: # a substring relation
return k
def add_word_continuation_tags(tags):
"""In place, add a continutation tag to each word:
<cc/> -word continues current dialogue act and the next word will also
continue it
<ct/> -word continues current dialogue act and is the last word of it
<tc/> -word starts this dialogue act tag and the next word continues it
<tt/> -word starts and ends dialogue act (single word dialogue act)
"""
tags = list(tags)
for i in range(0, len(tags)):
if i == 0:
tags[i] = tags[i] + "<t"
else:
tags[i] = tags[i] + "<c"
if i == len(tags)-1:
tags[i] = tags[i] + "t/>"
else:
tags[i] = tags[i] + "c/>"
return tags
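# Illustrative example for add_word_continuation_tags above (added for
# clarity, not part of the original code):
# add_word_continuation_tags(['<f/>', '<e/>', '<f/>'])
# returns ['<f/><tc/>', '<e/><cc/>', '<f/><ct/>'], i.e. the first word opens
# the dialogue act, the middle word continues it and the last word closes it.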
def verify_disfluency_tags(tags, normalize_ID=False):
"""Check that the repair tags sequence is valid.
Keyword arguments:
normalize_ID -- boolean, whether to convert the repair ID
numbers to be derivable from their unique RPS position in the utterance.
"""
id_map = dict() # map between old ID and new ID
# in first pass get old and new IDs
for i in range(0, len(tags)):
rps = re.findall("<rps id\=\"[0-9]+\"\/>", tags[i])
if rps:
id_map[rps[0][rps[0].find("=")+2:-3]] = str(i)
# key: old repair ID, value, list [reparandum,interregnum,repair]
# all True when repair is all there
repairs = defaultdict(list)
for r in id_map.keys():
repairs[r] = [None, None, None] # three valued None<False<True
# print(repairs)
# second pass verify the validity of the tags
# and (optionally) modify the IDs
for i in range(0, len(tags)): # iterate over all tag strings
new_tags = []
if tags[i] == "":
assert(all([repairs[ID][2] or
repairs[ID] == [None, None, None]
for ID in repairs.keys()])),\
"Unresolved repairs at fluent tag\n\t" + str(repairs)
for tag in get_tags(tags[i]): # iterate over all tags
# print(i)
# print(tag)
if tag == "<e/>":
new_tags.append(tag)
continue
ID = tag[tag.find("=")+2:-3]
if "<rms" in tag:
assert repairs[ID][0] == None,\
"reparandum started parsed more than once " + ID
assert repairs[ID][1] == None,\
"reparandum start again during interregnum phase " + ID
assert repairs[ID][2] == None,\
"reparandum start again during repair phase " + ID
repairs[ID][0] = False # set in progress
elif "<rm " in tag:
assert repairs[ID][0] != None,\
"mid reparandum tag before reparandum start " + ID
assert repairs[ID][1] == None,\
"mid reparandum tag in an interregnum phase or beyond " + ID
assert repairs[ID][2] == None,\
"mid reparandum tag in a repair phase or beyond " + ID
elif "<i" in tag:
assert repairs[ID][0] != None,\
"interregnum start before reparandum start " + ID
assert repairs[ID][2] == None,\
"interregnum in a repair phase " + ID
if repairs[ID][1] == None: # interregnum not reached yet
repairs[ID][0] = True # reparandum completed
repairs[ID][1] = False # interregnum in progress
elif "<rps" in tag:
assert repairs[ID][0] != None,\
"repair start before reparandum start " + ID
assert repairs[ID][1] != True,\
"interregnum over before repair start " + ID
assert repairs[ID][2] == None,\
"repair start parsed twice " + ID
repairs[ID][0] = True # reparandum complete
repairs[ID][1] = True # interregnum complete
repairs[ID][2] = False # repair in progress
elif "<rp " in tag:
assert repairs[ID][0] == True,\
"mid repair word start before reparandum end " + ID
assert repairs[ID][1] == True,\
"mid repair word start before interregnum end " + ID
assert repairs[ID][2] == False,\
"mid repair tag before repair start tag " + ID
elif "<rpn" in tag:
# make sure the rps is order in tag string is before
assert repairs[ID][0] == True,\
"repair end before reparandum end " + ID
assert repairs[ID][1] == True,\
"repair end before interregnum end " + ID
assert repairs[ID][2] == False,\
"repair end before repair start " + ID
repairs[ID][2] = True
# do the replacement of the tag's ID after checking
new_tags.append(tag.replace(ID, id_map[ID]))
if normalize_ID:
tags[i] = "".join(new_tags)
assert all([repairs[ID][2] for ID in repairs.keys()]),\
"Unresolved repairs:\n\t" + str(repairs)
def shuffle(lol, seed):
"""Shuffle inplace each list in the same order.
lol :: list of list as input
seed :: seed the shuffling
"""
for l in lol:
random.seed(seed)
random.shuffle(l)
def minibatch(l, bs):
"""Returns a list of minibatches of indexes
which size is equal to bs
border cases are treated as follow:
eg: [0,1,2,3] and bs = 3
will output:
[[0],[0,1],[0,1,2],[1,2,3]]
l :: list of word idxs
"""
out = [l[:i] for i in range(1, min(bs, len(l)+1))]
out += [l[i-bs:i] for i in range(bs, len(l)+1)]
assert len(l) == len(out)
return out
def indices_from_length(sentence_length, bs, start_index=0):
"""Return a list of indexes pairs (start/stop) for each word
max difference between start and stop equal to bs
border cases are treated as follow:
eg: sentenceLength=4 and bs = 3
will output:
[[0,0],[0,1],[0,2],[1,3]]
"""
l = [start_index + x for x in range(sentence_length)]
out = []
for i in range(0, min(bs, len(l))):
out.append([l[0], l[i]])
for i in range(bs+1, len(l)+1):
out.append([l[i-bs], l[i-1]])
assert len(l) == sentence_length
return out
def context_win(l, win):
"""Return a list of list of indexes corresponding
to context windows surrounding each word in the sentence
given a list of indexes composing a sentence.
win :: int corresponding to the size of the window
"""
assert (win % 2) == 1
assert win >= 1
l = list(l)
lpadded = win//2 * [-1] + l + win//2 * [-1]
out = [lpadded[i:i+win] for i in range(len(l))]
assert len(out) == len(l)
return out
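# Illustrative example for context_win above (added for clarity, not part of
# the original code): with a window of 3,
# context_win([3, 4, 5], 3) -> [[-1, 3, 4], [3, 4, 5], [4, 5, -1]]
# where -1 is the padding index used for out-of-sentence positions.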
def context_win_backwards(l, win):
'''Same as contextwin except only backwards context
(i.e. like an n-gram model)
'''
assert win >= 1
l = list(l)
lpadded = (win-1) * [-1] + l
out = [lpadded[i: i+win] for i in range(len(l))]
assert len(out) == len(l)
return out
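# Illustrative example for context_win_backwards above (added for clarity,
# not part of the original code): with a window of 3,
# context_win_backwards([4, 5, 6], 3) -> [[-1, -1, 4], [-1, 4, 5], [4, 5, 6]]
# i.e. each word only sees itself and the two preceding words.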
def corpus_to_indexed_matrix(my_array_list, win, bs, sentence=False):
"""Returns a matrix of contextwins for a list of utterances of
dimensions win * n_words_in_corpus
(i.e. total length of all arrays in my_array_list)
and corresponding matrix of indexes (of just start/stop for each one)
so 2 * n_words_in_corpus
of where to access these, using bs (backprop distance)
as the limiting history size
"""
sentences = [] # a list (of arrays, or lists?), returned as matrix
indices = [] # a list of index pairs (arrays?), returned as matrix
totalSize = 0
if sentence:
for sent in my_array_list:
mysent = np.asarray([-1] * (bs-1) + list(sent)) # padding with eos
# get list of context windows
mywords = context_win_backwards(mysent, win)
# just one per utterance for now..
cindices = [[totalSize, totalSize+len(mywords)-1]]
cwords = []
for i in range(bs, len(mywords)+1):
words = list(itertools.chain(*mywords[(i-bs):i]))
cwords.append(words) # always (bs * n) words long
# print cwords
sentences.extend(cwords)
indices.extend(cindices)
totalSize += len(cwords)
else:
for sentence in my_array_list:
# get list of context windows
cwords = context_win_backwards(sentence, win)
cindices = indices_from_length(len(cwords), bs, totalSize)
indices.extend(cindices)
sentences.extend(cwords)
totalSize += len(cwords)
for s in sentences:
if any([x is None for x in s]):
print(s)
return np.matrix(sentences, dtype='int32'), indices
def convert_from_eval_tags_to_inc_disfluency_tags(tags, words,
representation="disf1",
limit=8):
"""Conversion from disfluency tagged corpus with xml-style tags
as from STIR (https://bitbucket.org/julianhough/stir)
to the strictly left-to-right schemas as
described by Hough and Schlangen 2015 Interspeech paper,
which are used by RNN architectures at runtime.
Keyword arguments:
tags -- the STIR eval style disfluency tags
words -- the words in the utterance
representation -- the number corresponding to the type of tagging system
1=standard, 2=rm-N values where N does not count intervening edit terms
3=same as 2 but with a 'c' tag after edit terms have ended.
limit -- the limit on the distance back from the repair start
"""
repair_dict = defaultdict(list)
new_tags = []
# print("tags")
# print(tags)
# print('words')
# print(words)
for t in range(0, len(tags)):
if "uttseg" in representation:
m = re.search(r'<[ct]*/>', tags[t])
if m:
TTO_tag = m.group(0)
tags[t] = tags[t].replace(TTO_tag, "")
if "dact" in representation:
m = re.search(r'<diact type="[^\s]*"/>', tags[t])
if m:
dact_tag = m.group(0)
tags[t] = tags[t].replace(dact_tag, "")
if "laugh" in representation:
m = re.search(r'<speechLaugh/>|<laughter/>', tags[t])
if m:
laughter_tag = m.group(0)
else:
laughter_tag = "<nolaughter/>"
tags[t] = tags[t].replace(laughter_tag, "")
current_tag = ""
if "<e/>" in tags[t] or "<i" in tags[t]:
current_tag = "<e/>" # TODO may make this an interregnum
if "<rms" in tags[t]:
rms = re.findall("<rms id\=\"[0-9]+\"\/>", tags[t], re.S)
for r in rms:
repairID = r[r.find("=")+2:-3]
repair_dict[repairID] = [t, 0]
if "<rps" in tags[t]:
rps = re.findall("<rps id\=\"[0-9]+\"\/>", tags[t], re.S)
for r in rps:
repairID = r[r.find("=")+2:-3]
# print('repairID')
# print(repairID)
# print(repair_dict.get(repairID))
# print(str(repairID)+str(tags)+str(words))
assert repair_dict.get(repairID), str(repairID)+str(tags)+str(words)
repair_dict[repairID][1] = t
dist = min(t-repair_dict[repairID][0], limit)
# adjust in case the reparandum is shortened due to the limit
repair_dict[repairID][0] = t-dist
current_tag += "<rm-{}/>".format(dist) + "<rpMid/>"
if "<rpn" in tags[t]:
rpns = re.findall("<rpnrep id\=\"[0-9]+\"\/>", tags[t], re.S) +\
re.findall("<rpnsub id\=\"[0-9]+\"\/>", tags[t], re.S)
rpns_del = re.findall("<rpndel id\=\"[0-9]+\"\/>", tags[t], re.S)
# slight simplifying assumption is to take the repair with
# the longest reparandum as the end category
repair_type = ""
longestlength = 0
for r in rpns:
repairID = r[r.find("=")+2:-3]
l = repair_dict[repairID]
if l[1]-l[0] > longestlength:
longestlength = l[1]-l[0]
repair_type = "Sub"
for r in rpns_del:
repairID = r[r.find("=")+2:-3]
l = repair_dict[repairID]
if l[1]-l[0] > longestlength:
longestlength = l[1]-l[0]
repair_type = "Del"
if repair_type == "":
raise Exception("Repair not passed \
correctly."+str(words)+str(tags))
current_tag += "<rpEnd"+repair_type+"/>"
current_tag = current_tag.replace("<rpMid/>", "")
if current_tag == "":
current_tag = "<f/>"
if "uttseg" in representation:
current_tag += TTO_tag
if "dact" in representation:
current_tag += dact_tag
if "laugh" in representation:
current_tag += laughter_tag
new_tags.append(current_tag)
return new_tags
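# Illustrative example for convert_from_eval_tags_to_inc_disfluency_tags above
# (added for clarity, not part of the original code; it mirrors the self-test
# in the __main__ block at the bottom of this file). For the utterance
# "i like uh love ..." with eval-style tags
# ['<f/>', '<rms id="3"/>', '<i id="3"/><e/>', '<rps id="3"/><rpnsub id="3"/>', ...]
# the function yields the left-to-right tags
# ['<f/>', '<f/>', '<e/>', '<rm-2/><rpEndSub/>', ...], where <rm-2/> marks a
# repair whose reparandum starts two words back from the repair onset.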
def convert_from_inc_disfluency_tags_to_eval_tags(
tags, words,
start=0,
representation="disf1_uttseg"):
"""Converts the incremental style output tags of the RNN to the standard
STIR eval output tags.
The exact inverse of convertFromEvalTagsToIncrementalDisfluencyTags.
Keyword arguments:
tags -- the RNN style disfluency tags
words -- the words in the utterance
start -- position from where to begin changing the tags from
representation -- the number corresponding to the type of tagging system,
1=standard, 2=rm-N values where N does not count intervening edit terms
3=same as 2 but with a 'c' tag after edit terms have ended.
"""
# maps from the repair ID to a list of
# [reparandumStart,repairStart,repairOver]
repair_dict = defaultdict(list)
new_tags = []
if start > 0:
# assuming the tags up to this point are already converted
new_tags = tags[:start]
if "mid" not in representation:
rps_s = re.findall("<rps id\=\"[0-9]+\"\/>", tags[start-1])
rpmid = re.findall("<rp id\=\"[0-9]+\"\/>", tags[start-1])
if rps_s:
for r in rps_s:
repairID = r[r.find("=")+2:-3]
resolved_repair = re.findall(
"<rpn[repsubdl]+ id\=\"{}\"\/>"
.format(repairID), tags[start-1])
if not resolved_repair:
if not rpmid:
rpmid = []
rpmid.append(r.replace("rps ", "rp "))
if rpmid:
newstart = start-1
for rp in rpmid:
rps = rp.replace("rp ", "rps ")
repairID = rp[rp.find("=")+2:-3]
# go back and find the repair
for b in range(newstart, -1, -1):
if rps in tags[b]:
repair_dict[repairID] = [b, b, False]
break
for t in range(start, len(tags)):
current_tag = ""
if "uttseg" in representation:
m = re.search(r'<[ct]*/>', tags[t])
if m:
TTO_tag = m.group(0)
if "<e/>" in tags[t] or "<i/>" in tags[t]:
current_tag = "<e/>"
if "<rm-" in tags[t]:
rps = re.findall("<rm-[0-9]+\/>", tags[t], re.S)
for r in rps: # should only be one
current_tag += '<rps id="{}"/>'.format(t)
# print t-dist
if "simple" in representation:
# simply tagging the rps
pass
else:
dist = int(r[r.find("-")+1:-2])
repair_dict[str(t)] = [max([0, t-dist]), t, False]
# backwards looking search if full set
# print new_tags, t, dist, t-dist, max([0, t-dist])
# print tags[:t+1]
rms_start_idx = max([0, t-dist])
new_tags[rms_start_idx] = '<rms id="{}"/>'\
.format(t) + new_tags[rms_start_idx]\
.replace("<f/>", "")
reparandum = False # interregnum if edit term
for b in range(t-1, max([0, t-dist]), -1):
if "<e" not in new_tags[b]:
reparandum = True
new_tags[b] = '<rm id="{}"/>'.format(t) +\
new_tags[b].replace("<f/>", "")
if reparandum is False and "<e" in new_tags[b]:
new_tags[b] = '<i id="{}"/>'.\
format(t) + new_tags[b]
# repair ends
if "<rpEnd" in tags[t]:
rpns = re.findall("<rpEndSub/>", tags[t], re.S)
rpns_del = re.findall("<rpEndDel/>", tags[t], re.S)
rpnAll = rpns + rpns_del
if rpnAll:
for k, v in repair_dict.items():
if t >= int(k) and v[2] is False:
repair_dict[k][2] = True
# classify the repair
if rpns_del: # a delete
current_tag += '<rpndel id="{}"/>'.format(k)
rpns_del.pop(0)
continue
reparandum = [words[i] for i in range(0, len(new_tags))
if '<rms id="{}"/>'.
format(k) in new_tags[i] or
'<rm id="{}"/>'.
format(k) in new_tags[i]]
repair = [words[i] for i in range(0, len(new_tags))
if '<rps id="{}"/>'.format(k)
in new_tags[i] or '<rp id="{}"/>'.format(k)
in new_tags[i]] + [words[t]]
if reparandum == repair:
current_tag += '<rpnrep id="{}"/>'.format(k)
else:
current_tag += '<rpnsub id="{}"/>'.format(k)
# mid repair phases still in progress
for k, v in repair_dict.items():
if t > int(k) and v[2] is False:
current_tag += '<rp id="{}"/>'.format(k)
if current_tag == "":
current_tag = "<f/>"
if "uttseg" in representation:
current_tag += TTO_tag
new_tags.append(current_tag)
return new_tags
def verify_dialogue_data_matrix(dialogue_data_matrix, word_dict=None,
pos_dict=None, tag_dict=None, n_lm=0,
n_acoustic=0):
"""Boolean check of whether dialogue data consistent
with args. Checks all idxs are valid and number of features is correct.
Standard form of each row of the matrix should be:
utt_index, word_idx, pos_idx, word_duration,
acoustic_feats.., lm_feats....,label
"""
l = 3 + n_acoustic + n_lm + 1 # row length
try:
for i, row in enumerate(dialogue_data_matrix):
assert len(row) == l,\
"row {} wrong length {}, should be {}".format(i, len(row), l)
assert word_dict[row[1]] is not None,\
"row[1][{}] {} not in word dict".format(i, row[1])
assert pos_dict[row[2]] is not None,\
"row[2][{}] {} not in POS dict".format(i, row[2])
assert tag_dict[row[-1]] is not None,\
"row[-1][{}] {} not in tag dict".format(i, row[-1])
except AssertionError as a:
print(a)
return False
return True
def verify_dialogue_data_matrices_from_folder(matrices_folder_filepath,
word_dict=None,
pos_dict=None,
tag_dict=None,
n_lm=0,
n_acoustic=0):
"""A boolean check that the dialogue matrices make sense for the
particular configuration in args and tag2idx dicts.
"""
for dialogue_file in os.listdir(matrices_folder_filepath):
v = np.load(matrices_folder_filepath + "/" + dialogue_file,allow_pickle=True)
if not verify_dialogue_data_matrix(v,
word_dict=word_dict,
pos_dict=pos_dict,
tag_dict=tag_dict,
n_lm=n_lm,
n_acoustic=n_acoustic):
# print"{} failed test".format(dialogue_file)
return False
return True
def dialogue_data_and_indices_from_matrix(d_matrix,
n_extra,
pre_seg=False,
window_size=2,
bs=9,
tag_rep="disf1_uttseg",
tag_to_idx_map=None,
in_utterances=False):
"""Transforming from input format of row:
utt_index, word_idx, pos_idx, word_duration,
acoustic_feats.., lm_feats....,label
to 5-tuple of:
word_idx, pos_idx, extra, labels, indices
where :word_idx: and :pos_idx: have the correct window context
according to @window_size
and :indices: is the start and stop points for consumption by the
net in training for each label in :labels:. :extra: is the matrix
of extra features.
"""
if len(d_matrix)==0:
return
utt_indices = d_matrix[:, 0]
words = d_matrix[:, 1]
pos = d_matrix[:, 2]
extra = None if n_extra == 0 else d_matrix[:, 3: -1]
labels = d_matrix[:, -1]
word_idx = []
pos_idx = []
current = []
indices = []
previous_idx = -1
for i, a_tuple in enumerate(zip(utt_indices, words, pos, labels)):
utt_idx, w, p, l = a_tuple
# print(w)
current.append((w, p, l))
if pre_seg:
if previous_idx != utt_idx or i == len(labels)-1:
if in_utterances:
start = 0 if indices == [] else indices[-1][1]+1
indices.append([start, start + (len(current)-1)])
else:
indices.extend(indices_from_length(len(current), bs,
start_index=len(indices)))
word_idx.extend(context_win_backwards([x[0] for x in current],
window_size))
pos_idx.extend(context_win_backwards([x[1] for x in current],
window_size))
current = []
# print('final')
# print(w)
# print(word_idx)
elif i == len(labels)-1:
# indices = indices_from_length(len(current), bs)
# currently a simple window of same size
indices = [[j, j + bs] for j in range(0, len(current))]
padding = [[-1, -1]] * (bs - window_size)
word_idx = padding + context_win_backwards([x[0] for x in current],
window_size)
pos_idx = padding + context_win_backwards([x[1] for x in current],
window_size)
previous_idx = utt_idx
# print(pos_idx)
# print(word_idx)
# print(extra)
# print(labels)
# print(indices)
# return np.asarray(word_idx, dtype=np.int32), np.asarray(pos_idx,
# dtype=np.int32),\
# labels,\
# np.asarray(indices, dtype=np.int32)
return np.asarray(word_idx, dtype=np.int32), np.asarray(pos_idx,
dtype=np.int32),\
extra,\
labels,\
np.asarray(indices, dtype=np.int32)
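# Usage sketch for dialogue_data_and_indices_from_matrix above (an assumed
# calling pattern inferred from its docstring, not part of the original code):
# given a dialogue matrix whose rows follow
# [utt_index, word_idx, pos_idx, <extra feature columns>, label],
# something like
# word_idx, pos_idx, extra, labels, indices = \
# dialogue_data_and_indices_from_matrix(d_matrix, n_extra, pre_seg=True,
# window_size=2, bs=9)
# gives back windowed word/POS index arrays, the extra-feature matrix, the
# labels and the start/stop indices consumed per label during training.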
if __name__ == '__main__':
tags = '<f/>,<rms id="3"/>,<i id="3"/><e/>,<rps id="3"/>' +\
'<rpnsub id="3"/>,<f/>,<e/>,<f/>,' + \
'<f/>'
tags = tags.split(",")
words = "i,like,uh,love,to,uh,love,alot".split(",")
# print(tags)
# print(len(tags))
# print(len(words))
new_tags = convert_from_eval_tags_to_inc_disfluency_tags(
tags,
words,
representation="disf1")
# print(new_tags)
old_tags = convert_from_inc_disfluency_tags_to_eval_tags(
new_tags,
words,
representation="disf1")
assert old_tags == tags, "\n " + str(old_tags) + "\n" + str(tags)
x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
# print(context_win_backwards(x, 2))
# print "indices", indices_from_length(11, 9)
| en | 0.753053 | Iterator to spit out the xml style disfluency tags in a given string. Keyword arguments: s -- input string # Search for the next two delimiters in the source text # We found a non-empty match # Skip the length of the open delimiter # Spit out the tag # Truncate string to start from last match Takes the complex tag set and gives back the simple, smaller version with ten tags: # if combined task with TTO # if not TT0 <e/><cc/> <e/><ct/> <e/><tc/> <e/><tt/> <f/><cc/> <f/><ct/> <f/><tc/> <f/><tt/> <rm-0/><cc/> <rm-0/><ct/> Maps from the full tag set of trp repairs to the new dictionary # a substring relation In place, add a continutation tag to each word: <cc/> -word continues current dialogue act and the next word will also continue it <ct/> -word continues current dialogue act and is the last word of it <tc/> -word starts this dialogue act tag and the next word continues it <tt/> -word starts and ends dialogue act (single word dialogue act) Check that the repair tags sequence is valid. Keyword arguments: normalize_ID -- boolean, whether to convert the repair ID numbers to be derivable from their unique RPS position in the utterance. # map between old ID and new ID # in first pass get old and new IDs # key: old repair ID, value, list [reparandum,interregnum,repair] # all True when repair is all there # three valued None<False<True # print(repairs) # second pass verify the validity of the tags # and (optionally) modify the IDs # iterate over all tag strings # iterate over all tags # print(i) # print(tag) # set in progress # interregnum not reached yet # reparandum completed # interregnum in progress # reparanudm complete # interregnum complete # repair in progress # make sure the rps is order in tag string is before # do the replacement of the tag's ID after checking Shuffle inplace each list in the same order. lol :: list of list as input seed :: seed the shuffling Returns a list of minibatches of indexes which size is equal to bs border cases are treated as follow: eg: [0,1,2,3] and bs = 3 will output: [[0],[0,1],[0,1,2],[1,2,3]] l :: list of word idxs Return a list of indexes pairs (start/stop) for each word max difference between start and stop equal to bs border cases are treated as follow: eg: sentenceLength=4 and bs = 3 will output: [[0,0],[0,1],[0,2],[1,3]] Return a list of list of indexes corresponding to context windows surrounding each word in the sentence given a list of indexes composing a sentence. win :: int corresponding to the size of the window Same as contextwin except only backwards context (i.e. like an n-gram model) Returns a matrix of contextwins for a list of utterances of dimensions win * n_words_in_corpus (i.e. total length of all arrays in my_array_list) and corresponding matrix of indexes (of just start/stop for each one) so 2 * n_words_in_corpus of where to access these, using bs (backprop distance) as the limiting history size # a list (of arrays, or lists?), returned as matrix # a list of index pairs (arrays?), returned as matrix # padding with eos # get list of context windows # just one per utterance for now.. # always (bs * n) words long # print cwords # get list of context windows Conversion from disfluency tagged corpus with xml-style tags as from STIR (https://bitbucket.org/julianhough/stir) to the strictly left-to-right schemas as described by Hough and Schlangen 2015 Interspeech paper, which are used by RNN architectures at runtime. 
Keyword arguments: tags -- the STIR eval style disfluency tags words -- the words in the utterance representation -- the number corresponding to the type of tagging system 1=standard, 2=rm-N values where N does not count intervening edit terms 3=same as 2 but with a 'c' tag after edit terms have ended. limit -- the limit on the distance back from the repair start # print("tags") # print(tags) # print('words') # print(words) # TODO may make this an interregnum # print('repairID') # print(repairID) # print(repair_dict.get(repairID)) # print(str(repairID)+str(tags)+str(words)) # adjust in case the reparandum is shortened due to the limit # slight simplifying assumption is to take the repair with # the longest reparandum as the end category Converts the incremental style output tags of the RNN to the standard STIR eval output tags. The exact inverse of convertFromEvalTagsToIncrementalDisfluencyTags. Keyword arguments: tags -- the RNN style disfluency tags words -- the words in the utterance start -- position from where to begin changing the tags from representation -- the number corresponding to the type of tagging system, 1=standard, 2=rm-N values where N does not count intervening edit terms 3=same as 2 but with a 'c' tag after edit terms have ended. # maps from the repair ID to a list of # [reparandumStart,repairStart,repairOver] # assuming the tags up to this point are already converted # go back and find the repair # should only be one # print t-dist # simply tagging the rps # backwards looking search if full set # print new_tags, t, dist, t-dist, max([0, t-dist]) # print tags[:t+1] # interregnum if edit term # repair ends # classify the repair # a delete # mid repair phases still in progress Boolean check of whether dialogue data consistent with args. Checks all idxs are valid and number of features is correct. Standard form of each row of the matrix should be: utt_index, word_idx, pos_idx, word_duration, acoustic_feats.., lm_feats....,label # row length A boolean check that the dialogue matrices make sense for the particular configuration in args and tag2idx dicts. # print"{} failed test".format(dialogue_file) Transforming from input format of row: utt_index, word_idx, pos_idx, word_duration, acoustic_feats.., lm_feats....,label to 5-tuple of: word_idx, pos_idx, extra, labels, indices where :word_idx: and :pos_idx: have the correct window context according to @window_size and :indices: is the start and stop points for consumption by the net in training for each label in :labels:. :extra: is the matrix of extra features. # print(w) # print('final') # print(w) # print(word_idx) # indices = indices_from_length(len(current), bs) # currently a simple window of same size # print(pos_idx) # print(word_idx) # print(extra) # print(labels) # print(indices) # return np.asarray(word_idx, dtype=np.int32), np.asarray(pos_idx, # dtype=np.int32),\ # labels,\ # np.asarray(indices, dtype=np.int32) # print(tags) # print(len(tags)) # print(len(words)) # print(new_tags) # print(context_win_backwards(x, 2)) # print "indices", indices_from_length(11, 9) | 3.224043 | 3 |
library/favourite/api/pagination.py | furkan-34/library-DRF-django-api | 0 | 10684 | from rest_framework.pagination import PageNumberPagination
class FavouritePagination(PageNumberPagination):
page_size=4 | from rest_framework.pagination import PageNumberPagination
class FavouritePagination(PageNumberPagination):
page_size=4 | none | 1 | 1.354843 | 1 |
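# A brief sketch (illustrative only) of how a small pagination class like the
# one above is typically attached to a Django REST Framework view. The
# Favourite model and FavouriteSerializer names are assumptions, so they are
# left commented out; pagination_class itself is standard DRF.
from rest_framework import viewsets


class FavouriteViewSet(viewsets.ModelViewSet):
    # Overrides any project-wide DEFAULT_PAGINATION_CLASS, so list responses
    # are returned in pages of `page_size` items (4 in the class above).
    pagination_class = FavouritePagination
    # queryset = Favourite.objects.all()        # assumed model
    # serializer_class = FavouriteSerializer    # assumed serializer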
|
Python-Math/Python-Math/check_prime.py | rgabeflores/Scripts | 2 | 10685 | <gh_stars>1-10
'''
@author <NAME>
Checks the primality of an integer.
'''
def is_prime(x):
'''
Checks the primality of an integer.
'''
    if x < 2:  # 0, 1 and negative numbers are not prime
        return False
    sqrt = int(x ** 0.5)
    for i in range(2, sqrt + 1):  # include the square root itself
        if x % i == 0:
            return False
    return True
def main():
try:
print("\n\n")
a = int(input(" Enter an integer to check if it is prime: "))
if is_prime(a):
print("\n ",a,"is a prime number.\n")
else:
print("\n ",a,"is not a prime number.\n")
except ValueError as e:
print("\n\n Please enter a valid choice.\n")
if __name__ == "__main__":
main() | '''
@author <NAME>
Checks the primality of an integer.
'''
def is_prime(x):
'''
Checks the primality of an integer.
'''
    if x < 2:  # 0, 1 and negative numbers are not prime
        return False
    sqrt = int(x ** 0.5)
    for i in range(2, sqrt + 1):  # include the square root itself
        if x % i == 0:
            return False
    return True
def main():
try:
print("\n\n")
a = int(input(" Enter an integer to check if it is prime: "))
if is_prime(a):
print("\n ",a,"is a prime number.\n")
else:
print("\n ",a,"is not a prime number.\n")
except ValueError as e:
print("\n\n Please enter a valid choice.\n")
if __name__ == "__main__":
main() | en | 0.624917 | @author <NAME> Checks the primality of an integer. Checks the primality of an integer. | 4.116588 | 4 |
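# A quick sanity-check sketch (illustrative only) for the trial-division test in
# check_prime.py above, assuming is_prime is in scope. The expected values follow
# directly from the definition of primality.
def _check_prime_examples():
    expected = {1: False, 2: True, 3: True, 4: False, 9: False, 17: True, 25: False}
    return all(is_prime(n) == result for n, result in expected.items())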
src/contrib/cortex-strings/scripts/plot-top.py | lastweek/source-freebsd | 0 | 10686 | <reponame>lastweek/source-freebsd
#!/usr/bin/env python
"""Plot the performance of different variants of the string routines
for one size.
"""
import libplot
import pylab
def plot(records, bytes):
records = [x for x in records if x.bytes==bytes]
variants = libplot.unique(records, 'variant', prefer='this')
functions = libplot.unique(records, 'function')
X = pylab.arange(len(functions))
width = 1.0/(len(variants)+1)
colours = libplot.make_colours()
pylab.figure(1).set_size_inches((16, 12))
pylab.clf()
for i, variant in enumerate(variants):
heights = []
for function in functions:
matches = [x for x in records if x.variant==variant and x.function==function and x.src_alignment==8]
if matches:
vals = [match.bytes*match.loops/match.elapsed/(1024*1024) for
match in matches]
mean = sum(vals)/len(vals)
heights.append(mean)
else:
heights.append(0)
pylab.bar(X+i*width, heights, width, color=colours.next(), label=variant)
axes = pylab.axes()
axes.set_xticklabels(functions)
axes.set_xticks(X + 0.5)
pylab.title('Performance of different variants for %d byte blocks' % bytes)
pylab.ylabel('Rate (MB/s)')
pylab.legend(loc='upper left', ncol=3)
pylab.grid()
pylab.savefig('top-%06d.png' % bytes, dpi=72)
def main():
records = libplot.parse()
for bytes in libplot.unique(records, 'bytes'):
plot(records, bytes)
pylab.show()
if __name__ == '__main__':
main()
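# A small self-contained sketch (illustrative only) of the throughput arithmetic
# used in plot() above: each record contributes bytes * loops / elapsed, scaled
# to MiB/s, and the bar height is the mean over the matching records. The Record
# tuple is an assumption standing in for whatever libplot.parse() yields.
from collections import namedtuple

Record = namedtuple('Record', 'bytes loops elapsed')

def mean_rate_mib_per_s(matches):
    """Mean throughput in MiB/s for a list of benchmark records."""
    vals = [m.bytes * m.loops / m.elapsed / (1024 * 1024) for m in matches]
    return sum(vals) / len(vals)

# e.g. mean_rate_mib_per_s([Record(1024, 100000, 0.5)]) is roughly 195.3 MiB/s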
| #!/usr/bin/env python
"""Plot the performance of different variants of the string routines
for one size.
"""
import libplot
import pylab
def plot(records, bytes):
records = [x for x in records if x.bytes==bytes]
variants = libplot.unique(records, 'variant', prefer='this')
functions = libplot.unique(records, 'function')
X = pylab.arange(len(functions))
width = 1.0/(len(variants)+1)
colours = libplot.make_colours()
pylab.figure(1).set_size_inches((16, 12))
pylab.clf()
for i, variant in enumerate(variants):
heights = []
for function in functions:
matches = [x for x in records if x.variant==variant and x.function==function and x.src_alignment==8]
if matches:
vals = [match.bytes*match.loops/match.elapsed/(1024*1024) for
match in matches]
mean = sum(vals)/len(vals)
heights.append(mean)
else:
heights.append(0)
pylab.bar(X+i*width, heights, width, color=colours.next(), label=variant)
axes = pylab.axes()
axes.set_xticklabels(functions)
axes.set_xticks(X + 0.5)
pylab.title('Performance of different variants for %d byte blocks' % bytes)
pylab.ylabel('Rate (MB/s)')
pylab.legend(loc='upper left', ncol=3)
pylab.grid()
pylab.savefig('top-%06d.png' % bytes, dpi=72)
def main():
records = libplot.parse()
for bytes in libplot.unique(records, 'bytes'):
plot(records, bytes)
pylab.show()
if __name__ == '__main__':
main() | en | 0.651911 | #!/usr/bin/env python Plot the performance of different variants of the string routines for one size. | 2.779841 | 3 |
part1.py | aspiringguru/python_sqlite_demo | 0 | 10687 | <gh_stars>0
import sqlite3
import time, datetime, random
import matplotlib
matplotlib.use("Agg")
#use the non-interactive Agg backend so charts can be saved to file without a display (avoids backend errors)
import matplotlib.pyplot as plt
print(matplotlib.get_backend())
import matplotlib.dates as mdates
from matplotlib import style
style.use('fivethirtyeight')
conn = sqlite3.connect("part1.db")
c = conn.cursor()
def create_table():
c.execute('CREATE TABLE IF NOT EXISTS stufftoplot(unix REAL, datestamp TEXT, keyword TEXT, value REAL)')
def data_entry():
c.execute("INSERT into stufftoplot VALUES(123456, '2016-01-01', 'some keywords', 5)")
conn.commit()
def data_insert(unix, date, keyword, value):
c.execute("INSERT into stufftoplot (unix, datestamp, keyword, value) VALUES(?, ?, ?, ?) ", (unix, date, keyword, value))
conn.commit()
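# A short sketch (illustrative only) of the same parameterised INSERT written
# with the connection as a context manager, so the commit (or rollback on error)
# is handled automatically; the table and column names match the script above.
def data_insert_tx(unix, date, keyword, value):
    with conn:  # commits on success, rolls back if the block raises
        conn.execute("INSERT INTO stufftoplot (unix, datestamp, keyword, value) VALUES (?, ?, ?, ?)",
                     (unix, date, keyword, value))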
def select_all_tasks(c):
"""
Query all rows in the tasks table
    :param c: the database cursor used to run the query
:return:
"""
c.execute("SELECT * FROM stufftoplot")
rows = c.fetchall()
for row in rows:
print(row)
def dynamic_data_entry():
unix = time.time()
value = random.randrange(0,10)
print ("unix:", type(unix), unix, "value:", value)
date = str(datetime.datetime.fromtimestamp(unix).strftime('%Y-%m-%d %H:%M:%S'))
keyword = 'Python'
c.execute("INSERT into stufftoplot (unix, datestamp, keyword, value) VALUES (?, ?, ?, ?)", (unix, date, keyword, value))
conn.commit()
def read_from_db():
#c.execute('SELECT * FROM stufftoplot')
#c.execute("SELECT * FROM stufftoplot WHERE value = '5' AND keyword='python' COLLATE NOCASE")
#c.execute("SELECT * FROM stufftoplot WHERE value = 3 AND keyword='Python'")
c.execute("SELECT * FROM stufftoplot WHERE unix > 1529020514")
data = c.fetchall()
print (type(data))
print(data)
for row in data:
print (row)
def graph_data():
c.execute('SELECT unix, value FROM stufftoplot')
data = c.fetchall()
print (type(data))
dates = []
values = []
for row in data:
print (row[0])
print (datetime.datetime.fromtimestamp(row[0]))
dates.append(datetime.datetime.fromtimestamp(row[0]))
values.append(row[1])
plt.plot_date(dates, values, '-')
#plt.show()
plt.savefig("charts/output_chart.png")
print("chart plotted to file")
def del_and_update():
c.execute("SELECT * FROM stufftoplot")
temp = c.fetchall()
[print (row) for row in temp]
before = len(temp)
c.execute("SELECT * FROM stufftoplot WHERE value>5")
temp = c.fetchall()
num_matches = len(temp)
c.execute("UPDATE stufftoplot SET value=99 WHERE value=8")
conn.commit()
c.execute("SELECT * FROM stufftoplot")
temp = c.fetchall()
[print (row) for row in temp]
after = len(temp)
print ("before:", before)
print ("after:", after)
print ("num_matches:", num_matches)
def create_n_rows(n):
for i in range(n):
dynamic_data_entry()
time.sleep(1)
create_table()
#data_entry()
#data_insert(1111, "2016-01-02", "more keywords", 1)
#data_insert(2222, "2016-01-03", "less keywords", 2)
#dynamic_data_entry()
# time.sleep(1)
#select_all_tasks(c)
#read_from_db()
#graph_data()
create_n_rows(10)
del_and_update()
c.close()
conn.close()
| import sqlite3
import time, datetime, random
import matplotlib
matplotlib.use("Agg")
#use the non-interactive Agg backend so charts can be saved to file without a display (avoids backend errors)
import matplotlib.pyplot as plt
print(matplotlib.get_backend())
import matplotlib.dates as mdates
from matplotlib import style
style.use('fivethirtyeight')
conn = sqlite3.connect("part1.db")
c = conn.cursor()
def create_table():
c.execute('CREATE TABLE IF NOT EXISTS stufftoplot(unix REAL, datestamp TEXT, keyword TEXT, value REAL)')
def data_entry():
c.execute("INSERT into stufftoplot VALUES(123456, '2016-01-01', 'some keywords', 5)")
conn.commit()
def data_insert(unix, date, keyword, value):
c.execute("INSERT into stufftoplot (unix, datestamp, keyword, value) VALUES(?, ?, ?, ?) ", (unix, date, keyword, value))
conn.commit()
def select_all_tasks(c):
"""
Query all rows in the tasks table
    :param c: the database cursor used to run the query
:return:
"""
c.execute("SELECT * FROM stufftoplot")
rows = c.fetchall()
for row in rows:
print(row)
def dynamic_data_entry():
unix = time.time()
value = random.randrange(0,10)
print ("unix:", type(unix), unix, "value:", value)
date = str(datetime.datetime.fromtimestamp(unix).strftime('%Y-%m-%d %H:%M:%S'))
keyword = 'Python'
c.execute("INSERT into stufftoplot (unix, datestamp, keyword, value) VALUES (?, ?, ?, ?)", (unix, date, keyword, value))
conn.commit()
def read_from_db():
#c.execute('SELECT * FROM stufftoplot')
#c.execute("SELECT * FROM stufftoplot WHERE value = '5' AND keyword='python' COLLATE NOCASE")
#c.execute("SELECT * FROM stufftoplot WHERE value = 3 AND keyword='Python'")
c.execute("SELECT * FROM stufftoplot WHERE unix > 1529020514")
data = c.fetchall()
print (type(data))
print(data)
for row in data:
print (row)
def graph_data():
c.execute('SELECT unix, value FROM stufftoplot')
data = c.fetchall()
print (type(data))
dates = []
values = []
for row in data:
print (row[0])
print (datetime.datetime.fromtimestamp(row[0]))
dates.append(datetime.datetime.fromtimestamp(row[0]))
values.append(row[1])
plt.plot_date(dates, values, '-')
#plt.show()
plt.savefig("charts/output_chart.png")
print("chart plotted to file")
def del_and_update():
c.execute("SELECT * FROM stufftoplot")
temp = c.fetchall()
[print (row) for row in temp]
before = len(temp)
c.execute("SELECT * FROM stufftoplot WHERE value>5")
temp = c.fetchall()
num_matches = len(temp)
c.execute("UPDATE stufftoplot SET value=99 WHERE value=8")
conn.commit()
c.execute("SELECT * FROM stufftoplot")
temp = c.fetchall()
[print (row) for row in temp]
after = len(temp)
print ("before:", before)
print ("after:", after)
print ("num_matches:", num_matches)
def create_n_rows(n):
for i in range(n):
dynamic_data_entry()
time.sleep(1)
create_table()
#data_entry()
#data_insert(1111, "2016-01-02", "more keywords", 1)
#data_insert(2222, "2016-01-03", "less keywords", 2)
#dynamic_data_entry()
# time.sleep(1)
#select_all_tasks(c)
#read_from_db()
#graph_data()
create_n_rows(10)
del_and_update()
c.close()
conn.close() | en | 0.357809 | #added due to error, possibly due to install configuration Query all rows in the tasks table :param conn: the Connection object :return: #c.execute('SELECT * FROM stufftoplot') #c.execute("SELECT * FROM stufftoplot WHERE value = '5' AND keyword='python' COLLATE NOCASE") #c.execute("SELECT * FROM stufftoplot WHERE value = 3 AND keyword='Python'") #plt.show() #data_entry() #data_insert(1111, "2016-01-02", "more keywords", 1) #data_insert(2222, "2016-01-03", "less keywords", 2) #dynamic_data_entry() # time.sleep(1) #select_all_tasks(c) #read_from_db() #graph_data() | 2.952891 | 3 |
tests/test_oic_consumer.py | infohash/pyoidc | 0 | 10688 | <gh_stars>0
import json
import os
from urllib.parse import parse_qs
from urllib.parse import urlparse
import pytest
import responses
from freezegun import freeze_time
from jwkest import BadSignature
from jwkest.jwk import SYMKey
from oic.oauth2.message import MissingSigningKey
from oic.oauth2.message import WrongSigningAlgorithm
from oic.oic import DEF_SIGN_ALG
from oic.oic import Server
from oic.oic import response_types_to_grant_types
from oic.oic.consumer import IGNORE
from oic.oic.consumer import Consumer
from oic.oic.consumer import clean_response
from oic.oic.message import AccessTokenRequest
from oic.oic.message import AccessTokenResponse
from oic.oic.message import AuthorizationResponse
from oic.oic.message import IdToken
from oic.oic.message import OpenIDSchema
from oic.oic.message import ProviderConfigurationResponse
from oic.oic.message import RegistrationResponse
from oic.utils.authn.client import CLIENT_AUTHN_METHOD
from oic.utils.keyio import KeyBundle
from oic.utils.keyio import KeyJar
from oic.utils.keyio import keybundle_from_local_file
from oic.utils.sdb import DictSessionBackend
from oic.utils.sdb import session_get
from oic.utils.time_util import utc_time_sans_frac
__author__ = "rohe0002"
KC_SYM_VS = KeyBundle({"kty": "oct", "key": "<KEY>", "use": "ver"})
KC_SYM_S = KeyBundle({"kty": "oct", "key": "<KEY>", "use": "sig"})
BASE_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), "data/keys"))
KC_RSA = keybundle_from_local_file(
os.path.join(BASE_PATH, "rsa.key"), "rsa", ["ver", "sig"]
)
SRVKEYS = KeyJar()
SRVKEYS[""] = [KC_RSA]
SRVKEYS["client_1"] = [KC_SYM_VS, KC_RSA]
CLIKEYS = KeyJar()
CLIKEYS["http://localhost:8088"] = [KC_RSA]
CLIKEYS[""] = [KC_RSA, KC_SYM_VS]
CLIKEYS["https://example.com"] = [KC_RSA]
SERVER_INFO = {
"version": "3.0",
"issuer": "https://localhost:8088",
"authorization_endpoint": "http://localhost:8088/authorization",
"token_endpoint": "http://localhost:8088/token",
"userinfo_endpoint": "http://localhost:8088/userinfo",
"flows_supported": ["code", "token"],
}
CONFIG = {
"authz_page": "authz",
"scope": ["openid"],
"response_type": "code",
"request_method": "parameter",
"password": "<PASSWORD>",
"max_age": 3600,
"user_info": {"name": None},
}
def _eq(l1, l2):
return set(l1) == set(l2)
def test_response_types_to_grant_types():
req_args = ["code"]
assert set(response_types_to_grant_types(req_args)) == {"authorization_code"}
req_args = ["code", "code id_token"]
assert set(response_types_to_grant_types(req_args)) == {
"authorization_code",
"implicit",
}
req_args = ["code", "id_token code", "code token id_token"]
assert set(response_types_to_grant_types(req_args)) == {
"authorization_code",
"implicit",
}
req_args = ["code", "id_token code", "code token id_token"]
kwargs = {"grant_types": ["refresh_token", "authorization_code"]}
assert set(response_types_to_grant_types(req_args, **kwargs)) == {
"authorization_code",
"implicit",
"refresh_token",
}
with pytest.raises(ValueError):
response_types_to_grant_types(["foobar openid"])
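    # A small extra sketch (illustrative only) of the mapping exercised above:
    # hybrid response types such as "code id_token" imply both the
    # authorization_code and the implicit grant.
    assert set(response_types_to_grant_types(["code id_token"])) == {
        "authorization_code",
        "implicit",
    }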
def test_clean_response():
atr = AccessTokenResponse(
access_token="access_token",
token_type="bearer",
expires_in=600,
refresh_token="<PASSWORD>",
steps=39,
stalls="yes",
)
catr = clean_response(atr)
atr_keys = atr.keys()
catr_keys = catr.keys()
assert _eq(
atr_keys,
[
"token_type",
"access_token",
"expires_in",
"refresh_token",
"steps",
"stalls",
],
)
assert _eq(catr_keys, ["token_type", "access_token", "expires_in", "refresh_token"])
class TestOICConsumer:
@pytest.fixture(autouse=True)
def setup_consumer(self, session_db_factory):
client_id = "client_1"
client_config = {
"client_id": client_id,
"client_authn_method": CLIENT_AUTHN_METHOD,
}
self.consumer = Consumer(
DictSessionBackend(), CONFIG, client_config, SERVER_INFO
)
self.consumer.behaviour = {
"request_object_signing_alg": DEF_SIGN_ALG["openid_request_object"]
}
self.consumer.keyjar = CLIKEYS
self.consumer.redirect_uris = ["https://example.com/cb"]
self.consumer.authorization_endpoint = "https://example.com/authorization"
self.consumer.token_endpoint = "https://example.com/token"
self.consumer.userinfo_endpoint = "https://example.com/userinfo" # type: ignore
self.consumer.client_secret = "hemlig"
self.consumer.secret_type = "basic"
self.consumer.provider_info = ProviderConfigurationResponse(
issuer="https://example.com"
) # abs min
def test_backup_keys(self):
keys = self.consumer.__dict__.keys()
_dict = self.consumer.dictionary()
dkeys = [key for key in keys if key not in _dict.keys()]
assert _eq(dkeys, IGNORE)
def test_backup_restore(self):
authz_org_url = "http://example.org/authorization"
_dict = sorted(list(self.consumer.__dict__.items()))
self.consumer._backup("sid")
self.consumer.restore("sid")
assert sorted(_dict) == sorted(list(self.consumer.__dict__.items()))
self.consumer.authorization_endpoint = authz_org_url
assert _dict != sorted(list(self.consumer.__dict__.items()))
self.consumer.restore("sid")
assert _dict == sorted(list(self.consumer.__dict__.items()))
def test_backup_restore_update(self):
authz_org_url = "http://example.org/authorization"
self.consumer._backup("sid")
self.consumer.authorization_endpoint = authz_org_url
self.consumer.token_endpoint = "https://example.org/token"
self.consumer.userinfo_endpoint = "" # type: ignore
assert self.consumer.authorization_endpoint == authz_org_url
assert self.consumer.token_endpoint == "https://example.org/token"
assert self.consumer.userinfo_endpoint == "" # type: ignore
self.consumer.update("sid")
assert self.consumer.authorization_endpoint == authz_org_url
assert self.consumer.token_endpoint == "https://example.org/token"
assert (
self.consumer.userinfo_endpoint # type: ignore
== "https://example.com/userinfo"
)
def test_begin(self):
srv = Server()
srv.keyjar = SRVKEYS
sid, location = self.consumer.begin("openid", "code")
authreq = srv.parse_authorization_request(url=location)
assert _eq(
list(authreq.keys()),
[
"state",
"max_age",
"claims",
"response_type",
"client_id",
"scope",
"redirect_uri",
],
)
assert authreq["state"] == sid
assert authreq["scope"] == self.consumer.consumer_config["scope"]
assert authreq["client_id"] == self.consumer.client_id
def test_begin_file(self, tmpdir):
path = tmpdir.strpath
external_path = "/exported"
self.consumer.consumer_config["request_method"] = "file"
self.consumer.consumer_config["temp_dir"] = path
self.consumer.consumer_config["temp_path"] = external_path
self.consumer.consumer_config["authz_page"] = "/authz"
srv = Server()
srv.keyjar = SRVKEYS
sid, location = self.consumer.begin(
"openid", "code", path="http://localhost:8087"
)
with responses.RequestsMock() as rsps:
p = urlparse(self.consumer.request_uri)
assert p.netloc == "localhost:8087"
# Map the URL path to the local path
relative_path = os.path.relpath(p.path, external_path)
file_path = os.path.join(path, relative_path)
with open(file_path) as f:
rsps.add(
rsps.GET,
self.consumer.request_uri,
body=f.read(),
status=200,
content_type="application/urlencoded",
)
authreq = srv.parse_authorization_request(url=location)
assert _eq(
list(authreq.keys()),
[
"max_age",
"state",
"redirect_uri",
"response_type",
"client_id",
"scope",
"claims",
],
)
assert authreq["state"] == sid
assert authreq["scope"] == self.consumer.consumer_config["scope"]
assert authreq["client_id"] == self.consumer.client_id
assert authreq["redirect_uri"].startswith("http://localhost:8087/authz")
def test_complete(self):
_state = "state0"
args = {
"client_id": self.consumer.client_id,
"response_type": "code",
"scope": ["openid"],
}
location = "https://example.com/cb?code=code&state=state0"
with responses.RequestsMock() as rsps:
rsps.add(
responses.GET,
"https://example.com/authorization",
status=302,
headers={"location": location},
)
rsps.add(
responses.POST,
"https://example.com/token",
content_type="application/json",
json={
"access_token": "some_token",
"token_type": "bearer",
"state": "state0",
"scope": "openid",
},
)
result = self.consumer.do_authorization_request(
state=_state, request_args=args
)
parsed = urlparse(result.headers["location"])
self.consumer.parse_response(
AuthorizationResponse, info=parsed.query, sformat="urlencoded"
)
resp = self.consumer.complete(_state)
assert isinstance(resp, AccessTokenResponse)
assert _eq(resp.keys(), ["token_type", "state", "access_token", "scope"])
assert resp["state"] == _state
def test_parse_authz(self):
_state = "state0"
args = {
"client_id": self.consumer.client_id,
"response_type": "code",
"scope": ["openid"],
}
location = "https://example.com/cb?code=code&state=state0"
with responses.RequestsMock() as rsps:
rsps.add(
responses.GET,
"https://example.com/authorization",
status=302,
headers={"location": location},
)
result = self.consumer.do_authorization_request(
state=_state, request_args=args
)
self.consumer._backup(_state)
part = self.consumer.parse_authz(query=result.headers["location"])
assert isinstance(part, tuple)
atr = part[0]
assert part[1] is None
assert part[2] is None
assert isinstance(atr, AuthorizationResponse)
assert atr["state"] == _state
assert "code" in atr
def test_parse_authz_implicit(self):
self.consumer.consumer_config["response_type"] = ["token"]
_state = "statxxx"
args = {
"client_id": self.consumer.client_id,
"response_type": "implicit",
"scope": ["openid"],
"redirect_uri": "https://example.com/cb",
}
location = (
"https://example.com/cb?access_token=token&token_type=bearer&state=statxxx"
)
with responses.RequestsMock() as rsps:
rsps.add(
responses.GET,
"https://example.com/authorization",
status=302,
headers={"location": location},
)
result = self.consumer.do_authorization_request(
state=_state, request_args=args
)
part = self.consumer.parse_authz(query=result.headers["location"])
assert isinstance(part, tuple)
assert part[0] is None
atr = part[1]
assert part[2] is None
assert isinstance(atr, AccessTokenResponse)
assert atr["state"] == _state
assert "access_token" in atr
def test_complete_secret_auth(self):
_state = "state0"
del self.consumer.consumer_config["password"]
args = {
"client_id": self.consumer.client_id,
"response_type": "code",
"scope": ["openid"],
}
location = "https://example.com/cb?code=code&state=state0"
with responses.RequestsMock() as rsps:
rsps.add(
responses.GET,
"https://example.com/authorization",
status=302,
headers={"location": location},
)
rsps.add(
responses.POST,
"https://example.com/token",
content_type="application/json",
json={
"access_token": "some_token",
"token_type": "bearer",
"state": "state0",
"scope": "openid",
},
)
result = self.consumer.do_authorization_request(
state=_state, request_args=args
)
parsed = urlparse(result.headers["location"])
self.consumer.parse_response(
AuthorizationResponse, info=parsed.query, sformat="urlencoded"
)
resp = self.consumer.complete(_state)
assert isinstance(resp, AccessTokenResponse)
assert _eq(resp.keys(), ["token_type", "state", "access_token", "scope"])
assert resp["state"] == _state
def test_complete_auth_token(self):
_state = "state0"
self.consumer.consumer_config["response_type"] = ["code", "token"]
args = {
"client_id": self.consumer.client_id,
"response_type": self.consumer.consumer_config["response_type"],
"scope": ["openid"],
"nonce": "nonce",
}
location = (
"https://example.com/cb?code=some_code&state=state0&access_token=token&token_type=bearer"
"&client_id=client_1&scope=openid"
)
with responses.RequestsMock() as rsps:
rsps.add(
responses.GET,
"https://example.com/authorization",
status=302,
headers={"location": location},
)
result = self.consumer.do_authorization_request(
state=_state, request_args=args
)
self.consumer._backup("state0")
parsed = urlparse(result.headers["location"])
part = self.consumer.parse_authz(query=parsed.query)
assert isinstance(part, tuple)
auth = part[0]
acc = part[1]
assert part[2] is None
assert isinstance(auth, AuthorizationResponse)
assert isinstance(acc, AccessTokenResponse)
assert _eq(
auth.keys(),
["code", "access_token", "token_type", "state", "client_id", "scope"],
)
assert _eq(acc.keys(), ["token_type", "state", "access_token", "scope"])
def test_complete_auth_token_idtoken(self):
_state = "state0"
self.consumer.consumer_config["response_type"] = ["id_token", "token"]
self.consumer.registration_response = RegistrationResponse(
id_token_signed_response_alg="HS256"
)
self.consumer.authz_req = {} # Store AuthzReq with state as key
args = {
"client_id": self.consumer.client_id,
"response_type": self.consumer.consumer_config["response_type"],
"scope": ["openid"],
"nonce": "nonce",
}
token = IdToken(
iss="https://example.com",
aud="client_1",
sub="some_sub",
exp=1565348600,
iat=1565348300,
nonce="nonce",
)
location = (
"https://example.com/cb?state=state0&access_token=token&token_type=bearer&"
"scope=openid&id_token={}".format(
token.to_jwt(key=[SYMKey(key="hemlig")], algorithm="HS256")
)
)
with responses.RequestsMock() as rsps:
rsps.add(
responses.GET,
"https://example.com/authorization",
status=302,
headers={"location": location},
)
result = self.consumer.do_authorization_request(
state=_state, request_args=args
)
query = parse_qs(urlparse(result.request.url).query)
assert query["client_id"] == ["client_1"]
assert query["scope"] == ["openid"]
assert query["response_type"] == ["id_token token"]
assert query["state"] == ["state0"]
assert query["nonce"] == ["nonce"]
assert query["redirect_uri"] == ["https://example.com/cb"]
parsed = urlparse(result.headers["location"])
with freeze_time("2019-08-09 11:00:00"):
part = self.consumer.parse_authz(query=parsed.query)
assert isinstance(part, tuple)
auth = part[0]
atr = part[1]
idt = part[2]
assert auth is None
assert isinstance(atr, AccessTokenResponse)
assert _eq(
atr.keys(),
[
"access_token",
"id_token",
"id_token_jwt",
"token_type",
"state",
"scope",
],
)
assert isinstance(idt, IdToken)
def test_complete_auth_token_idtoken_no_alg_config(self):
_state = "state0"
self.consumer.consumer_config["response_type"] = ["id_token", "token"]
self.consumer.provider_info = ProviderConfigurationResponse(
issuer="https://example.com"
) # abs min
self.consumer.authz_req = {} # Store AuthzReq with state as key
args = {
"client_id": self.consumer.client_id,
"response_type": self.consumer.consumer_config["response_type"],
"scope": ["openid"],
"nonce": "nonce",
}
token = IdToken(
iss="https://example.com",
aud="client_1",
sub="some_sub",
exp=1565348600,
iat=1565348300,
nonce="nonce",
)
location = (
"https://example.com/cb?state=state0&access_token=token&token_type=bearer&"
"scope=openid&id_token={}".format(
token.to_jwt(key=[SYMKey(key="hemlig")], algorithm="HS256")
)
)
with responses.RequestsMock() as rsps:
rsps.add(
responses.GET,
"https://example.com/authorization",
status=302,
headers={"location": location},
)
result = self.consumer.do_authorization_request(
state=_state, request_args=args
)
query = parse_qs(urlparse(result.request.url).query)
assert query["client_id"] == ["client_1"]
assert query["scope"] == ["openid"]
assert query["response_type"] == ["id_token token"]
assert query["state"] == ["state0"]
assert query["nonce"] == ["nonce"]
assert query["redirect_uri"] == ["https://example.com/cb"]
parsed = urlparse(result.headers["location"])
with freeze_time("2019-08-09 11:00:00"):
part = self.consumer.parse_authz(query=parsed.query, algs={"sign": "HS256"})
assert isinstance(part, tuple)
auth = part[0]
atr = part[1]
idt = part[2]
assert auth is None
assert isinstance(atr, AccessTokenResponse)
assert _eq(
atr.keys(),
[
"access_token",
"id_token",
"id_token_jwt",
"token_type",
"state",
"scope",
],
)
assert isinstance(idt, IdToken)
def test_complete_auth_token_idtoken_none_cipher_code(self):
_state = "state0"
self.consumer.consumer_config["response_type"] = ["code"]
self.consumer.registration_response = RegistrationResponse(
id_token_signed_response_alg="none"
)
self.consumer.provider_info = ProviderConfigurationResponse(
issuer="https://example.com"
) # abs min
self.consumer.authz_req = {} # Store AuthzReq with state as key
self.consumer.sdb[_state] = {"redirect_uris": []}
args = {
"client_id": self.consumer.client_id,
"response_type": self.consumer.consumer_config["response_type"],
"scope": ["openid"],
"nonce": "nonce",
}
token = IdToken(
iss="https://example.com",
aud="client_1",
sub="some_sub",
exp=1565348600,
iat=1565348300,
nonce="nonce",
at_hash="aaa",
)
# Downgrade the algorithm to `none`
location = (
"https://example.com/cb?state=state0&access_token=token&token_type=bearer&"
"scope=openid&id_token={}".format(
token.to_jwt(key=KC_RSA.keys(), algorithm="none")
)
)
with responses.RequestsMock() as rsps:
rsps.add(
responses.GET,
"https://example.com/authorization",
status=302,
headers={"location": location},
)
result = self.consumer.do_authorization_request(
state=_state, request_args=args
)
query = parse_qs(urlparse(result.request.url).query)
assert query["client_id"] == ["client_1"]
assert query["scope"] == ["openid"]
assert query["response_type"] == ["code"]
assert query["state"] == ["state0"]
assert query["nonce"] == ["nonce"]
assert query["redirect_uri"] == ["https://example.com/cb"]
parsed = urlparse(result.headers["location"])
with freeze_time("2019-08-09 11:00:00"):
part = self.consumer.parse_authz(query=parsed.query)
assert isinstance(part, tuple)
auth = part[0]
atr = part[1]
idt = part[2]
assert isinstance(auth, AuthorizationResponse)
assert isinstance(atr, AccessTokenResponse)
assert _eq(
atr.keys(), ["access_token", "id_token", "token_type", "state", "scope"]
)
assert isinstance(idt, IdToken)
def test_complete_auth_token_idtoken_none_cipher_token(self):
_state = "state0"
self.consumer.consumer_config["response_type"] = ["token"]
self.consumer.registration_response = RegistrationResponse(
id_token_signed_response_alg="none"
)
self.consumer.provider_info = ProviderConfigurationResponse(
issuer="https://example.com"
) # abs min
self.consumer.authz_req = {} # Store AuthzReq with state as key
self.consumer.sdb[_state] = {"redirect_uris": []}
args = {
"client_id": self.consumer.client_id,
"response_type": self.consumer.consumer_config["response_type"],
"scope": ["openid"],
"nonce": "nonce",
}
token = IdToken(
iss="https://example.com",
aud="client_1",
sub="some_sub",
exp=1565348600,
iat=1565348300,
nonce="nonce",
)
# Downgrade the algorithm to `none`
location = (
"https://example.com/cb?state=state0&access_token=token&token_type=bearer&"
"scope=openid&id_token={}".format(
token.to_jwt(key=KC_RSA.keys(), algorithm="none")
)
)
with responses.RequestsMock() as rsps:
rsps.add(
responses.GET,
"https://example.com/authorization",
status=302,
headers={"location": location},
)
result = self.consumer.do_authorization_request(
state=_state, request_args=args
)
query = parse_qs(urlparse(result.request.url).query)
assert query["client_id"] == ["client_1"]
assert query["scope"] == ["openid"]
assert query["response_type"] == ["token"]
assert query["state"] == ["state0"]
assert query["nonce"] == ["nonce"]
assert query["redirect_uri"] == ["https://example.com/cb"]
parsed = urlparse(result.headers["location"])
with freeze_time("2019-08-09 11:00:00"):
with pytest.raises(WrongSigningAlgorithm):
self.consumer.parse_authz(query=parsed.query)
def test_complete_auth_token_idtoken_cipher_downgrade(self):
_state = "state0"
self.consumer.consumer_config["response_type"] = ["id_token", "token"]
self.consumer.provider_info = ProviderConfigurationResponse(
issuer="https://example.com"
) # abs min
self.consumer.authz_req = {} # Store AuthzReq with state as key
args = {
"client_id": self.consumer.client_id,
"response_type": self.consumer.consumer_config["response_type"],
"scope": ["openid"],
"nonce": "nonce",
}
token = IdToken(
iss="https://example.com",
aud="client_1",
sub="some_sub",
exp=1565348600,
iat=1565348300,
nonce="nonce",
)
# Downgrade the algorithm to `none`
location = (
"https://example.com/cb?state=state0&access_token=token&token_type=bearer&"
"scope=openid&id_token={}".format(
token.to_jwt(key=KC_RSA.keys(), algorithm="none")
)
)
with responses.RequestsMock() as rsps:
rsps.add(
responses.GET,
"https://example.com/authorization",
status=302,
headers={"location": location},
)
result = self.consumer.do_authorization_request(
state=_state, request_args=args
)
query = parse_qs(urlparse(result.request.url).query)
assert query["client_id"] == ["client_1"]
assert query["scope"] == ["openid"]
assert query["response_type"] == ["id_token token"]
assert query["state"] == ["state0"]
assert query["nonce"] == ["nonce"]
assert query["redirect_uri"] == ["https://example.com/cb"]
parsed = urlparse(result.headers["location"])
with freeze_time("2019-08-09 11:00:00"):
with pytest.raises(WrongSigningAlgorithm):
self.consumer.parse_authz(query=parsed.query)
def test_userinfo(self):
_state = "state0"
args = {
"client_id": self.consumer.client_id,
"response_type": "code",
"scope": ["openid"],
}
location = "https://example.com/cb?code=code&state=state0"
with responses.RequestsMock() as rsps:
rsps.add(
responses.GET,
"https://example.com/authorization",
status=302,
headers={"location": location},
)
rsps.add(
responses.POST,
"https://example.com/token",
content_type="application/json",
json={
"access_token": "some_token",
"token_type": "bearer",
"state": "state0",
"scope": "openid",
},
)
rsps.add(
responses.POST,
"https://example.com/userinfo",
content_type="application/json",
json={
"name": "Ilja",
"sub": "some_sub",
"email": "<EMAIL>",
"nickname": "Ilja",
"verified": True,
},
)
result = self.consumer.do_authorization_request(
state=_state, request_args=args
)
parsed = urlparse(result.headers["location"])
self.consumer.parse_response(
AuthorizationResponse, info=parsed.query, sformat="urlencoded"
)
self.consumer.complete(_state)
result = self.consumer.get_user_info(_state)
assert isinstance(result, OpenIDSchema)
assert _eq(result.keys(), ["name", "email", "verified", "nickname", "sub"])
def test_sign_userinfo(self):
_state = "state0"
self.consumer.client_prefs = {"userinfo_signed_response_alg": "RS256"}
del self.consumer.consumer_config["request_method"]
args = {
"client_id": self.consumer.client_id,
"response_type": "code",
"scope": ["openid"],
}
location = "https://example.com/cb?code=code&state=state0"
with responses.RequestsMock() as rsps:
rsps.add(
responses.GET,
"https://example.com/authorization",
status=302,
headers={"location": location},
)
rsps.add(
responses.POST,
"https://example.com/token",
content_type="application/json",
json={
"access_token": "some_token",
"token_type": "bearer",
"state": "state0",
"scope": "openid",
},
)
rsps.add(
responses.POST,
"https://example.com/userinfo",
content_type="application/json",
json={
"name": "Ilja",
"sub": "some_sub",
"email": "<EMAIL>",
"nickname": "Ilja",
"verified": True,
},
)
self.consumer.begin("openid", "code")
result = self.consumer.do_authorization_request(
state=_state, request_args=args
)
parsed = urlparse(result.headers["location"])
self.consumer.parse_response(
AuthorizationResponse, info=parsed.query, sformat="urlencoded"
)
self.consumer.complete(_state)
result = self.consumer.get_user_info(_state)
assert isinstance(result, OpenIDSchema)
assert _eq(result.keys(), ["name", "email", "verified", "nickname", "sub"])
def test_get_userinfo_claims(self):
_state = "state0"
args = {
"client_id": self.consumer.client_id,
"response_type": "code",
"scope": ["openid"],
}
location = "https://example.com/cb?code=code&state=state0"
with responses.RequestsMock() as rsps:
rsps.add(
responses.GET,
"https://example.com/authorization",
status=302,
headers={"location": location},
)
rsps.add(
responses.POST,
"https://example.com/token",
content_type="application/json",
json={
"access_token": "some_token",
"token_type": "bearer",
"state": "state0",
"scope": "openid",
},
)
rsps.add(
responses.POST,
"https://example.com/userinfo",
content_type="application/json",
json={
"name": "Ilja",
"sub": "some_sub",
"email": "<EMAIL>",
"nickname": "Ilja",
"verified": True,
},
)
result = self.consumer.do_authorization_request(
state=_state, request_args=args
)
parsed = urlparse(result.headers["location"])
self.consumer.parse_response(
AuthorizationResponse, info=parsed.query, sformat="urlencoded"
)
response = self.consumer.complete(_state)
result = self.consumer.get_userinfo_claims(
response["access_token"],
self.consumer.userinfo_endpoint, # type: ignore
)
assert isinstance(result, OpenIDSchema)
assert _eq(result.keys(), ["name", "email", "verified", "nickname", "sub"])
def real_test_discover(self):
c = Consumer(None, None)
principal = "<EMAIL>"
res = c.discover(principal)
assert isinstance(res, ProviderConfigurationResponse)
assert _eq(
res.keys(),
[
"registration_endpoint",
"scopes_supported",
"identifiers_supported",
"token_endpoint",
"flows_supported",
"version",
"userinfo_endpoint",
"authorization_endpoint",
"x509_url",
"issuer",
],
)
assert res.version == "3.0" # type: ignore
assert _eq(
res.flows_supported, # type: ignore
[
"code",
"token",
"id_token",
"code token",
"code id_token",
"id_token token",
],
)
def test_discover(self):
c = Consumer(None, None)
webfinger = {
"subject": "acct:<EMAIL>",
"links": [
{
"rel": "http://openid.net/specs/connect/1.0/issuer",
"href": "https://localhost:8088/",
}
],
}
principal = "<EMAIL>"
with responses.RequestsMock() as rsps:
rsps.add(
responses.GET,
"https://example.com/.well-known/webfinger"
"?resource=acct%3Afoo%40example.com&rel=http%3A%2F%2Fopenid.net%2Fspecs%2Fconnect%2F1.0%2Fissuer",
json=webfinger,
)
res = c.discover(principal)
assert res == "https://localhost:8088/"
def test_client_register(self):
c = Consumer(None, None)
c.redirect_uris = ["https://example.com/authz"]
reg_resp = {
"client_id": "some_client",
"client_secret": "<PASSWORD>",
"client_secret_expires_at": 123456789,
"redirect_uris": ["https://example.com/authz"],
}
with responses.RequestsMock() as rsps:
rsps.add(responses.POST, "https://example.com/register/", json=reg_resp)
c.register("https://example.com/register/")
assert json.loads(str(rsps.calls[0].request.body)) == {
"application_type": "web",
"response_types": ["code"],
"redirect_uris": ["https://example.com/authz"],
"grant_types": ["authorization_code"],
}
assert c.client_id == "some_client"
assert c.client_secret == "<PASSWORD>"
assert c.registration_expires == 123456789
def test_client_register_token(self):
c = Consumer(None, None)
c.redirect_uris = ["https://example.com/authz"]
client_info = {
"client_id": "clientid",
"redirect_uris": ["https://example.com/authz"],
}
with responses.RequestsMock() as rsps:
rsps.add(
rsps.POST,
"https://provider.example.com/registration/",
json=client_info,
)
c.register(
"https://provider.example.com/registration/",
registration_token="<PASSWORD>",
)
header = rsps.calls[0].request.headers["Authorization"]
assert header == "Bearer aW5pdGlhbF9yZWdpc3RyYXRpb25fdG9rZW4="
def test_client_register_token_b64(self):
c = Consumer(None, None)
c.redirect_uris = ["https://example.com/authz"]
client_info = {
"client_id": "clientid",
"redirect_uris": ["https://example.com/authz"],
}
registration_token = (
"<KEY>
<KEY>
<KEY>
<KEY>
<KEY>"
<KEY>
<KEY>"
)
with responses.RequestsMock() as rsps:
rsps.add(
rsps.POST,
"https://provider.example.com/registration/",
json=client_info,
)
c.register(
"https://provider.example.com/registration/",
registration_token=registration_token,
)
header = rsps.calls[0].request.headers["Authorization"]
assert header == "Bearer " + registration_token
def _faulty_id_token(self):
idval = {
"nonce": "KUEYfRM2VzKDaaKD",
"sub": "EndUserSubject",
"iss": "https://alpha.cloud.nds.rub.de",
"exp": 1420823073,
"iat": 1420822473,
"aud": "TestClient",
}
idts = IdToken(**idval)
_signed_jwt = idts.to_jwt(key=[SYMKey(key="TestPassword")], algorithm="HS256")
# Mess with the signed id_token
p = _signed_jwt.split(".")
p[2] = "aaa"
return ".".join(p)
def test_faulty_id_token(self):
_faulty_signed_jwt = self._faulty_id_token()
with pytest.raises(BadSignature):
IdToken().from_jwt(_faulty_signed_jwt, key=[SYMKey(key="TestPassword")])
# What if no verification key is given ?
# Should also result in an exception
with pytest.raises(MissingSigningKey):
IdToken().from_jwt(_faulty_signed_jwt)
def test_faulty_id_token_in_access_token_response(self):
c = Consumer(None, None)
c.keyjar.add_symmetric("", "TestPassword", ["sig"])
_info = {
"access_token": "accessTok",
"id_token": self._faulty_id_token(),
"token_type": "Bearer",
}
_json = json.dumps(_info)
with pytest.raises(ValueError):
c.parse_response(AccessTokenResponse, _json, sformat="json")
def test_faulty_idtoken_from_accesstoken_endpoint(self):
_state = "state0"
self.consumer.consumer_config["response_type"] = ["id_token"]
args = {
"client_id": self.consumer.client_id,
"response_type": self.consumer.consumer_config["response_type"],
"scope": ["openid"],
}
location = (
"https://example.com/cb?state=state0&id_token=<KEY>"
".eyJpc3MiOiAiaHR0cDovL2xvY2FsaG9zdDo4MDg4IiwgInN1YiI6ICJhNWRkMjRiMmYwOGE2ODZmZDM4NmMyMmM"
"zZmY4ZWUyODFlZjJmYmZmMWZkZTcwMDg2NjhjZGEzZGVjZmE0NjY5IiwgImF1ZCI6IFsiY2xpZW50XzEiXSwgImV"
"4cCI6IDE1NzIwOTk5NjAsICJhY3IiOiAiMiIsICJpYXQiOiAxNTcyMDEzNTYwLCAibm9uY2UiOiAibmdFTGZVdmN"
"PMWoyaXNWcXkwQWNwM0NOYlZnMGdFRDEifQ.aaa"
)
with responses.RequestsMock() as rsps:
rsps.add(
responses.GET,
"https://example.com/authorization",
status=302,
headers={"location": location},
)
result = self.consumer.do_authorization_request(
state=_state, request_args=args
)
self.consumer._backup("state0")
assert result.status_code == 302
query = urlparse(result.headers["location"]).query
with pytest.raises(BadSignature):
self.consumer.parse_authz(query=query)
def test_get_session_management_id(self):
now = utc_time_sans_frac()
smid = "session_management_id"
idval = {
"nonce": "KUEYfRM2VzKDaaKD",
"sub": "EndUserSubject",
"iss": "https://example.com",
"exp": now + 3600,
"iat": now,
"aud": self.consumer.client_id,
"sid": smid,
}
idts = IdToken(**idval)
_signed_jwt = idts.to_jwt(key=KC_RSA.keys(), algorithm="RS256")
_state = "state"
self.consumer.sdb[_state] = {"redirect_uris": ["https://example.org/cb"]}
resp = AuthorizationResponse(id_token=_signed_jwt, state=_state)
self.consumer.consumer_config["response_type"] = ["id_token"]
self.consumer.authz_req[_state] = AccessTokenRequest(nonce="KUEYfRM2VzKDaaKD")
self.consumer.parse_authz(resp.to_urlencoded())
assert self.consumer.sso_db["state"]["smid"] == smid
assert session_get(self.consumer.sso_db, "smid", smid) == [_state]
| import json
import os
from urllib.parse import parse_qs
from urllib.parse import urlparse
import pytest
import responses
from freezegun import freeze_time
from jwkest import BadSignature
from jwkest.jwk import SYMKey
from oic.oauth2.message import MissingSigningKey
from oic.oauth2.message import WrongSigningAlgorithm
from oic.oic import DEF_SIGN_ALG
from oic.oic import Server
from oic.oic import response_types_to_grant_types
from oic.oic.consumer import IGNORE
from oic.oic.consumer import Consumer
from oic.oic.consumer import clean_response
from oic.oic.message import AccessTokenRequest
from oic.oic.message import AccessTokenResponse
from oic.oic.message import AuthorizationResponse
from oic.oic.message import IdToken
from oic.oic.message import OpenIDSchema
from oic.oic.message import ProviderConfigurationResponse
from oic.oic.message import RegistrationResponse
from oic.utils.authn.client import CLIENT_AUTHN_METHOD
from oic.utils.keyio import KeyBundle
from oic.utils.keyio import KeyJar
from oic.utils.keyio import keybundle_from_local_file
from oic.utils.sdb import DictSessionBackend
from oic.utils.sdb import session_get
from oic.utils.time_util import utc_time_sans_frac
__author__ = "rohe0002"
KC_SYM_VS = KeyBundle({"kty": "oct", "key": "<KEY>", "use": "ver"})
KC_SYM_S = KeyBundle({"kty": "oct", "key": "<KEY>", "use": "sig"})
BASE_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), "data/keys"))
KC_RSA = keybundle_from_local_file(
os.path.join(BASE_PATH, "rsa.key"), "rsa", ["ver", "sig"]
)
SRVKEYS = KeyJar()
SRVKEYS[""] = [KC_RSA]
SRVKEYS["client_1"] = [KC_SYM_VS, KC_RSA]
CLIKEYS = KeyJar()
CLIKEYS["http://localhost:8088"] = [KC_RSA]
CLIKEYS[""] = [KC_RSA, KC_SYM_VS]
CLIKEYS["https://example.com"] = [KC_RSA]
SERVER_INFO = {
"version": "3.0",
"issuer": "https://localhost:8088",
"authorization_endpoint": "http://localhost:8088/authorization",
"token_endpoint": "http://localhost:8088/token",
"userinfo_endpoint": "http://localhost:8088/userinfo",
"flows_supported": ["code", "token"],
}
CONFIG = {
"authz_page": "authz",
"scope": ["openid"],
"response_type": "code",
"request_method": "parameter",
"password": "<PASSWORD>",
"max_age": 3600,
"user_info": {"name": None},
}
def _eq(l1, l2):
return set(l1) == set(l2)
def test_response_types_to_grant_types():
req_args = ["code"]
assert set(response_types_to_grant_types(req_args)) == {"authorization_code"}
req_args = ["code", "code id_token"]
assert set(response_types_to_grant_types(req_args)) == {
"authorization_code",
"implicit",
}
req_args = ["code", "id_token code", "code token id_token"]
assert set(response_types_to_grant_types(req_args)) == {
"authorization_code",
"implicit",
}
req_args = ["code", "id_token code", "code token id_token"]
kwargs = {"grant_types": ["refresh_token", "authorization_code"]}
assert set(response_types_to_grant_types(req_args, **kwargs)) == {
"authorization_code",
"implicit",
"refresh_token",
}
with pytest.raises(ValueError):
response_types_to_grant_types(["foobar openid"])
def test_clean_response():
atr = AccessTokenResponse(
access_token="access_token",
token_type="bearer",
expires_in=600,
refresh_token="<PASSWORD>",
steps=39,
stalls="yes",
)
catr = clean_response(atr)
atr_keys = atr.keys()
catr_keys = catr.keys()
assert _eq(
atr_keys,
[
"token_type",
"access_token",
"expires_in",
"refresh_token",
"steps",
"stalls",
],
)
assert _eq(catr_keys, ["token_type", "access_token", "expires_in", "refresh_token"])
class TestOICConsumer:
@pytest.fixture(autouse=True)
def setup_consumer(self, session_db_factory):
client_id = "client_1"
client_config = {
"client_id": client_id,
"client_authn_method": CLIENT_AUTHN_METHOD,
}
self.consumer = Consumer(
DictSessionBackend(), CONFIG, client_config, SERVER_INFO
)
self.consumer.behaviour = {
"request_object_signing_alg": DEF_SIGN_ALG["openid_request_object"]
}
self.consumer.keyjar = CLIKEYS
self.consumer.redirect_uris = ["https://example.com/cb"]
self.consumer.authorization_endpoint = "https://example.com/authorization"
self.consumer.token_endpoint = "https://example.com/token"
self.consumer.userinfo_endpoint = "https://example.com/userinfo" # type: ignore
self.consumer.client_secret = "hemlig"
self.consumer.secret_type = "basic"
self.consumer.provider_info = ProviderConfigurationResponse(
issuer="https://example.com"
) # abs min
def test_backup_keys(self):
keys = self.consumer.__dict__.keys()
_dict = self.consumer.dictionary()
dkeys = [key for key in keys if key not in _dict.keys()]
assert _eq(dkeys, IGNORE)
def test_backup_restore(self):
authz_org_url = "http://example.org/authorization"
_dict = sorted(list(self.consumer.__dict__.items()))
self.consumer._backup("sid")
self.consumer.restore("sid")
assert sorted(_dict) == sorted(list(self.consumer.__dict__.items()))
self.consumer.authorization_endpoint = authz_org_url
assert _dict != sorted(list(self.consumer.__dict__.items()))
self.consumer.restore("sid")
assert _dict == sorted(list(self.consumer.__dict__.items()))
def test_backup_restore_update(self):
authz_org_url = "http://example.org/authorization"
self.consumer._backup("sid")
self.consumer.authorization_endpoint = authz_org_url
self.consumer.token_endpoint = "https://example.org/token"
self.consumer.userinfo_endpoint = "" # type: ignore
assert self.consumer.authorization_endpoint == authz_org_url
assert self.consumer.token_endpoint == "https://example.org/token"
assert self.consumer.userinfo_endpoint == "" # type: ignore
self.consumer.update("sid")
assert self.consumer.authorization_endpoint == authz_org_url
assert self.consumer.token_endpoint == "https://example.org/token"
assert (
self.consumer.userinfo_endpoint # type: ignore
== "https://example.com/userinfo"
)
def test_begin(self):
srv = Server()
srv.keyjar = SRVKEYS
sid, location = self.consumer.begin("openid", "code")
authreq = srv.parse_authorization_request(url=location)
assert _eq(
list(authreq.keys()),
[
"state",
"max_age",
"claims",
"response_type",
"client_id",
"scope",
"redirect_uri",
],
)
assert authreq["state"] == sid
assert authreq["scope"] == self.consumer.consumer_config["scope"]
assert authreq["client_id"] == self.consumer.client_id
def test_begin_file(self, tmpdir):
path = tmpdir.strpath
external_path = "/exported"
self.consumer.consumer_config["request_method"] = "file"
self.consumer.consumer_config["temp_dir"] = path
self.consumer.consumer_config["temp_path"] = external_path
self.consumer.consumer_config["authz_page"] = "/authz"
srv = Server()
srv.keyjar = SRVKEYS
sid, location = self.consumer.begin(
"openid", "code", path="http://localhost:8087"
)
with responses.RequestsMock() as rsps:
p = urlparse(self.consumer.request_uri)
assert p.netloc == "localhost:8087"
# Map the URL path to the local path
relative_path = os.path.relpath(p.path, external_path)
file_path = os.path.join(path, relative_path)
with open(file_path) as f:
rsps.add(
rsps.GET,
self.consumer.request_uri,
body=f.read(),
status=200,
content_type="application/urlencoded",
)
authreq = srv.parse_authorization_request(url=location)
assert _eq(
list(authreq.keys()),
[
"max_age",
"state",
"redirect_uri",
"response_type",
"client_id",
"scope",
"claims",
],
)
assert authreq["state"] == sid
assert authreq["scope"] == self.consumer.consumer_config["scope"]
assert authreq["client_id"] == self.consumer.client_id
assert authreq["redirect_uri"].startswith("http://localhost:8087/authz")
def test_complete(self):
_state = "state0"
args = {
"client_id": self.consumer.client_id,
"response_type": "code",
"scope": ["openid"],
}
location = "https://example.com/cb?code=code&state=state0"
with responses.RequestsMock() as rsps:
rsps.add(
responses.GET,
"https://example.com/authorization",
status=302,
headers={"location": location},
)
rsps.add(
responses.POST,
"https://example.com/token",
content_type="application/json",
json={
"access_token": "some_token",
"token_type": "bearer",
"state": "state0",
"scope": "openid",
},
)
result = self.consumer.do_authorization_request(
state=_state, request_args=args
)
parsed = urlparse(result.headers["location"])
self.consumer.parse_response(
AuthorizationResponse, info=parsed.query, sformat="urlencoded"
)
resp = self.consumer.complete(_state)
assert isinstance(resp, AccessTokenResponse)
assert _eq(resp.keys(), ["token_type", "state", "access_token", "scope"])
assert resp["state"] == _state
def test_parse_authz(self):
_state = "state0"
args = {
"client_id": self.consumer.client_id,
"response_type": "code",
"scope": ["openid"],
}
location = "https://example.com/cb?code=code&state=state0"
with responses.RequestsMock() as rsps:
rsps.add(
responses.GET,
"https://example.com/authorization",
status=302,
headers={"location": location},
)
result = self.consumer.do_authorization_request(
state=_state, request_args=args
)
self.consumer._backup(_state)
part = self.consumer.parse_authz(query=result.headers["location"])
assert isinstance(part, tuple)
atr = part[0]
assert part[1] is None
assert part[2] is None
assert isinstance(atr, AuthorizationResponse)
assert atr["state"] == _state
assert "code" in atr
def test_parse_authz_implicit(self):
self.consumer.consumer_config["response_type"] = ["token"]
_state = "statxxx"
args = {
"client_id": self.consumer.client_id,
"response_type": "implicit",
"scope": ["openid"],
"redirect_uri": "https://example.com/cb",
}
location = (
"https://example.com/cb?access_token=token&token_type=bearer&state=statxxx"
)
with responses.RequestsMock() as rsps:
rsps.add(
responses.GET,
"https://example.com/authorization",
status=302,
headers={"location": location},
)
result = self.consumer.do_authorization_request(
state=_state, request_args=args
)
part = self.consumer.parse_authz(query=result.headers["location"])
assert isinstance(part, tuple)
assert part[0] is None
atr = part[1]
assert part[2] is None
assert isinstance(atr, AccessTokenResponse)
assert atr["state"] == _state
assert "access_token" in atr
def test_complete_secret_auth(self):
_state = "state0"
del self.consumer.consumer_config["password"]
args = {
"client_id": self.consumer.client_id,
"response_type": "code",
"scope": ["openid"],
}
location = "https://example.com/cb?code=code&state=state0"
with responses.RequestsMock() as rsps:
rsps.add(
responses.GET,
"https://example.com/authorization",
status=302,
headers={"location": location},
)
rsps.add(
responses.POST,
"https://example.com/token",
content_type="application/json",
json={
"access_token": "some_token",
"token_type": "bearer",
"state": "state0",
"scope": "openid",
},
)
result = self.consumer.do_authorization_request(
state=_state, request_args=args
)
parsed = urlparse(result.headers["location"])
self.consumer.parse_response(
AuthorizationResponse, info=parsed.query, sformat="urlencoded"
)
resp = self.consumer.complete(_state)
assert isinstance(resp, AccessTokenResponse)
assert _eq(resp.keys(), ["token_type", "state", "access_token", "scope"])
assert resp["state"] == _state
def test_complete_auth_token(self):
_state = "state0"
self.consumer.consumer_config["response_type"] = ["code", "token"]
args = {
"client_id": self.consumer.client_id,
"response_type": self.consumer.consumer_config["response_type"],
"scope": ["openid"],
"nonce": "nonce",
}
location = (
"https://example.com/cb?code=some_code&state=state0&access_token=token&token_type=bearer"
"&client_id=client_1&scope=openid"
)
with responses.RequestsMock() as rsps:
rsps.add(
responses.GET,
"https://example.com/authorization",
status=302,
headers={"location": location},
)
result = self.consumer.do_authorization_request(
state=_state, request_args=args
)
self.consumer._backup("state0")
parsed = urlparse(result.headers["location"])
part = self.consumer.parse_authz(query=parsed.query)
assert isinstance(part, tuple)
auth = part[0]
acc = part[1]
assert part[2] is None
assert isinstance(auth, AuthorizationResponse)
assert isinstance(acc, AccessTokenResponse)
assert _eq(
auth.keys(),
["code", "access_token", "token_type", "state", "client_id", "scope"],
)
assert _eq(acc.keys(), ["token_type", "state", "access_token", "scope"])
def test_complete_auth_token_idtoken(self):
_state = "state0"
self.consumer.consumer_config["response_type"] = ["id_token", "token"]
self.consumer.registration_response = RegistrationResponse(
id_token_signed_response_alg="HS256"
)
self.consumer.authz_req = {} # Store AuthzReq with state as key
args = {
"client_id": self.consumer.client_id,
"response_type": self.consumer.consumer_config["response_type"],
"scope": ["openid"],
"nonce": "nonce",
}
token = IdToken(
iss="https://example.com",
aud="client_1",
sub="some_sub",
exp=1565348600,
iat=1565348300,
nonce="nonce",
)
location = (
"https://example.com/cb?state=state0&access_token=token&token_type=bearer&"
"scope=openid&id_token={}".format(
token.to_jwt(key=[SYMKey(key="hemlig")], algorithm="HS256")
)
)
with responses.RequestsMock() as rsps:
rsps.add(
responses.GET,
"https://example.com/authorization",
status=302,
headers={"location": location},
)
result = self.consumer.do_authorization_request(
state=_state, request_args=args
)
query = parse_qs(urlparse(result.request.url).query)
assert query["client_id"] == ["client_1"]
assert query["scope"] == ["openid"]
assert query["response_type"] == ["id_token token"]
assert query["state"] == ["state0"]
assert query["nonce"] == ["nonce"]
assert query["redirect_uri"] == ["https://example.com/cb"]
parsed = urlparse(result.headers["location"])
with freeze_time("2019-08-09 11:00:00"):
part = self.consumer.parse_authz(query=parsed.query)
assert isinstance(part, tuple)
auth = part[0]
atr = part[1]
idt = part[2]
assert auth is None
assert isinstance(atr, AccessTokenResponse)
assert _eq(
atr.keys(),
[
"access_token",
"id_token",
"id_token_jwt",
"token_type",
"state",
"scope",
],
)
assert isinstance(idt, IdToken)
def test_complete_auth_token_idtoken_no_alg_config(self):
_state = "state0"
self.consumer.consumer_config["response_type"] = ["id_token", "token"]
self.consumer.provider_info = ProviderConfigurationResponse(
issuer="https://example.com"
) # abs min
self.consumer.authz_req = {} # Store AuthzReq with state as key
args = {
"client_id": self.consumer.client_id,
"response_type": self.consumer.consumer_config["response_type"],
"scope": ["openid"],
"nonce": "nonce",
}
token = IdToken(
iss="https://example.com",
aud="client_1",
sub="some_sub",
exp=1565348600,
iat=1565348300,
nonce="nonce",
)
location = (
"https://example.com/cb?state=state0&access_token=token&token_type=bearer&"
"scope=openid&id_token={}".format(
token.to_jwt(key=[SYMKey(key="hemlig")], algorithm="HS256")
)
)
with responses.RequestsMock() as rsps:
rsps.add(
responses.GET,
"https://example.com/authorization",
status=302,
headers={"location": location},
)
result = self.consumer.do_authorization_request(
state=_state, request_args=args
)
query = parse_qs(urlparse(result.request.url).query)
assert query["client_id"] == ["client_1"]
assert query["scope"] == ["openid"]
assert query["response_type"] == ["id_token token"]
assert query["state"] == ["state0"]
assert query["nonce"] == ["nonce"]
assert query["redirect_uri"] == ["https://example.com/cb"]
parsed = urlparse(result.headers["location"])
with freeze_time("2019-08-09 11:00:00"):
part = self.consumer.parse_authz(query=parsed.query, algs={"sign": "HS256"})
assert isinstance(part, tuple)
auth = part[0]
atr = part[1]
idt = part[2]
assert auth is None
assert isinstance(atr, AccessTokenResponse)
assert _eq(
atr.keys(),
[
"access_token",
"id_token",
"id_token_jwt",
"token_type",
"state",
"scope",
],
)
assert isinstance(idt, IdToken)
def test_complete_auth_token_idtoken_none_cipher_code(self):
_state = "state0"
self.consumer.consumer_config["response_type"] = ["code"]
self.consumer.registration_response = RegistrationResponse(
id_token_signed_response_alg="none"
)
self.consumer.provider_info = ProviderConfigurationResponse(
issuer="https://example.com"
) # abs min
self.consumer.authz_req = {} # Store AuthzReq with state as key
self.consumer.sdb[_state] = {"redirect_uris": []}
args = {
"client_id": self.consumer.client_id,
"response_type": self.consumer.consumer_config["response_type"],
"scope": ["openid"],
"nonce": "nonce",
}
token = IdToken(
iss="https://example.com",
aud="client_1",
sub="some_sub",
exp=1565348600,
iat=1565348300,
nonce="nonce",
at_hash="aaa",
)
# Downgrade the algorithm to `none`
location = (
"https://example.com/cb?state=state0&access_token=token&token_type=bearer&"
"scope=openid&id_token={}".format(
token.to_jwt(key=KC_RSA.keys(), algorithm="none")
)
)
with responses.RequestsMock() as rsps:
rsps.add(
responses.GET,
"https://example.com/authorization",
status=302,
headers={"location": location},
)
result = self.consumer.do_authorization_request(
state=_state, request_args=args
)
query = parse_qs(urlparse(result.request.url).query)
assert query["client_id"] == ["client_1"]
assert query["scope"] == ["openid"]
assert query["response_type"] == ["code"]
assert query["state"] == ["state0"]
assert query["nonce"] == ["nonce"]
assert query["redirect_uri"] == ["https://example.com/cb"]
parsed = urlparse(result.headers["location"])
with freeze_time("2019-08-09 11:00:00"):
part = self.consumer.parse_authz(query=parsed.query)
assert isinstance(part, tuple)
auth = part[0]
atr = part[1]
idt = part[2]
assert isinstance(auth, AuthorizationResponse)
assert isinstance(atr, AccessTokenResponse)
assert _eq(
atr.keys(), ["access_token", "id_token", "token_type", "state", "scope"]
)
assert isinstance(idt, IdToken)
def test_complete_auth_token_idtoken_none_cipher_token(self):
_state = "state0"
self.consumer.consumer_config["response_type"] = ["token"]
self.consumer.registration_response = RegistrationResponse(
id_token_signed_response_alg="none"
)
self.consumer.provider_info = ProviderConfigurationResponse(
issuer="https://example.com"
) # abs min
self.consumer.authz_req = {} # Store AuthzReq with state as key
self.consumer.sdb[_state] = {"redirect_uris": []}
args = {
"client_id": self.consumer.client_id,
"response_type": self.consumer.consumer_config["response_type"],
"scope": ["openid"],
"nonce": "nonce",
}
token = IdToken(
iss="https://example.com",
aud="client_1",
sub="some_sub",
exp=1565348600,
iat=1565348300,
nonce="nonce",
)
# Downgrade the algorithm to `none`
location = (
"https://example.com/cb?state=state0&access_token=token&token_type=bearer&"
"scope=openid&id_token={}".format(
token.to_jwt(key=KC_RSA.keys(), algorithm="none")
)
)
with responses.RequestsMock() as rsps:
rsps.add(
responses.GET,
"https://example.com/authorization",
status=302,
headers={"location": location},
)
result = self.consumer.do_authorization_request(
state=_state, request_args=args
)
query = parse_qs(urlparse(result.request.url).query)
assert query["client_id"] == ["client_1"]
assert query["scope"] == ["openid"]
assert query["response_type"] == ["token"]
assert query["state"] == ["state0"]
assert query["nonce"] == ["nonce"]
assert query["redirect_uri"] == ["https://example.com/cb"]
parsed = urlparse(result.headers["location"])
with freeze_time("2019-08-09 11:00:00"):
with pytest.raises(WrongSigningAlgorithm):
self.consumer.parse_authz(query=parsed.query)
def test_complete_auth_token_idtoken_cipher_downgrade(self):
_state = "state0"
self.consumer.consumer_config["response_type"] = ["id_token", "token"]
self.consumer.provider_info = ProviderConfigurationResponse(
issuer="https://example.com"
) # abs min
self.consumer.authz_req = {} # Store AuthzReq with state as key
args = {
"client_id": self.consumer.client_id,
"response_type": self.consumer.consumer_config["response_type"],
"scope": ["openid"],
"nonce": "nonce",
}
token = IdToken(
iss="https://example.com",
aud="client_1",
sub="some_sub",
exp=1565348600,
iat=1565348300,
nonce="nonce",
)
# Downgrade the algorithm to `none`
location = (
"https://example.com/cb?state=state0&access_token=token&token_type=bearer&"
"scope=openid&id_token={}".format(
token.to_jwt(key=KC_RSA.keys(), algorithm="none")
)
)
with responses.RequestsMock() as rsps:
rsps.add(
responses.GET,
"https://example.com/authorization",
status=302,
headers={"location": location},
)
result = self.consumer.do_authorization_request(
state=_state, request_args=args
)
query = parse_qs(urlparse(result.request.url).query)
assert query["client_id"] == ["client_1"]
assert query["scope"] == ["openid"]
assert query["response_type"] == ["id_token token"]
assert query["state"] == ["state0"]
assert query["nonce"] == ["nonce"]
assert query["redirect_uri"] == ["https://example.com/cb"]
parsed = urlparse(result.headers["location"])
with freeze_time("2019-08-09 11:00:00"):
with pytest.raises(WrongSigningAlgorithm):
self.consumer.parse_authz(query=parsed.query)
def test_userinfo(self):
_state = "state0"
args = {
"client_id": self.consumer.client_id,
"response_type": "code",
"scope": ["openid"],
}
location = "https://example.com/cb?code=code&state=state0"
with responses.RequestsMock() as rsps:
rsps.add(
responses.GET,
"https://example.com/authorization",
status=302,
headers={"location": location},
)
rsps.add(
responses.POST,
"https://example.com/token",
content_type="application/json",
json={
"access_token": "some_token",
"token_type": "bearer",
"state": "state0",
"scope": "openid",
},
)
rsps.add(
responses.POST,
"https://example.com/userinfo",
content_type="application/json",
json={
"name": "Ilja",
"sub": "some_sub",
"email": "<EMAIL>",
"nickname": "Ilja",
"verified": True,
},
)
result = self.consumer.do_authorization_request(
state=_state, request_args=args
)
parsed = urlparse(result.headers["location"])
self.consumer.parse_response(
AuthorizationResponse, info=parsed.query, sformat="urlencoded"
)
self.consumer.complete(_state)
result = self.consumer.get_user_info(_state)
assert isinstance(result, OpenIDSchema)
assert _eq(result.keys(), ["name", "email", "verified", "nickname", "sub"])
def test_sign_userinfo(self):
_state = "state0"
self.consumer.client_prefs = {"userinfo_signed_response_alg": "RS256"}
del self.consumer.consumer_config["request_method"]
args = {
"client_id": self.consumer.client_id,
"response_type": "code",
"scope": ["openid"],
}
location = "https://example.com/cb?code=code&state=state0"
with responses.RequestsMock() as rsps:
rsps.add(
responses.GET,
"https://example.com/authorization",
status=302,
headers={"location": location},
)
rsps.add(
responses.POST,
"https://example.com/token",
content_type="application/json",
json={
"access_token": "some_token",
"token_type": "bearer",
"state": "state0",
"scope": "openid",
},
)
rsps.add(
responses.POST,
"https://example.com/userinfo",
content_type="application/json",
json={
"name": "Ilja",
"sub": "some_sub",
"email": "<EMAIL>",
"nickname": "Ilja",
"verified": True,
},
)
self.consumer.begin("openid", "code")
result = self.consumer.do_authorization_request(
state=_state, request_args=args
)
parsed = urlparse(result.headers["location"])
self.consumer.parse_response(
AuthorizationResponse, info=parsed.query, sformat="urlencoded"
)
self.consumer.complete(_state)
result = self.consumer.get_user_info(_state)
assert isinstance(result, OpenIDSchema)
assert _eq(result.keys(), ["name", "email", "verified", "nickname", "sub"])
def test_get_userinfo_claims(self):
_state = "state0"
args = {
"client_id": self.consumer.client_id,
"response_type": "code",
"scope": ["openid"],
}
location = "https://example.com/cb?code=code&state=state0"
with responses.RequestsMock() as rsps:
rsps.add(
responses.GET,
"https://example.com/authorization",
status=302,
headers={"location": location},
)
rsps.add(
responses.POST,
"https://example.com/token",
content_type="application/json",
json={
"access_token": "some_token",
"token_type": "bearer",
"state": "state0",
"scope": "openid",
},
)
rsps.add(
responses.POST,
"https://example.com/userinfo",
content_type="application/json",
json={
"name": "Ilja",
"sub": "some_sub",
"email": "<EMAIL>",
"nickname": "Ilja",
"verified": True,
},
)
result = self.consumer.do_authorization_request(
state=_state, request_args=args
)
parsed = urlparse(result.headers["location"])
self.consumer.parse_response(
AuthorizationResponse, info=parsed.query, sformat="urlencoded"
)
response = self.consumer.complete(_state)
result = self.consumer.get_userinfo_claims(
response["access_token"],
self.consumer.userinfo_endpoint, # type: ignore
)
assert isinstance(result, OpenIDSchema)
assert _eq(result.keys(), ["name", "email", "verified", "nickname", "sub"])
def real_test_discover(self):
c = Consumer(None, None)
principal = "<EMAIL>"
res = c.discover(principal)
assert isinstance(res, ProviderConfigurationResponse)
assert _eq(
res.keys(),
[
"registration_endpoint",
"scopes_supported",
"identifiers_supported",
"token_endpoint",
"flows_supported",
"version",
"userinfo_endpoint",
"authorization_endpoint",
"x509_url",
"issuer",
],
)
assert res.version == "3.0" # type: ignore
assert _eq(
res.flows_supported, # type: ignore
[
"code",
"token",
"id_token",
"code token",
"code id_token",
"id_token token",
],
)
def test_discover(self):
c = Consumer(None, None)
webfinger = {
"subject": "acct:<EMAIL>",
"links": [
{
"rel": "http://openid.net/specs/connect/1.0/issuer",
"href": "https://localhost:8088/",
}
],
}
principal = "<EMAIL>"
with responses.RequestsMock() as rsps:
rsps.add(
responses.GET,
"https://example.com/.well-known/webfinger"
"?resource=acct%3Afoo%40example.com&rel=http%3A%2F%2Fopenid.net%2Fspecs%2Fconnect%2F1.0%2Fissuer",
json=webfinger,
)
res = c.discover(principal)
assert res == "https://localhost:8088/"
def test_client_register(self):
c = Consumer(None, None)
c.redirect_uris = ["https://example.com/authz"]
reg_resp = {
"client_id": "some_client",
"client_secret": "<PASSWORD>",
"client_secret_expires_at": 123456789,
"redirect_uris": ["https://example.com/authz"],
}
with responses.RequestsMock() as rsps:
rsps.add(responses.POST, "https://example.com/register/", json=reg_resp)
c.register("https://example.com/register/")
assert json.loads(str(rsps.calls[0].request.body)) == {
"application_type": "web",
"response_types": ["code"],
"redirect_uris": ["https://example.com/authz"],
"grant_types": ["authorization_code"],
}
assert c.client_id == "some_client"
assert c.client_secret == "<PASSWORD>"
assert c.registration_expires == 123456789
def test_client_register_token(self):
c = Consumer(None, None)
c.redirect_uris = ["https://example.com/authz"]
client_info = {
"client_id": "clientid",
"redirect_uris": ["https://example.com/authz"],
}
with responses.RequestsMock() as rsps:
rsps.add(
rsps.POST,
"https://provider.example.com/registration/",
json=client_info,
)
c.register(
"https://provider.example.com/registration/",
registration_token="<PASSWORD>",
)
header = rsps.calls[0].request.headers["Authorization"]
assert header == "Bearer aW5pdGlhbF9yZWdpc3RyYXRpb25fdG9rZW4="
def test_client_register_token_b64(self):
c = Consumer(None, None)
c.redirect_uris = ["https://example.com/authz"]
client_info = {
"client_id": "clientid",
"redirect_uris": ["https://example.com/authz"],
}
registration_token = (
"<KEY>
<KEY>
<KEY>
<KEY>
<KEY>"
<KEY>
<KEY>"
)
with responses.RequestsMock() as rsps:
rsps.add(
rsps.POST,
"https://provider.example.com/registration/",
json=client_info,
)
c.register(
"https://provider.example.com/registration/",
registration_token=registration_token,
)
header = rsps.calls[0].request.headers["Authorization"]
assert header == "Bearer " + registration_token
def _faulty_id_token(self):
idval = {
"nonce": "KUEYfRM2VzKDaaKD",
"sub": "EndUserSubject",
"iss": "https://alpha.cloud.nds.rub.de",
"exp": 1420823073,
"iat": 1420822473,
"aud": "TestClient",
}
idts = IdToken(**idval)
_signed_jwt = idts.to_jwt(key=[SYMKey(key="TestPassword")], algorithm="HS256")
# Mess with the signed id_token
p = _signed_jwt.split(".")
p[2] = "aaa"
return ".".join(p)
def test_faulty_id_token(self):
_faulty_signed_jwt = self._faulty_id_token()
with pytest.raises(BadSignature):
IdToken().from_jwt(_faulty_signed_jwt, key=[SYMKey(key="TestPassword")])
# What if no verification key is given ?
# Should also result in an exception
with pytest.raises(MissingSigningKey):
IdToken().from_jwt(_faulty_signed_jwt)
def test_faulty_id_token_in_access_token_response(self):
c = Consumer(None, None)
c.keyjar.add_symmetric("", "TestPassword", ["sig"])
_info = {
"access_token": "accessTok",
"id_token": self._faulty_id_token(),
"token_type": "Bearer",
}
_json = json.dumps(_info)
with pytest.raises(ValueError):
c.parse_response(AccessTokenResponse, _json, sformat="json")
def test_faulty_idtoken_from_accesstoken_endpoint(self):
_state = "state0"
self.consumer.consumer_config["response_type"] = ["id_token"]
args = {
"client_id": self.consumer.client_id,
"response_type": self.consumer.consumer_config["response_type"],
"scope": ["openid"],
}
location = (
"https://example.com/cb?state=state0&id_token=<KEY>"
".eyJpc3MiOiAiaHR0cDovL2xvY2FsaG9zdDo4MDg4IiwgInN1YiI6ICJhNWRkMjRiMmYwOGE2ODZmZDM4NmMyMmM"
"zZmY4ZWUyODFlZjJmYmZmMWZkZTcwMDg2NjhjZGEzZGVjZmE0NjY5IiwgImF1ZCI6IFsiY2xpZW50XzEiXSwgImV"
"4cCI6IDE1NzIwOTk5NjAsICJhY3IiOiAiMiIsICJpYXQiOiAxNTcyMDEzNTYwLCAibm9uY2UiOiAibmdFTGZVdmN"
"PMWoyaXNWcXkwQWNwM0NOYlZnMGdFRDEifQ.aaa"
)
with responses.RequestsMock() as rsps:
rsps.add(
responses.GET,
"https://example.com/authorization",
status=302,
headers={"location": location},
)
result = self.consumer.do_authorization_request(
state=_state, request_args=args
)
self.consumer._backup("state0")
assert result.status_code == 302
query = urlparse(result.headers["location"]).query
with pytest.raises(BadSignature):
self.consumer.parse_authz(query=query)
def test_get_session_management_id(self):
now = utc_time_sans_frac()
smid = "session_management_id"
idval = {
"nonce": "KUEYfRM2VzKDaaKD",
"sub": "EndUserSubject",
"iss": "https://example.com",
"exp": now + 3600,
"iat": now,
"aud": self.consumer.client_id,
"sid": smid,
}
idts = IdToken(**idval)
_signed_jwt = idts.to_jwt(key=KC_RSA.keys(), algorithm="RS256")
_state = "state"
self.consumer.sdb[_state] = {"redirect_uris": ["https://example.org/cb"]}
resp = AuthorizationResponse(id_token=_signed_jwt, state=_state)
self.consumer.consumer_config["response_type"] = ["id_token"]
self.consumer.authz_req[_state] = AccessTokenRequest(nonce="KUEYfRM2VzKDaaKD")
self.consumer.parse_authz(resp.to_urlencoded())
assert self.consumer.sso_db["state"]["smid"] == smid
assert session_get(self.consumer.sso_db, "smid", smid) == [_state] | en | 0.845775 | # type: ignore # abs min # type: ignore # type: ignore # type: ignore # Map the URL path to the local path # Store AuthzReq with state as key # abs min # Store AuthzReq with state as key # abs min # Store AuthzReq with state as key # Downgrade the algorithm to `none` # abs min # Store AuthzReq with state as key # Downgrade the algorithm to `none` # abs min # Store AuthzReq with state as key # Downgrade the algorithm to `none` # type: ignore # type: ignore # type: ignore # Mess with the signed id_token # What if no verification key is given ? # Should also result in an exception | 1.796626 | 2 |
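The tests above walk the OpenID Connect authorization-code flow end to end against a mocked provider. The sketch below condenses the caller-side sequence using only Consumer methods that appear in those tests (begin, parse_authz, complete, get_user_info); the consumer argument is assumed to be configured like the test fixture and the callback query string is illustrative, so treat this as a hedged sketch rather than a drop-in snippet.
def code_flow_sketch(consumer):
    # `consumer` is assumed to be set up like the fixture above
    # (client_id, consumer_config, session/SSO databases, provider endpoints).
    sid, location = consumer.begin("openid", "code")  # build the authorization URL, keep the state
    # The user agent follows `location`; the provider redirects back to the
    # registered redirect_uri with something like ?code=...&state=<sid>.
    callback_query = "code=some_code&state=%s" % sid  # illustrative callback query
    aresp, atr, idt = consumer.parse_authz(query=callback_query)
    assert aresp["state"] == sid
    token_response = consumer.complete(sid)  # exchanges the code at the token endpoint, as mocked above
    userinfo = consumer.get_user_info(sid)   # uses the access token obtained by complete()
    return token_response, userinfo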
setup.py | CristianPachacama/cartoframes | 1 | 10689 | <reponame>CristianPachacama/cartoframes<filename>setup.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import io
from codecs import open
from setuptools import setup, find_packages
def walk_subpkg(name):
data_files = []
package_dir = 'cartoframes'
for parent, dirs, files in os.walk(os.path.join(package_dir, name)):
# Remove package_dir from the path.
sub_dir = os.sep.join(parent.split(os.sep)[1:])
for f in files:
data_files.append(os.path.join(sub_dir, f))
return data_files
REQUIRES = [
'appdirs>=1.4.3,<2.0',
'carto>=1.6.0,<2.0',
'jinja2>=2.10.1,<3.0',
    'pandas>=0.24.2,<1.0',
'shapely>=1.6.4,<2.0',
'tqdm>=4.32.1,<5.0',
'unidecode>=1.1.0,<2.0',
'webcolors>=1.9.1,<2.0'
]
PACKAGE_DATA = {
'': [
'LICENSE',
'CONTRIBUTORS',
],
'cartoframes': [
'assets/*',
'assets/*.j2'
] + walk_subpkg('assets'),
}
here = os.path.abspath(os.path.dirname(__file__))
with io.open(os.path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = '\n' + f.read()
about = {}
with open(os.path.join(here, 'cartoframes', '__version__.py'), 'r', 'utf-8') as f:
exec(f.read(), about)
setup(
name=about['__title__'],
version=about['__version__'],
description=about['__description__'],
long_description=long_description,
url=about['__url__'],
author=about['__author__'],
author_email=about['__email__'],
license=about['__license__'],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7'
],
keywords='carto data science maps spatial pandas',
packages=find_packages(),
install_requires=REQUIRES,
python_requires=">=2.6, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*",
include_package_data=True,
package_dir={'cartoframes': 'cartoframes'},
package_data=PACKAGE_DATA,
)
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import io
from codecs import open
from setuptools import setup, find_packages
def walk_subpkg(name):
data_files = []
package_dir = 'cartoframes'
for parent, dirs, files in os.walk(os.path.join(package_dir, name)):
# Remove package_dir from the path.
sub_dir = os.sep.join(parent.split(os.sep)[1:])
for f in files:
data_files.append(os.path.join(sub_dir, f))
return data_files
REQUIRES = [
'appdirs>=1.4.3,<2.0',
'carto>=1.6.0,<2.0',
'jinja2>=2.10.1,<3.0',
    'pandas>=0.24.2,<1.0',
'shapely>=1.6.4,<2.0',
'tqdm>=4.32.1,<5.0',
'unidecode>=1.1.0,<2.0',
'webcolors>=1.9.1,<2.0'
]
PACKAGE_DATA = {
'': [
'LICENSE',
'CONTRIBUTORS',
],
'cartoframes': [
'assets/*',
'assets/*.j2'
] + walk_subpkg('assets'),
}
here = os.path.abspath(os.path.dirname(__file__))
with io.open(os.path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = '\n' + f.read()
about = {}
with open(os.path.join(here, 'cartoframes', '__version__.py'), 'r', 'utf-8') as f:
exec(f.read(), about)
setup(
name=about['__title__'],
version=about['__version__'],
description=about['__description__'],
long_description=long_description,
url=about['__url__'],
author=about['__author__'],
author_email=about['__email__'],
license=about['__license__'],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7'
],
keywords='carto data science maps spatial pandas',
packages=find_packages(),
install_requires=REQUIRES,
python_requires=">=2.6, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*",
include_package_data=True,
package_dir={'cartoframes': 'cartoframes'},
package_data=PACKAGE_DATA,
) | en | 0.65402 | #!/usr/bin/env python # -*- coding: utf-8 -*- # Remove package_dir from the path. | 1.902397 | 2 |
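The walk_subpkg helper in the setup script above strips the leading package directory so the collected asset paths are relative to the cartoframes package, which is the form package_data expects. A minimal sketch of that path handling (the example paths are invented):
import os

def relative_to_package(parent, filename):
    # Mirrors the path handling inside walk_subpkg above: drop the leading
    # "cartoframes" component and rejoin the remainder with the file name.
    sub_dir = os.sep.join(parent.split(os.sep)[1:])
    return os.path.join(sub_dir, filename)

# relative_to_package("cartoframes/assets/templates", "main.html.j2")
# -> "assets/templates/main.html.j2" (with a POSIX os.sep)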
Tests/test_BioSQL_mysql_connector_online.py | bioinf-mcb/biopython | 2 | 10690 | <filename>Tests/test_BioSQL_mysql_connector_online.py
#!/usr/bin/env python
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Run BioSQL tests using MySQL."""
import unittest
# Really do want "import *" to get all the test classes:
from common_BioSQL import * # noqa: F403
from common_BioSQL_online import * # noqa: F403
# Import these explicitly to avoid flake8 F405 below:
from common_BioSQL import load_biosql_ini, check_config
from common_BioSQL_online import share_config
import requires_internet
requires_internet.check()
DBDRIVER = "mysql.connector"
DBTYPE = "mysql"
DBHOST, DBUSER, DBPASSWD, TESTDB = load_biosql_ini(DBTYPE)
# This will abort if driver not installed etc:
check_config(DBDRIVER, DBTYPE, DBHOST, DBUSER, DBPASSWD, TESTDB)
share_config(DBDRIVER, DBTYPE, DBHOST, DBUSER, DBPASSWD, TESTDB)
if __name__ == "__main__":
# Run the test cases
runner = unittest.TextTestRunner(verbosity=2)
unittest.main(testRunner=runner)
| <filename>Tests/test_BioSQL_mysql_connector_online.py
#!/usr/bin/env python
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Run BioSQL tests using MySQL."""
import unittest
# Really do want "import *" to get all the test classes:
from common_BioSQL import * # noqa: F403
from common_BioSQL_online import * # noqa: F403
# Import these explicitly to avoid flake8 F405 below:
from common_BioSQL import load_biosql_ini, check_config
from common_BioSQL_online import share_config
import requires_internet
requires_internet.check()
DBDRIVER = "mysql.connector"
DBTYPE = "mysql"
DBHOST, DBUSER, DBPASSWD, TESTDB = load_biosql_ini(DBTYPE)
# This will abort if driver not installed etc:
check_config(DBDRIVER, DBTYPE, DBHOST, DBUSER, DBPASSWD, TESTDB)
share_config(DBDRIVER, DBTYPE, DBHOST, DBUSER, DBPASSWD, TESTDB)
if __name__ == "__main__":
# Run the test cases
runner = unittest.TextTestRunner(verbosity=2)
unittest.main(testRunner=runner)
| en | 0.893456 | #!/usr/bin/env python # This code is part of the Biopython distribution and governed by its # license. Please see the LICENSE file that should have been included # as part of this package. Run BioSQL tests using MySQL. # Really do want "import *" to get all the test clases: # noqa: F403 # noqa: F403 # Import these explicitly to avoid flake8 F405 below: # This will abort if driver not installed etc: # Run the test cases | 1.905407 | 2 |
kalachakra/saraswati/migrations/0004_ritual_people_name.py | tony-mikhailov/Kalachakra | 0 | 10691 | <filename>kalachakra/saraswati/migrations/0004_ritual_people_name.py
# Generated by Django 2.2.6 on 2020-04-05 07:50
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('saraswati', '0003_auto_20200402_1918'),
]
operations = [
migrations.AddField(
model_name='ritual',
name='people_name',
field=models.TextField(blank=True, default=None, max_length=108, null=True),
),
]
| <filename>kalachakra/saraswati/migrations/0004_ritual_people_name.py
# Generated by Django 2.2.6 on 2020-04-05 07:50
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('saraswati', '0003_auto_20200402_1918'),
]
operations = [
migrations.AddField(
model_name='ritual',
name='people_name',
field=models.TextField(blank=True, default=None, max_length=108, null=True),
),
]
| en | 0.835019 | # Generated by Django 2.2.6 on 2020-04-05 07:50 | 1.49402 | 1 |
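The migration record above adds a nullable people_name column to the ritual model. On the model side that corresponds to a field declaration roughly like the following sketch; the real saraswati models module is not part of this record, the surrounding fields are omitted, and a declaration like this only works inside a configured Django app.
from django.db import models

class Ritual(models.Model):
    # Only the field introduced by migration 0004_ritual_people_name is shown.
    people_name = models.TextField(blank=True, default=None, max_length=108, null=True)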
src/toil/jobStores/abstractJobStore.py | adamnovak/toil | 0 | 10692 | # Copyright (C) 2015 UCSC Computational Genomics Lab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from abc import ABCMeta, abstractmethod
from contextlib import contextmanager
import re
try:
import cPickle
except ImportError:
import pickle as cPickle
class NoSuchJobException( Exception ):
def __init__( self, jobStoreID ):
super( NoSuchJobException, self ).__init__( "The job '%s' does not exist" % jobStoreID )
class ConcurrentFileModificationException( Exception ):
def __init__( self, jobStoreFileID ):
super( ConcurrentFileModificationException, self ).__init__(
'Concurrent update to file %s detected.' % jobStoreFileID )
class NoSuchFileException( Exception ):
def __init__( self, fileJobStoreID ):
super( NoSuchFileException, self ).__init__( "The file '%s' does not exist" % fileJobStoreID )
class JobStoreCreationException( Exception ):
def __init__( self, message ):
super( JobStoreCreationException, self ).__init__( message )
class AbstractJobStore( object ):
"""
Represents the physical storage for the jobs and associated files in a toil.
"""
__metaclass__ = ABCMeta
def __init__( self, config=None ):
"""
:param config: If config is not None then the
given configuration object will be written to the shared file "config.pickle" which can
later be retrieved using the readSharedFileStream. See writeConfigToStore.
If this file already exists it will be overwritten. If config is None,
the shared file "config.pickle" is assumed to exist and is retrieved. See loadConfigFromStore.
"""
#Now get on with reading or writing the config
if config is None:
with self.readSharedFileStream( "config.pickle", isProtected=False ) as fileHandle:
self.__config = cPickle.load(fileHandle)
else:
self.__config = config
self.writeConfigToStore()
def writeConfigToStore(self):
"""
Re-writes the config attribute to the jobStore, so that its values can be retrieved
if the jobStore is reloaded.
"""
with self.writeSharedFileStream( "config.pickle", isProtected=False ) as fileHandle:
cPickle.dump(self.__config, fileHandle, cPickle.HIGHEST_PROTOCOL)
@property
def config( self ):
return self.__config
@staticmethod
def _checkJobStoreCreation(create, exists, jobStoreString):
"""
Consistency checks which will result in exceptions if we attempt to overwrite an existing
jobStore.
:type create: boolean
:type exists: boolean
:raise JobStoreCreationException: Thrown if create=True and exists=True or create=False
and exists=False
"""
if create and exists:
raise JobStoreCreationException("The job store '%s' already exists. "
"Use --restart or 'toil restart' to resume this jobStore, "
"else remove it to start from scratch" % jobStoreString)
if not create and not exists:
raise JobStoreCreationException("The job store '%s' does not exist, so there "
"is nothing to restart." % jobStoreString)
@abstractmethod
def deleteJobStore( self ):
"""
Removes the jobStore from the disk/store. Careful!
"""
raise NotImplementedError( )
##Cleanup functions
def clean(self):
"""
Function to cleanup the state of a jobStore after a restart.
Fixes jobs that might have been partially updated.
Resets the try counts.
"""
#Collate any jobs that were in the process of being created/deleted
jobsToDelete = set()
for job in self.jobs():
for updateID in job.jobsToDelete:
jobsToDelete.add(updateID)
#Delete the jobs that should be deleted
if len(jobsToDelete) > 0:
for job in self.jobs():
if job.updateID in jobsToDelete:
self.delete(job.jobStoreID)
#Cleanup the state of each job
for job in self.jobs():
changed = False #Flag to indicate if we need to update the job
#on disk
if len(job.jobsToDelete) != 0:
job.jobsToDelete = set()
changed = True
#While jobs at the end of the stack are already deleted remove
#those jobs from the stack (this cleans up the case that the job
#had successors to run, but had not been updated to reflect this)
while len(job.stack) > 0:
jobs = [ command for command in job.stack[-1] if self.exists(command[0]) ]
if len(jobs) < len(job.stack[-1]):
changed = True
if len(jobs) > 0:
job.stack[-1] = jobs
break
else:
job.stack.pop()
else:
break
#Reset the retry count of the job
if job.remainingRetryCount < self._defaultTryCount():
job.remainingRetryCount = self._defaultTryCount()
changed = True
#This cleans the old log file which may
#have been left if the job is being retried after a job failure.
if job.logJobStoreFileID != None:
job.clearLogFile(self)
changed = True
if changed: #Update, but only if a change has occurred
self.update(job)
#Remove any crufty stats/logging files from the previous run
self.readStatsAndLogging(lambda x : None)
##########################################
#The following methods deal with creating/loading/updating/writing/checking for the
#existence of jobs
##########################################
@abstractmethod
def create( self, command, memory, cores, disk, updateID=None,
predecessorNumber=0 ):
"""
Creates a job, adding it to the store.
Command, memory, cores, updateID, predecessorNumber
are all arguments to the job's constructor.
:rtype : toil.jobWrapper.JobWrapper
"""
raise NotImplementedError( )
@abstractmethod
def exists( self, jobStoreID ):
"""
Returns true if the job is in the store, else false.
:rtype : bool
"""
raise NotImplementedError( )
@abstractmethod
def getPublicUrl( self, FileName):
"""
Returns a publicly accessible URL to the given file in the job store.
The returned URL starts with 'http:', 'https:' or 'file:'.
        The returned URL may expire as early as 1h after it has been returned.
Throw an exception if the file does not exist.
:param jobStoreFileID:
:return:
"""
raise NotImplementedError()
@abstractmethod
def getSharedPublicUrl( self, jobStoreFileID):
"""
Returns a publicly accessible URL to the given file in the job store.
The returned URL starts with 'http:', 'https:' or 'file:'.
        The returned URL may expire as early as 1h after it has been returned.
Throw an exception if the file does not exist.
:param jobStoreFileID:
:return:
"""
raise NotImplementedError()
@abstractmethod
def load( self, jobStoreID ):
"""
Loads a job for the given jobStoreID and returns it.
:rtype: toil.jobWrapper.JobWrapper
:raises: NoSuchJobException if there is no job with the given jobStoreID
"""
raise NotImplementedError( )
@abstractmethod
def update( self, job ):
"""
Persists the job in this store atomically.
"""
raise NotImplementedError( )
@abstractmethod
def delete( self, jobStoreID ):
"""
Removes from store atomically, can not then subsequently call load(), write(), update(),
etc. with the job.
This operation is idempotent, i.e. deleting a job twice or deleting a non-existent job
will succeed silently.
"""
raise NotImplementedError( )
def jobs(self):
"""
Returns iterator on the jobs in the store.
:rtype : iterator
"""
raise NotImplementedError( )
##########################################
    #The following provide a way of creating/reading/writing/updating files
#associated with a given job.
##########################################
@abstractmethod
def writeFile( self, localFilePath, jobStoreID=None ):
"""
Takes a file (as a path) and places it in this job store. Returns an ID that can be used
to retrieve the file at a later time.
jobStoreID is the id of a job, or None. If specified, when delete(job)
is called all files written with the given job.jobStoreID will be
removed from the jobStore.
"""
raise NotImplementedError( )
@abstractmethod
@contextmanager
def writeFileStream( self, jobStoreID=None ):
"""
Similar to writeFile, but returns a context manager yielding a tuple of
1) a file handle which can be written to and 2) the ID of the resulting
file in the job store. The yielded file handle does not need to and
should not be closed explicitly.
"""
raise NotImplementedError( )
@abstractmethod
def getEmptyFileStoreID( self, jobStoreID=None ):
"""
:rtype : string, the ID of a new, empty file.
jobStoreID is the id of a job, or None. If specified, when delete(job)
is called all files written with the given job.jobStoreID will be
removed from the jobStore.
Call to fileExists(getEmptyFileStoreID(jobStoreID)) will return True.
"""
raise NotImplementedError( )
@abstractmethod
def readFile( self, jobStoreFileID, localFilePath ):
"""
Copies the file referenced by jobStoreFileID to the given local file path. The version
will be consistent with the last copy of the file written/updated.
"""
raise NotImplementedError( )
@abstractmethod
@contextmanager
def readFileStream( self, jobStoreFileID ):
"""
Similar to readFile, but returns a context manager yielding a file handle which can be
read from. The yielded file handle does not need to and should not be closed explicitly.
"""
raise NotImplementedError( )
@abstractmethod
def deleteFile( self, jobStoreFileID ):
"""
Deletes the file with the given ID from this job store.
This operation is idempotent, i.e. deleting a file twice or deleting a non-existent file
will succeed silently.
"""
raise NotImplementedError( )
@abstractmethod
def fileExists(self, jobStoreFileID ):
"""
:rtype : True if the jobStoreFileID exists in the jobStore, else False
"""
raise NotImplementedError()
@abstractmethod
def updateFile( self, jobStoreFileID, localFilePath ):
"""
Replaces the existing version of a file in the jobStore. Throws an exception if the file
does not exist.
:raises ConcurrentFileModificationException: if the file was modified concurrently during
an invocation of this method
"""
raise NotImplementedError( )
##########################################
#The following methods deal with shared files, i.e. files not associated
#with specific jobs.
##########################################
sharedFileNameRegex = re.compile( r'^[a-zA-Z0-9._-]+$' )
# FIXME: Rename to updateSharedFileStream
@abstractmethod
@contextmanager
def writeSharedFileStream( self, sharedFileName, isProtected=True ):
"""
Returns a context manager yielding a writable file handle to the global file referenced
by the given name.
:param sharedFileName: A file name matching AbstractJobStore.fileNameRegex, unique within
the physical storage represented by this job store
:raises ConcurrentFileModificationException: if the file was modified concurrently during
an invocation of this method
"""
raise NotImplementedError( )
@abstractmethod
@contextmanager
def readSharedFileStream( self, sharedFileName, isProtected=True ):
"""
Returns a context manager yielding a readable file handle to the global file referenced
by the given name.
"""
raise NotImplementedError( )
@abstractmethod
def writeStatsAndLogging( self, statsAndLoggingString ):
"""
Adds the given statistics/logging string to the store of statistics info.
"""
raise NotImplementedError( )
@abstractmethod
def readStatsAndLogging( self, statsAndLoggingCallBackFn):
"""
Reads stats/logging strings accumulated by "writeStatsAndLogging" function.
For each stats/logging file calls the statsAndLoggingCallBackFn with
an open, readable file-handle that can be used to parse the stats.
Returns the number of stat/logging strings processed.
Stats/logging files are only read once and are removed from the
file store after being written to the given file handle.
"""
raise NotImplementedError( )
## Helper methods for subclasses
def _defaultTryCount( self ):
return int( self.config.retryCount+1 )
@classmethod
def _validateSharedFileName( cls, sharedFileName ):
return bool( cls.sharedFileNameRegex.match( sharedFileName ) )
| # Copyright (C) 2015 UCSC Computational Genomics Lab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from abc import ABCMeta, abstractmethod
from contextlib import contextmanager
import re
try:
import cPickle
except ImportError:
import pickle as cPickle
class NoSuchJobException( Exception ):
def __init__( self, jobStoreID ):
super( NoSuchJobException, self ).__init__( "The job '%s' does not exist" % jobStoreID )
class ConcurrentFileModificationException( Exception ):
def __init__( self, jobStoreFileID ):
super( ConcurrentFileModificationException, self ).__init__(
'Concurrent update to file %s detected.' % jobStoreFileID )
class NoSuchFileException( Exception ):
def __init__( self, fileJobStoreID ):
super( NoSuchFileException, self ).__init__( "The file '%s' does not exist" % fileJobStoreID )
class JobStoreCreationException( Exception ):
def __init__( self, message ):
super( JobStoreCreationException, self ).__init__( message )
class AbstractJobStore( object ):
"""
Represents the physical storage for the jobs and associated files in a toil.
"""
__metaclass__ = ABCMeta
def __init__( self, config=None ):
"""
:param config: If config is not None then the
given configuration object will be written to the shared file "config.pickle" which can
later be retrieved using the readSharedFileStream. See writeConfigToStore.
If this file already exists it will be overwritten. If config is None,
the shared file "config.pickle" is assumed to exist and is retrieved. See loadConfigFromStore.
"""
#Now get on with reading or writing the config
if config is None:
with self.readSharedFileStream( "config.pickle", isProtected=False ) as fileHandle:
self.__config = cPickle.load(fileHandle)
else:
self.__config = config
self.writeConfigToStore()
def writeConfigToStore(self):
"""
Re-writes the config attribute to the jobStore, so that its values can be retrieved
if the jobStore is reloaded.
"""
with self.writeSharedFileStream( "config.pickle", isProtected=False ) as fileHandle:
cPickle.dump(self.__config, fileHandle, cPickle.HIGHEST_PROTOCOL)
@property
def config( self ):
return self.__config
@staticmethod
def _checkJobStoreCreation(create, exists, jobStoreString):
"""
Consistency checks which will result in exceptions if we attempt to overwrite an existing
jobStore.
:type create: boolean
:type exists: boolean
:raise JobStoreCreationException: Thrown if create=True and exists=True or create=False
and exists=False
"""
if create and exists:
raise JobStoreCreationException("The job store '%s' already exists. "
"Use --restart or 'toil restart' to resume this jobStore, "
"else remove it to start from scratch" % jobStoreString)
if not create and not exists:
raise JobStoreCreationException("The job store '%s' does not exist, so there "
"is nothing to restart." % jobStoreString)
@abstractmethod
def deleteJobStore( self ):
"""
Removes the jobStore from the disk/store. Careful!
"""
raise NotImplementedError( )
##Cleanup functions
def clean(self):
"""
Function to cleanup the state of a jobStore after a restart.
Fixes jobs that might have been partially updated.
Resets the try counts.
"""
#Collate any jobs that were in the process of being created/deleted
jobsToDelete = set()
for job in self.jobs():
for updateID in job.jobsToDelete:
jobsToDelete.add(updateID)
#Delete the jobs that should be deleted
if len(jobsToDelete) > 0:
for job in self.jobs():
if job.updateID in jobsToDelete:
self.delete(job.jobStoreID)
#Cleanup the state of each job
for job in self.jobs():
changed = False #Flag to indicate if we need to update the job
#on disk
if len(job.jobsToDelete) != 0:
job.jobsToDelete = set()
changed = True
#While jobs at the end of the stack are already deleted remove
#those jobs from the stack (this cleans up the case that the job
#had successors to run, but had not been updated to reflect this)
while len(job.stack) > 0:
jobs = [ command for command in job.stack[-1] if self.exists(command[0]) ]
if len(jobs) < len(job.stack[-1]):
changed = True
if len(jobs) > 0:
job.stack[-1] = jobs
break
else:
job.stack.pop()
else:
break
#Reset the retry count of the job
if job.remainingRetryCount < self._defaultTryCount():
job.remainingRetryCount = self._defaultTryCount()
changed = True
#This cleans the old log file which may
#have been left if the job is being retried after a job failure.
if job.logJobStoreFileID != None:
job.clearLogFile(self)
changed = True
if changed: #Update, but only if a change has occurred
self.update(job)
#Remove any crufty stats/logging files from the previous run
self.readStatsAndLogging(lambda x : None)
##########################################
#The following methods deal with creating/loading/updating/writing/checking for the
#existence of jobs
##########################################
@abstractmethod
def create( self, command, memory, cores, disk, updateID=None,
predecessorNumber=0 ):
"""
Creates a job, adding it to the store.
Command, memory, cores, updateID, predecessorNumber
are all arguments to the job's constructor.
:rtype : toil.jobWrapper.JobWrapper
"""
raise NotImplementedError( )
@abstractmethod
def exists( self, jobStoreID ):
"""
Returns true if the job is in the store, else false.
:rtype : bool
"""
raise NotImplementedError( )
@abstractmethod
def getPublicUrl( self, FileName):
"""
Returns a publicly accessible URL to the given file in the job store.
The returned URL starts with 'http:', 'https:' or 'file:'.
        The returned URL may expire as early as 1h after it has been returned.
Throw an exception if the file does not exist.
:param jobStoreFileID:
:return:
"""
raise NotImplementedError()
@abstractmethod
def getSharedPublicUrl( self, jobStoreFileID):
"""
Returns a publicly accessible URL to the given file in the job store.
The returned URL starts with 'http:', 'https:' or 'file:'.
        The returned URL may expire as early as 1h after it has been returned.
Throw an exception if the file does not exist.
:param jobStoreFileID:
:return:
"""
raise NotImplementedError()
@abstractmethod
def load( self, jobStoreID ):
"""
Loads a job for the given jobStoreID and returns it.
:rtype: toil.jobWrapper.JobWrapper
:raises: NoSuchJobException if there is no job with the given jobStoreID
"""
raise NotImplementedError( )
@abstractmethod
def update( self, job ):
"""
Persists the job in this store atomically.
"""
raise NotImplementedError( )
@abstractmethod
def delete( self, jobStoreID ):
"""
Removes from store atomically, can not then subsequently call load(), write(), update(),
etc. with the job.
This operation is idempotent, i.e. deleting a job twice or deleting a non-existent job
will succeed silently.
"""
raise NotImplementedError( )
def jobs(self):
"""
Returns iterator on the jobs in the store.
:rtype : iterator
"""
raise NotImplementedError( )
##########################################
    #The following provide a way of creating/reading/writing/updating files
#associated with a given job.
##########################################
@abstractmethod
def writeFile( self, localFilePath, jobStoreID=None ):
"""
Takes a file (as a path) and places it in this job store. Returns an ID that can be used
to retrieve the file at a later time.
jobStoreID is the id of a job, or None. If specified, when delete(job)
is called all files written with the given job.jobStoreID will be
removed from the jobStore.
"""
raise NotImplementedError( )
@abstractmethod
@contextmanager
def writeFileStream( self, jobStoreID=None ):
"""
Similar to writeFile, but returns a context manager yielding a tuple of
1) a file handle which can be written to and 2) the ID of the resulting
file in the job store. The yielded file handle does not need to and
should not be closed explicitly.
"""
raise NotImplementedError( )
@abstractmethod
def getEmptyFileStoreID( self, jobStoreID=None ):
"""
:rtype : string, the ID of a new, empty file.
jobStoreID is the id of a job, or None. If specified, when delete(job)
is called all files written with the given job.jobStoreID will be
removed from the jobStore.
Call to fileExists(getEmptyFileStoreID(jobStoreID)) will return True.
"""
raise NotImplementedError( )
@abstractmethod
def readFile( self, jobStoreFileID, localFilePath ):
"""
Copies the file referenced by jobStoreFileID to the given local file path. The version
will be consistent with the last copy of the file written/updated.
"""
raise NotImplementedError( )
@abstractmethod
@contextmanager
def readFileStream( self, jobStoreFileID ):
"""
Similar to readFile, but returns a context manager yielding a file handle which can be
read from. The yielded file handle does not need to and should not be closed explicitly.
"""
raise NotImplementedError( )
@abstractmethod
def deleteFile( self, jobStoreFileID ):
"""
Deletes the file with the given ID from this job store.
This operation is idempotent, i.e. deleting a file twice or deleting a non-existent file
will succeed silently.
"""
raise NotImplementedError( )
@abstractmethod
def fileExists(self, jobStoreFileID ):
"""
:rtype : True if the jobStoreFileID exists in the jobStore, else False
"""
raise NotImplementedError()
@abstractmethod
def updateFile( self, jobStoreFileID, localFilePath ):
"""
Replaces the existing version of a file in the jobStore. Throws an exception if the file
does not exist.
:raises ConcurrentFileModificationException: if the file was modified concurrently during
an invocation of this method
"""
raise NotImplementedError( )
##########################################
#The following methods deal with shared files, i.e. files not associated
#with specific jobs.
##########################################
sharedFileNameRegex = re.compile( r'^[a-zA-Z0-9._-]+$' )
# FIXME: Rename to updateSharedFileStream
@abstractmethod
@contextmanager
def writeSharedFileStream( self, sharedFileName, isProtected=True ):
"""
Returns a context manager yielding a writable file handle to the global file referenced
by the given name.
:param sharedFileName: A file name matching AbstractJobStore.fileNameRegex, unique within
the physical storage represented by this job store
:raises ConcurrentFileModificationException: if the file was modified concurrently during
an invocation of this method
"""
raise NotImplementedError( )
@abstractmethod
@contextmanager
def readSharedFileStream( self, sharedFileName, isProtected=True ):
"""
Returns a context manager yielding a readable file handle to the global file referenced
by the given name.
"""
raise NotImplementedError( )
@abstractmethod
def writeStatsAndLogging( self, statsAndLoggingString ):
"""
Adds the given statistics/logging string to the store of statistics info.
"""
raise NotImplementedError( )
@abstractmethod
def readStatsAndLogging( self, statsAndLoggingCallBackFn):
"""
Reads stats/logging strings accumulated by "writeStatsAndLogging" function.
For each stats/logging file calls the statsAndLoggingCallBackFn with
an open, readable file-handle that can be used to parse the stats.
Returns the number of stat/logging strings processed.
Stats/logging files are only read once and are removed from the
file store after being written to the given file handle.
"""
raise NotImplementedError( )
## Helper methods for subclasses
def _defaultTryCount( self ):
return int( self.config.retryCount+1 )
@classmethod
def _validateSharedFileName( cls, sharedFileName ):
return bool( cls.sharedFileNameRegex.match( sharedFileName ) )
| en | 0.871181 | # Copyright (C) 2015 UCSC Computational Genomics Lab # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Represents the physical storage for the jobs and associated files in a toil. :param config: If config is not None then the given configuration object will be written to the shared file "config.pickle" which can later be retrieved using the readSharedFileStream. See writeConfigToStore. If this file already exists it will be overwritten. If config is None, the shared file "config.pickle" is assumed to exist and is retrieved. See loadConfigFromStore. #Now get on with reading or writing the config Re-writes the config attribute to the jobStore, so that its values can be retrieved if the jobStore is reloaded. Consistency checks which will result in exceptions if we attempt to overwrite an existing jobStore. :type create: boolean :type exists: boolean :raise JobStoreCreationException: Thrown if create=True and exists=True or create=False and exists=False Removes the jobStore from the disk/store. Careful! ##Cleanup functions Function to cleanup the state of a jobStore after a restart. Fixes jobs that might have been partially updated. Resets the try counts. #Collate any jobs that were in the process of being created/deleted #Delete the jobs that should be deleted #Cleanup the state of each job #Flag to indicate if we need to update the job #on disk #While jobs at the end of the stack are already deleted remove #those jobs from the stack (this cleans up the case that the job #had successors to run, but had not been updated to reflect this) #Reset the retry count of the job #This cleans the old log file which may #have been left if the job is being retried after a job failure. #Update, but only if a change has occurred #Remove any crufty stats/logging files from the previous run ########################################## #The following methods deal with creating/loading/updating/writing/checking for the #existence of jobs ########################################## Creates a job, adding it to the store. Command, memory, cores, updateID, predecessorNumber are all arguments to the job's constructor. :rtype : toil.jobWrapper.JobWrapper Returns true if the job is in the store, else false. :rtype : bool Returns a publicly accessible URL to the given file in the job store. The returned URL starts with 'http:', 'https:' or 'file:'. The returned URL may expire as early as 1h after its been returned. Throw an exception if the file does not exist. :param jobStoreFileID: :return: Returns a publicly accessible URL to the given file in the job store. The returned URL starts with 'http:', 'https:' or 'file:'. The returned URL may expire as early as 1h after its been returned. Throw an exception if the file does not exist. :param jobStoreFileID: :return: Loads a job for the given jobStoreID and returns it. :rtype: toil.jobWrapper.JobWrapper :raises: NoSuchJobException if there is no job with the given jobStoreID Persists the job in this store atomically. 
Removes from store atomically, can not then subsequently call load(), write(), update(), etc. with the job. This operation is idempotent, i.e. deleting a job twice or deleting a non-existent job will succeed silently. Returns iterator on the jobs in the store. :rtype : iterator ########################################## #The following provide an way of creating/reading/writing/updating files #associated with a given job. ########################################## Takes a file (as a path) and places it in this job store. Returns an ID that can be used to retrieve the file at a later time. jobStoreID is the id of a job, or None. If specified, when delete(job) is called all files written with the given job.jobStoreID will be removed from the jobStore. Similar to writeFile, but returns a context manager yielding a tuple of 1) a file handle which can be written to and 2) the ID of the resulting file in the job store. The yielded file handle does not need to and should not be closed explicitly. :rtype : string, the ID of a new, empty file. jobStoreID is the id of a job, or None. If specified, when delete(job) is called all files written with the given job.jobStoreID will be removed from the jobStore. Call to fileExists(getEmptyFileStoreID(jobStoreID)) will return True. Copies the file referenced by jobStoreFileID to the given local file path. The version will be consistent with the last copy of the file written/updated. Similar to readFile, but returns a context manager yielding a file handle which can be read from. The yielded file handle does not need to and should not be closed explicitly. Deletes the file with the given ID from this job store. This operation is idempotent, i.e. deleting a file twice or deleting a non-existent file will succeed silently. :rtype : True if the jobStoreFileID exists in the jobStore, else False Replaces the existing version of a file in the jobStore. Throws an exception if the file does not exist. :raises ConcurrentFileModificationException: if the file was modified concurrently during an invocation of this method ########################################## #The following methods deal with shared files, i.e. files not associated #with specific jobs. ########################################## # FIXME: Rename to updateSharedFileStream Returns a context manager yielding a writable file handle to the global file referenced by the given name. :param sharedFileName: A file name matching AbstractJobStore.fileNameRegex, unique within the physical storage represented by this job store :raises ConcurrentFileModificationException: if the file was modified concurrently during an invocation of this method Returns a context manager yielding a readable file handle to the global file referenced by the given name. Adds the given statistics/logging string to the store of statistics info. Reads stats/logging strings accumulated by "writeStatsAndLogging" function. For each stats/logging file calls the statsAndLoggingCallBackFn with an open, readable file-handle that can be used to parse the stats. Returns the number of stat/logging strings processed. Stats/logging files are only read once and are removed from the file store after being written to the given file handle. ## Helper methods for subclasses | 1.768173 | 2 |
dashboard.py | TheCrypticMusic/COVID-19 | 0 | 10693 | from datetime import date
import dash
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
import pandas as pd
import plotly.express as px
from dash.dependencies import Input, Output
test_data = pd.read_csv("data/world_data.csv")
today = date.today()
external_stylesheets = [dbc.themes.BOOTSTRAP]
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
app.title = "COVID Dashboard - UK Edition"
app.layout = html.Div([
html.Nav(className="navbar navbar-dark fixed-top bg-dark flex-md-nowrap p-0 shadow", children=[
html.A(className="navbar-brand col-sm-3 col-md-2 mr-0", children="COVID-19"),
# dcc.DatePickerRange(className="date-and-location",
# id="month-picker",
# min_date_allowed=date(2020, 1, 30),
# max_date_allowed=date(today.year, today.month, today.day),
# start_date=date(2020, 3, 1),
# end_date=date(today.year, today.month, today.day),
# style={"height": "50%"}
# ),
]),
html.Div(className="container-fluid", children=[
html.Div(className="row", children=[
html.Nav(className="col-md-2 d-none d-md-block bg-light sidebar", children=[
html.Div(className="sidebar-sticky", children=[
html.H6(className="sidebar-heading d-flex px-3 mt-4 mb-1 text-muted", children=[
html.Span("Custom Search"),
]),
html.Ul(className="nav flex-column", children=[
html.Li(className="nav-item", children=[
dcc.Link("User Search", href="/home"),
])]),
html.H6(className="sidebar-heading d-flex px-3 mt-4 mb-1 text-muted", children=[
html.Span("Preset Search"),
]),
dcc.Location(id="url", refresh=False),
html.Ul(className="nav flex-column", children=[
html.Li(className="nav-item", children=[
dcc.Link("Africa", href="/africa"),
html.Span(className="sr-only"),
]),
html.Li(className="nav-item", children=[
dcc.Link("Asia", href="/asia"),
html.Span(className="sr-only"),
]),
html.Li(className="nav-item", children=[
dcc.Link("Europe", href="/europe"),
html.Span(className="sr-only"),
]),
html.Li(className="nav-item", children=[
dcc.Link("North America", href="/northamerica"),
html.Span(className="sr-only"),
]),
html.Li(className="nav-item", children=[
dcc.Link("South America", href="/southamerica"),
html.Span(className="sr-only"),
]),
html.Li(className="nav-item", children=[
dcc.Link("Oceania", href="/oceania"),
html.Span(className="sr-only"),
]),
]),
html.Div(id='page-content'),
html.Ul(className="nav flex-column mb-2")
]),
]),
html.Main(role="main", className="col-md-9 ml-sm-auto col-lg-10 px-4", children=[
html.Div(className="chartjs-size-monitor", style={"position": "absolute", "left": "0px", "top": "0px", "right": "0px", "bottom": "0px", "overflow": "hidden", "pointer-events": "none", "visibility": "hidden", "z-index": "-1"}),
html.Div(className="box-shadow", children=[
]),
dbc.Row(
[
dbc.Col(children=[
html.H1(children="Deaths"),
html.Hr(className="lead"),
html.Div(id="death-stats", children="######"),
]),
dbc.Col(children=[
html.H1(children="Cases"),
html.Hr(className="lead"),
html.Div(id="cases-stats", children="######"),
]),
dbc.Col(children=[
html.H1(children="Vaccines"),
html.Hr(className="lead"),
html.Div(id="vaccines-stats", children="######"),
]),
]
),
html.Div(className="graphs", children=[
dcc.Graph(
id="cases-graph"
),
dcc.Graph(
id="deaths-graph",
),
]),
])])])])
def dropdown(location, user_enabled, display):
return dcc.Dropdown(
id="location",
options=[
{"label": location, "value": location} for location in test_data["location"].unique()
],
value=location,
searchable=False,
disabled=user_enabled,
style={"display": display}
),
@app.callback(dash.dependencies.Output('page-content', 'children'),
[dash.dependencies.Input('url', 'pathname')])
def display_page(pathname):
if pathname == '/africa':
return dropdown("Africa", True, "none")
elif pathname == '/asia':
return dropdown("Asia", True, "none")
elif pathname == '/europe':
return dropdown("Europe", True, "none")
elif pathname == '/northamerica':
return dropdown("North America", True, "none")
elif pathname == '/southamerica':
return dropdown("South America", True, "none")
elif pathname == '/oceania':
return dropdown("Oceania", True, "none")
else:
return dropdown("United Kingdom", False, "block")
@app.callback(
[
Output("cases-graph", "figure"), Output("deaths-graph", "figure"),
Output("death-stats", "children"), Output("cases-stats", "children"),
Output("vaccines-stats", "children")
],
[
# Input('month-picker', "start_date"),
# Input("month-picker", "end_date"),
Input("location", "value"),
],
)
def update_personal_output(value):
# start_date, end_date, ):
filtered_data_cases = test_data.loc[(test_data["location"] == value)]
# //& (test_data["date"] >= start_date) & (test_data["date"] <= end_date)]
fig_deaths = px.bar(filtered_data_cases, x="date", y=["new_deaths_smoothed"], color_discrete_sequence=["mediumaquamarine"], title=f"COVID Deaths - {value}", labels={"value": "Number of Deaths", "date": "Date", "variable": "Legend"})
fig_deaths.update_layout(title_x=0.5, legend=dict(yanchor="top", y=0.99, xanchor="left", x=0.01))
fig_deaths.add_scatter(x=filtered_data_cases["date"], y=filtered_data_cases["new_deaths_smoothed"].rolling(window=7, min_periods=7, center=True).mean().round(), name="Rolling Average")
fig_cases = px.bar(filtered_data_cases, x="date", y=["new_cases_smoothed"], color_discrete_sequence=["mediumaquamarine"], title=f"COVID Cases - {value}", labels={"value": "Number of Cases", "date": "Date", "variable": "Legend"})
fig_cases.update_layout(title_x=0.5, legend=dict(yanchor="top", y=0.99, xanchor="left", x=0.01))
fig_cases.add_scatter(x=filtered_data_cases["date"], y=filtered_data_cases["new_cases_smoothed"].rolling(window=7, min_periods=7, center=True).mean().round(), name="Rolling Average")
latest_deaths = f'{filtered_data_cases["new_deaths"].iloc[-1]:.0f} today'
latest_cases = f'{filtered_data_cases["new_cases"].iloc[-1]:.0f} today'
latest_vaccines = f'{filtered_data_cases["new_vaccinations"].iloc[-2]:.0f} today'
return fig_deaths, fig_cases, latest_deaths, latest_cases, latest_vaccines
if __name__ == "__main__":
app.run_server(debug=True, dev_tools_ui=False)
| from datetime import date
import dash
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
import pandas as pd
import plotly.express as px
from dash.dependencies import Input, Output
test_data = pd.read_csv("data/world_data.csv")
today = date.today()
external_stylesheets = [dbc.themes.BOOTSTRAP]
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
app.title = "COVID Dashboard - UK Edition"
app.layout = html.Div([
html.Nav(className="navbar navbar-dark fixed-top bg-dark flex-md-nowrap p-0 shadow", children=[
html.A(className="navbar-brand col-sm-3 col-md-2 mr-0", children="COVID-19"),
# dcc.DatePickerRange(className="date-and-location",
# id="month-picker",
# min_date_allowed=date(2020, 1, 30),
# max_date_allowed=date(today.year, today.month, today.day),
# start_date=date(2020, 3, 1),
# end_date=date(today.year, today.month, today.day),
# style={"height": "50%"}
# ),
]),
html.Div(className="container-fluid", children=[
html.Div(className="row", children=[
html.Nav(className="col-md-2 d-none d-md-block bg-light sidebar", children=[
html.Div(className="sidebar-sticky", children=[
html.H6(className="sidebar-heading d-flex px-3 mt-4 mb-1 text-muted", children=[
html.Span("Custom Search"),
]),
html.Ul(className="nav flex-column", children=[
html.Li(className="nav-item", children=[
dcc.Link("User Search", href="/home"),
])]),
html.H6(className="sidebar-heading d-flex px-3 mt-4 mb-1 text-muted", children=[
html.Span("Preset Search"),
]),
dcc.Location(id="url", refresh=False),
html.Ul(className="nav flex-column", children=[
html.Li(className="nav-item", children=[
dcc.Link("Africa", href="/africa"),
html.Span(className="sr-only"),
]),
html.Li(className="nav-item", children=[
dcc.Link("Asia", href="/asia"),
html.Span(className="sr-only"),
]),
html.Li(className="nav-item", children=[
dcc.Link("Europe", href="/europe"),
html.Span(className="sr-only"),
]),
html.Li(className="nav-item", children=[
dcc.Link("North America", href="/northamerica"),
html.Span(className="sr-only"),
]),
html.Li(className="nav-item", children=[
dcc.Link("South America", href="/southamerica"),
html.Span(className="sr-only"),
]),
html.Li(className="nav-item", children=[
dcc.Link("Oceania", href="/oceania"),
html.Span(className="sr-only"),
]),
]),
html.Div(id='page-content'),
html.Ul(className="nav flex-column mb-2")
]),
]),
html.Main(role="main", className="col-md-9 ml-sm-auto col-lg-10 px-4", children=[
html.Div(className="chartjs-size-monitor", style={"position": "absolute", "left": "0px", "top": "0px", "right": "0px", "bottom": "0px", "overflow": "hidden", "pointer-events": "none", "visibility": "hidden", "z-index": "-1"}),
html.Div(className="box-shadow", children=[
]),
dbc.Row(
[
dbc.Col(children=[
html.H1(children="Deaths"),
html.Hr(className="lead"),
html.Div(id="death-stats", children="######"),
]),
dbc.Col(children=[
html.H1(children="Cases"),
html.Hr(className="lead"),
html.Div(id="cases-stats", children="######"),
]),
dbc.Col(children=[
html.H1(children="Vaccines"),
html.Hr(className="lead"),
html.Div(id="vaccines-stats", children="######"),
]),
]
),
html.Div(className="graphs", children=[
dcc.Graph(
id="cases-graph"
),
dcc.Graph(
id="deaths-graph",
),
]),
])])])])
def dropdown(location, user_enabled, display):
return dcc.Dropdown(
id="location",
options=[
{"label": location, "value": location} for location in test_data["location"].unique()
],
value=location,
searchable=False,
disabled=user_enabled,
style={"display": display}
),
@app.callback(dash.dependencies.Output('page-content', 'children'),
[dash.dependencies.Input('url', 'pathname')])
def display_page(pathname):
if pathname == '/africa':
return dropdown("Africa", True, "none")
elif pathname == '/asia':
return dropdown("Asia", True, "none")
elif pathname == '/europe':
return dropdown("Europe", True, "none")
elif pathname == '/northamerica':
return dropdown("North America", True, "none")
elif pathname == '/southamerica':
return dropdown("South America", True, "none")
elif pathname == '/oceania':
return dropdown("Oceania", True, "none")
else:
return dropdown("United Kingdom", False, "block")
@app.callback(
[
Output("cases-graph", "figure"), Output("deaths-graph", "figure"),
Output("death-stats", "children"), Output("cases-stats", "children"),
Output("vaccines-stats", "children")
],
[
# Input('month-picker', "start_date"),
# Input("month-picker", "end_date"),
Input("location", "value"),
],
)
def update_personal_output(value):
# start_date, end_date, ):
filtered_data_cases = test_data.loc[(test_data["location"] == value)]
# //& (test_data["date"] >= start_date) & (test_data["date"] <= end_date)]
fig_deaths = px.bar(filtered_data_cases, x="date", y=["new_deaths_smoothed"], color_discrete_sequence=["mediumaquamarine"], title=f"COVID Deaths - {value}", labels={"value": "Number of Deaths", "date": "Date", "variable": "Legend"})
fig_deaths.update_layout(title_x=0.5, legend=dict(yanchor="top", y=0.99, xanchor="left", x=0.01))
fig_deaths.add_scatter(x=filtered_data_cases["date"], y=filtered_data_cases["new_deaths_smoothed"].rolling(window=7, min_periods=7, center=True).mean().round(), name="Rolling Average")
fig_cases = px.bar(filtered_data_cases, x="date", y=["new_cases_smoothed"], color_discrete_sequence=["mediumaquamarine"], title=f"COVID Cases - {value}", labels={"value": "Number of Cases", "date": "Date", "variable": "Legend"})
fig_cases.update_layout(title_x=0.5, legend=dict(yanchor="top", y=0.99, xanchor="left", x=0.01))
fig_cases.add_scatter(x=filtered_data_cases["date"], y=filtered_data_cases["new_cases_smoothed"].rolling(window=7, min_periods=7, center=True).mean().round(), name="Rolling Average")
latest_deaths = f'{filtered_data_cases["new_deaths"].iloc[-1]:.0f} today'
latest_cases = f'{filtered_data_cases["new_cases"].iloc[-1]:.0f} today'
latest_vaccines = f'{filtered_data_cases["new_vaccinations"].iloc[-2]:.0f} today'
return fig_deaths, fig_cases, latest_deaths, latest_cases, latest_vaccines
if __name__ == "__main__":
app.run_server(debug=True, dev_tools_ui=False)
| en | 0.540385 | # dcc.DatePickerRange(className="date-and-location", # id="month-picker", # min_date_allowed=date(2020, 1, 30), # max_date_allowed=date(today.year, today.month, today.day), # start_date=date(2020, 3, 1), # end_date=date(today.year, today.month, today.day), # style={"height": "50%"} # ), #####"), #####"), #####"), # Input('month-picker', "start_date"), # Input("month-picker", "end_date"), # start_date, end_date, ): # //& (test_data["date"] >= start_date) & (test_data["date"] <= end_date)] | 2.772456 | 3 |
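A hedged aside on the dashboard.py row above: both callbacks build their overlay trace with the same pandas rolling-average call chain. The sketch below is not part of the repository; it only reuses the CSV path and column names the file already reads, to show the computation without starting a Dash server.

# Minimal sketch, assuming only pandas and the data/world_data.csv layout used above.
import pandas as pd

def rolling_average(frame, column, window=7):
    # Same call chain as the callbacks above: centered 7-day mean, rounded.
    return frame[column].rolling(window=window, min_periods=window, center=True).mean().round()

df = pd.read_csv("data/world_data.csv")
uk = df.loc[df["location"] == "United Kingdom"]
print(rolling_average(uk, "new_cases_smoothed").tail())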
dataset/load_data_queue.py | hezhujun/autofocus-rnn | 7 | 10694 | from collections import OrderedDict
import skimage.io as io
from config import get_config
config = get_config()
class LRUCache:
def __init__(self, capacity: int):
self._ordered_dict = OrderedDict()
self._capacity = capacity
def get(self, key):
self._move_to_end_if_exist(key)
return self._ordered_dict.get(key)
def put(self, key, value):
self._move_to_end_if_exist(key)
self._ordered_dict[key] = value
if len(self._ordered_dict) > self._capacity:
key, value = self._ordered_dict.popitem(last=False)
del key
del value
def _move_to_end_if_exist(self, key):
if key in self._ordered_dict:
self._ordered_dict.move_to_end(key)
_cache = LRUCache(config["data_queue_len"])
def get_image(path):
# image = _cache.get(path)
image = None
if image is None:
image = io.imread(path)
# _cache.put(path, image)
return image
| from collections import OrderedDict
import skimage.io as io
from config import get_config
config = get_config()
class LRUCache:
def __init__(self, capacity: int):
self._ordered_dict = OrderedDict()
self._capacity = capacity
def get(self, key):
self._move_to_end_if_exist(key)
return self._ordered_dict.get(key)
def put(self, key, value):
self._move_to_end_if_exist(key)
self._ordered_dict[key] = value
if len(self._ordered_dict) > self._capacity:
key, value = self._ordered_dict.popitem(last=False)
del key
del value
def _move_to_end_if_exist(self, key):
if key in self._ordered_dict:
self._ordered_dict.move_to_end(key)
_cache = LRUCache(config["data_queue_len"])
def get_image(path):
# image = _cache.get(path)
image = None
if image is None:
image = io.imread(path)
# _cache.put(path, image)
return image
| pt | 0.169125 | # image = _cache.get(path) # _cache.put(path, image) | 2.696487 | 3 |
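A short usage sketch for the LRUCache class in the load_data_queue.py row above: the OrderedDict gives LRU behaviour by moving touched keys to the end and evicting from the front with popitem(last=False). The keys below are hypothetical, and importing the real module also builds the module-level _cache from config["data_queue_len"].

# Usage sketch (assumes LRUCache has been imported from the module above).
cache = LRUCache(capacity=2)
cache.put("a.png", "image-a")
cache.put("b.png", "image-b")
cache.get("a.png")             # touching "a.png" makes it most-recently-used
cache.put("c.png", "image-c")  # over capacity -> least-recently-used "b.png" is evicted
assert cache.get("b.png") is None
assert cache.get("a.png") == "image-a"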
algs/astar.py | jakedolan443/search-algorithm-visualizer | 0 | 10695 | import numpy
from heapq import *
import time
def heuristic(a, b):
return (b[0] - a[0]) ** 2 + (b[1] - a[1]) ** 2
def astar(canvas, array, start, goal):
neighbours = [(0, 1), (0, -1), (1, 0), (-1, 0)]
close_set = set()
came_from = {}
gscore = {start: 0}
fscore = {start: heuristic(start, goal)}
heap_lst = []
heappush(heap_lst, (fscore[start], start))
canvas.in_search = True
while heap_lst:
current = heappop(heap_lst)[1]
if current == goal:
path = []
while current in came_from:
path.append(current)
current = came_from[current]
canvas.finish_search(path)
canvas.in_search = False
return path
close_set.add(current)
for w,h in neighbours:
neighbour = current[0] + w, current[1] + h
temp_g_score = gscore[current] + heuristic(current, neighbour)
if 0 <= neighbour[0] < array.shape[0]:
if 0 <= neighbour[1] < array.shape[1]:
if array[neighbour[0]][neighbour[1]] == 1:
continue
else:
continue
else:
continue
if neighbour in close_set and temp_g_score >= gscore.get(neighbour, 0):
continue
if temp_g_score < gscore.get(neighbour, 0) or neighbour not in [i[1] for i in heap_lst]:
canvas.highlight(neighbour)
time.sleep(canvas.get_root().options['speed']/1000)
came_from[neighbour] = current
gscore[neighbour] = temp_g_score
fscore[neighbour] = temp_g_score + heuristic(neighbour, goal)
heappush(heap_lst, (fscore[neighbour], neighbour))
canvas.in_search = False
return False
| import numpy
from heapq import *
import time
def heuristic(a, b):
return (b[0] - a[0]) ** 2 + (b[1] - a[1]) ** 2
def astar(canvas, array, start, goal):
neighbours = [(0, 1), (0, -1), (1, 0), (-1, 0)]
close_set = set()
came_from = {}
gscore = {start: 0}
fscore = {start: heuristic(start, goal)}
heap_lst = []
heappush(heap_lst, (fscore[start], start))
canvas.in_search = True
while heap_lst:
current = heappop(heap_lst)[1]
if current == goal:
path = []
while current in came_from:
path.append(current)
current = came_from[current]
canvas.finish_search(path)
canvas.in_search = False
return path
close_set.add(current)
for w,h in neighbours:
neighbour = current[0] + w, current[1] + h
temp_g_score = gscore[current] + heuristic(current, neighbour)
if 0 <= neighbour[0] < array.shape[0]:
if 0 <= neighbour[1] < array.shape[1]:
if array[neighbour[0]][neighbour[1]] == 1:
continue
else:
continue
else:
continue
if neighbour in close_set and temp_g_score >= gscore.get(neighbour, 0):
continue
if temp_g_score < gscore.get(neighbour, 0) or neighbour not in [i[1] for i in heap_lst]:
canvas.highlight(neighbour)
time.sleep(canvas.get_root().options['speed']/1000)
came_from[neighbour] = current
gscore[neighbour] = temp_g_score
fscore[neighbour] = temp_g_score + heuristic(neighbour, goal)
heappush(heap_lst, (fscore[neighbour], neighbour))
canvas.in_search = False
return False
| none | 1 | 2.914841 | 3 |
|
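A hedged driver for the astar() function in the algs/astar.py row above. In the repository the canvas argument is a GUI widget; the stub below is an assumption that only supplies the attributes astar() actually touches (highlight, finish_search, get_root().options['speed'], in_search), and the grid marks blocked cells with 1, matching the bounds check in the function.

# Hypothetical stand-alone run; the stub canvas and the 5x5 grid are assumptions.
import numpy as np

class StubCanvas:
    in_search = False
    def highlight(self, cell):
        pass                        # the real visualizer colours the expanded cell here
    def finish_search(self, path):
        pass                        # the real visualizer draws the final path here
    def get_root(self):
        class Root:
            options = {"speed": 0}  # zero delay between expansions
        return Root()

grid = np.zeros((5, 5), dtype=int)
grid[2, 1:4] = 1                    # a wall across row 2; 1 means blocked
print(astar(StubCanvas(), grid, (0, 0), (4, 4)))  # cells from goal back toward start, or False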
examples/serial_client.py | marcinbor85/qupy | 0 | 10696 | import logging
import time
from qupy.framing.slip import Slip
from qupy.interface.serial import SerialPort
from qupy.interface.errors import InterfaceTimeoutError, InterfaceIOError, InterfaceError
from qupy.comm.client import CommClient
logging.basicConfig(level=logging.DEBUG)
if __name__ == '__main__':
s = SerialPort()
f = Slip()
c = CommClient(s, f)
connect = True
while True:
if connect:
try:
s.open()
except InterfaceIOError as e:
time.sleep(1.0)
continue
c.start()
connect = False
try:
print('ask...')
data = input()
d = c.ask(data.encode('utf-8'))
print('data:',d)
if len(d) > 0 and d[0] == ord('p'):
break
except InterfaceIOError as e:
print('ask io error', str(e))
c.stop()
s.close()
connect = True
except InterfaceTimeoutError as e:
print('timeout')
c.stop()
s.close()
| import logging
import time
from qupy.framing.slip import Slip
from qupy.interface.serial import SerialPort
from qupy.interface.errors import InterfaceTimeoutError, InterfaceIOError, InterfaceError
from qupy.comm.client import CommClient
logging.basicConfig(level=logging.DEBUG)
if __name__ == '__main__':
s = SerialPort()
f = Slip()
c = CommClient(s, f)
connect = True
while True:
if connect:
try:
s.open()
except InterfaceIOError as e:
time.sleep(1.0)
continue
c.start()
connect = False
try:
print('ask...')
data = input()
d = c.ask(data.encode('utf-8'))
print('data:',d)
if len(d) > 0 and d[0] == ord('p'):
break
except InterfaceIOError as e:
print('ask io error', str(e))
c.stop()
s.close()
connect = True
except InterfaceTimeoutError as e:
print('timeout')
c.stop()
s.close()
| none | 1 | 2.526952 | 3 |
|
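The examples/serial_client.py row above layers SLIP framing over a serial port and drives it through CommClient. A hedged variation using only the qupy calls already shown in that example: send one payload with a bounded retry count instead of the interactive loop.

# Sketch only; assumes the same qupy API used in the example above.
import time
from qupy.framing.slip import Slip
from qupy.interface.serial import SerialPort
from qupy.interface.errors import InterfaceIOError, InterfaceTimeoutError
from qupy.comm.client import CommClient

def ask_once(payload, retries=3):
    s = SerialPort()
    c = CommClient(s, Slip())
    s.open()
    c.start()
    try:
        for _ in range(retries):
            try:
                return c.ask(payload)
            except (InterfaceTimeoutError, InterfaceIOError):
                time.sleep(0.5)   # brief pause before retrying
        return None
    finally:
        c.stop()
        s.close()

# Example call: ask_once(b'ping')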
summary/abs_summarization.py | solarpark7346/sukjulyo | 0 | 10697 | <filename>summary/abs_summarization.py
import torch
from transformers import PreTrainedTokenizerFast
from transformers import BartForConditionalGeneration
class AbsSummarization():
def __init__(self):
self.tokenizer = PreTrainedTokenizerFast.from_pretrained('gogamza/kobart-summarization')
self.model = BartForConditionalGeneration.from_pretrained('gogamza/kobart-summarization')
def predict(self, text):
raw_input_ids = self.tokenizer.encode(text)
input_ids = [self.tokenizer.bos_token_id] + raw_input_ids + [self.tokenizer.eos_token_id]
summary_ids = self.model.generate(torch.tensor([input_ids]))
return self.tokenizer.decode(summary_ids.squeeze().tolist(), skip_special_tokens=True)
abs_summary = AbsSummarization() | <filename>summary/abs_summarization.py
import torch
from transformers import PreTrainedTokenizerFast
from transformers import BartForConditionalGeneration
class AbsSummarization():
def __init__(self):
self.tokenizer = PreTrainedTokenizerFast.from_pretrained('gogamza/kobart-summarization')
self.model = BartForConditionalGeneration.from_pretrained('gogamza/kobart-summarization')
def predict(self, text):
raw_input_ids = self.tokenizer.encode(text)
input_ids = [self.tokenizer.bos_token_id] + raw_input_ids + [self.tokenizer.eos_token_id]
summary_ids = self.model.generate(torch.tensor([input_ids]))
return self.tokenizer.decode(summary_ids.squeeze().tolist(), skip_special_tokens=True)
abs_summary = AbsSummarization() | none | 1 | 2.466064 | 2 |
|
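A brief usage note on the summary/abs_summarization.py row above: the module builds abs_summary at import time, so the KoBART weights are fetched on first import. A minimal caller, assuming the repository root is on sys.path; the article text is a placeholder.

# Usage sketch; model and tokenizer come from the Hugging Face hub on first import.
from summary.abs_summarization import abs_summary

article = "..."   # any Korean news text; very long inputs may need truncation first
print(abs_summary.predict(article))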
dp_tornado/helper/io/image/__init__.py | donghak-shin/dp-tornado | 18 | 10698 | <reponame>donghak-shin/dp-tornado
# -*- coding: utf-8 -*-
import tempfile
from dp_tornado.engine.helper import Helper as dpHelper
class ImageHelper(dpHelper):
def compare(self, i1, i2, error=0):
i1 = self.load(i1)
i2 = self.load(i2)
if not i1 or not i2:
return None
s1 = i1.size
s2 = i2.size
if s1[0] != s2[0] or s2[1] != s2[1]:
print('size ne,', s1, s2)
return False
i1 = i1.load()
i2 = i2.load()
for i in range(s1[0]):
for j in range(s1[1]):
if i1[i, j] != i2[i, j]:
if error:
for k in range(len(i1[i, j])):
if abs(i1[i, j][k] - i2[i, j][k]) > error:
print('pixel ne,', i1[i, j], i2[i, j], abs(i1[i, j][k] - i2[i, j][k]), error)
return False
else:
return False
return True
def _driver(self, options=None, **kwargs):
if not options and kwargs:
options = kwargs
if options and 'driver' in options and options['driver'] == 'wand':
return self.helper.io.image.driver.wand
return self.helper.io.image.driver.pillow
def load(self, src, options=None, **kwargs):
if not options and kwargs:
options = kwargs
tmp = None
drivers = []
pillow_image = self.helper.io.image.driver.pillow.Image
wand_image = self.helper.io.image.driver.wand.Image
if pillow_image:
drivers.append(pillow_image)
if wand_image:
drivers.append(wand_image)
try:
if isinstance(src, tuple(drivers)):
return src
elif self.helper.web.url.validate(src):
code, res = self.helper.web.http.get.raw(src)
if code != 200:
raise Exception('The specified image url is invalid.')
tmp = tempfile.NamedTemporaryFile(delete=False)
tmp.write(res)
tmp.close()
tmp = tmp.name
else:
tmp = None
if not tmp and not src:
raise Exception('The specified image is invalid.')
img = self._driver(options=options).load(tmp if tmp else src)
if not img:
raise Exception('The specified image is invalid.')
return img
except Exception as e:
self.logging.exception(e)
return False
finally:
if tmp:
self.helper.io.file.remove(tmp)
def execute(self, src, fn, options=None, **kwargs):
if not options and kwargs:
options = kwargs
img = self.load(src, options=options)
if not img:
return False
try:
return fn(img, options)
except Exception as e:
self.logging.exception(e)
return False
def size(self, src, options=None, **o_kwargs):
if not options and o_kwargs:
options = o_kwargs
def fn(img, kwargs):
if not img:
return -1, -1
return img.width, img.height
return self.execute(src, fn, options=options)
def crop(self, src, options=None, **o_kwargs):
if not options and o_kwargs:
options = o_kwargs
def fn(img, kwargs):
crop = kwargs['crop'] if 'crop' in kwargs else None
if not crop:
return img
e_top = 0
e_left = 0
e_right = 0
e_bottom = 0
if self.helper.misc.type.check.string(crop):
crop = crop.split(',')
crop = [int(e.strip()) for e in crop]
if self.helper.misc.type.check.numeric(crop):
e_top = e_left = e_right = e_bottom = crop
elif isinstance(crop, (tuple, list)):
if len(crop) == 1:
e_top = e_left = e_right = e_bottom = crop[0]
elif len(crop) == 2:
e_top = e_bottom = crop[0]
e_left = e_right = crop[1]
elif len(crop) == 4:
e_top = crop[0]
e_right = crop[1]
e_bottom = crop[2]
e_left = crop[3]
img = self._driver(options=kwargs).crop(img, e_left, e_top, img.size[0] - e_right, img.size[1] - e_bottom)
return img
return self.execute(src, fn, options=options)
def border(self, src, options=None, **o_kwargs):
if not options and o_kwargs:
options = o_kwargs
def fn(img, kwargs):
border = int(kwargs['border']) if 'border' in kwargs else 0
border_color = kwargs['border_color'] if 'border_color' in kwargs else '#000000'
if not border:
return img
if '_org' in kwargs and 'radius' in kwargs and kwargs['radius']:
return img
img = self._driver(options=kwargs).border(img, border, border_color)
return img
return self.execute(src, fn, options=options)
def radius(self, src, options=None, **o_kwargs):
if not options and o_kwargs:
options = o_kwargs
def fn(img, kwargs):
radius = int(kwargs['radius'] or 0) if 'radius' in kwargs else None
border = int(kwargs['border']) if 'border' in kwargs else 0
border_color = kwargs['border_color'] if 'border_color' in kwargs else '#000000'
if not radius:
return img
elif '__radius_processed__' in img.__dict__:
return img
img = self._driver(options=kwargs).radius(img, radius, border, border_color)
img.__dict__['__radius_processed__'] = True
return img
return self.execute(src, fn, options=options)
def colorize(self, src, options=None, **o_kwargs):
if not options and o_kwargs:
options = o_kwargs
def fn(img, kwargs):
colorize = kwargs['colorize'] if 'colorize' in kwargs else None
if not colorize:
return img
img = self._driver(options=kwargs).colorize(img, colorize)
return img
return self.execute(src, fn, options=options)
def resize(self, src, options=None, **o_kwargs):
if not options and o_kwargs:
options = o_kwargs
def fn(img, kwargs):
size = kwargs['size'] if 'size' in kwargs else None
mode = kwargs['mode'] if 'mode' in kwargs else None
scale = int(kwargs['scale']) if 'scale' in kwargs else 1
limit = True if 'limit' in kwargs and kwargs['limit'] else False
border = int(kwargs['border']) if 'border' in kwargs else 0
if not size:
return img
width_new, height_new = size
width_origin, height_origin = img.size
if scale > 1:
if limit:
scale_max_width = float(width_origin) / float(width_new)
scale_max_height = float(height_origin) / float(height_new)
scale_max = min(scale, scale_max_width, scale_max_height)
else:
scale_max = scale
if scale_max > 1:
width_new = int(width_new * scale_max)
height_new = int(height_new * scale_max)
if not width_new:
width_new = width_origin * height_new / height_origin
mode = self.helper.io.image.mode.resize
if not height_new:
height_new = height_origin * width_new / width_origin
mode = self.helper.io.image.mode.resize
if border:
width_new -= border * 2
height_new -= border * 2
if not mode:
mode = self.helper.io.image.mode.resize
if mode not in self.helper.io.image.mode.modes:
raise Exception('The specified mode is not supported.')
seqs = []
for i, im in self._driver(options=kwargs).iter_seqs(img, kwargs):
# Image Resizing
if mode == self.helper.io.image.mode.center:
im = self._driver(options=kwargs).resize(im, width_new, height_new, kwargs)
elif mode == self.helper.io.image.mode.fill:
ratio_origin = float(width_origin) / float(height_origin)
ratio_new = float(width_new) / float(height_new)
if ratio_origin > ratio_new:
tw = int(round(height_new * ratio_origin))
im = self._driver(options=kwargs).resize(im, tw, height_new)
left = int(round((tw - width_new) / 2.0))
im = self._driver(options=kwargs).crop(im, left, 0, left + width_new, height_new)
elif ratio_origin < ratio_new:
th = int(round(width_new / ratio_origin))
im = self._driver(options=kwargs).resize(im, width_new, th)
top = int(round((th - height_new) / 2.0))
im = self._driver(options=kwargs).crop(im, 0, top, width_new, top + height_new)
else:
im = self._driver(options=kwargs).resize(im, width_new, height_new)
elif mode == self.helper.io.image.mode.resize:
if width_new > width_origin or height_new > height_origin:
width_new = width_origin
height_new = height_origin
im = self._driver(options=kwargs).resize(im, width_new, height_new)
seqs.append(im)
img = seqs[0]
seqs.remove(img)
img.__dict__['__frames__'] = seqs
return img
return self.execute(src, fn, options=options)
def save(self, src, options=None, **o_kwargs):
if not options and o_kwargs:
options = o_kwargs
def fn(img, kwargs):
ext = kwargs['format'] if 'format' in kwargs else None
dest = kwargs['dest'] if 'dest' in kwargs else None
if not dest:
return None
if not ext and self.helper.misc.type.check.string(dest):
ext = self.helper.io.path.ext(dest, dot='').lower()
if not ext and self.helper.misc.type.check.string(src):
ext = self.helper.io.path.ext(src, dot='').lower()
if not ext and '_org' in kwargs and kwargs['_org'] and self.helper.misc.type.check.string(kwargs['_org']):
ext = self.helper.io.path.ext(kwargs['_org'], dot='').lower()
if dest == 's3':
# TODO
return False
if not self._driver(options=kwargs).save(img, ext, dest, kwargs):
return False
return True
return self.execute(src, fn, options=options)
def manipulate(self, src, options=None, **kwargs):
if not options and kwargs:
options = kwargs
options['_org'] = src
try:
img = self.load(src, options=options)
# Crop
img = self.crop(img, options=options)
if not img:
return False
# Resize
img = self.resize(img, options=options)
if not img:
return False
# Radius
img = self.radius(img, options=options)
if not img:
return False
# Border
img = self.border(img, options=options)
if not img:
return False
# Colorize
img = self.colorize(img, options=options)
if not img:
return False
# Save
saved = self.save(img, options=options)
if saved is None:
return img
elif saved is False:
return False
return True
except Exception as e:
self.logging.exception(e)
return False
| # -*- coding: utf-8 -*-
import tempfile
from dp_tornado.engine.helper import Helper as dpHelper
class ImageHelper(dpHelper):
def compare(self, i1, i2, error=0):
i1 = self.load(i1)
i2 = self.load(i2)
if not i1 or not i2:
return None
s1 = i1.size
s2 = i2.size
if s1[0] != s2[0] or s2[1] != s2[1]:
print('size ne,', s1, s2)
return False
i1 = i1.load()
i2 = i2.load()
for i in range(s1[0]):
for j in range(s1[1]):
if i1[i, j] != i2[i, j]:
if error:
for k in range(len(i1[i, j])):
if abs(i1[i, j][k] - i2[i, j][k]) > error:
print('pixel ne,', i1[i, j], i2[i, j], abs(i1[i, j][k] - i2[i, j][k]), error)
return False
else:
return False
return True
def _driver(self, options=None, **kwargs):
if not options and kwargs:
options = kwargs
if options and 'driver' in options and options['driver'] == 'wand':
return self.helper.io.image.driver.wand
return self.helper.io.image.driver.pillow
def load(self, src, options=None, **kwargs):
if not options and kwargs:
options = kwargs
tmp = None
drivers = []
pillow_image = self.helper.io.image.driver.pillow.Image
wand_image = self.helper.io.image.driver.wand.Image
if pillow_image:
drivers.append(pillow_image)
if wand_image:
drivers.append(wand_image)
try:
if isinstance(src, tuple(drivers)):
return src
elif self.helper.web.url.validate(src):
code, res = self.helper.web.http.get.raw(src)
if code != 200:
raise Exception('The specified image url is invalid.')
tmp = tempfile.NamedTemporaryFile(delete=False)
tmp.write(res)
tmp.close()
tmp = tmp.name
else:
tmp = None
if not tmp and not src:
raise Exception('The specified image is invalid.')
img = self._driver(options=options).load(tmp if tmp else src)
if not img:
raise Exception('The specified image is invalid.')
return img
except Exception as e:
self.logging.exception(e)
return False
finally:
if tmp:
self.helper.io.file.remove(tmp)
def execute(self, src, fn, options=None, **kwargs):
if not options and kwargs:
options = kwargs
img = self.load(src, options=options)
if not img:
return False
try:
return fn(img, options)
except Exception as e:
self.logging.exception(e)
return False
def size(self, src, options=None, **o_kwargs):
if not options and o_kwargs:
options = o_kwargs
def fn(img, kwargs):
if not img:
return -1, -1
return img.width, img.height
return self.execute(src, fn, options=options)
def crop(self, src, options=None, **o_kwargs):
if not options and o_kwargs:
options = o_kwargs
def fn(img, kwargs):
crop = kwargs['crop'] if 'crop' in kwargs else None
if not crop:
return img
e_top = 0
e_left = 0
e_right = 0
e_bottom = 0
if self.helper.misc.type.check.string(crop):
crop = crop.split(',')
crop = [int(e.strip()) for e in crop]
if self.helper.misc.type.check.numeric(crop):
e_top = e_left = e_right = e_bottom = crop
elif isinstance(crop, (tuple, list)):
if len(crop) == 1:
e_top = e_left = e_right = e_bottom = crop[0]
elif len(crop) == 2:
e_top = e_bottom = crop[0]
e_left = e_right = crop[1]
elif len(crop) == 4:
e_top = crop[0]
e_right = crop[1]
e_bottom = crop[2]
e_left = crop[3]
img = self._driver(options=kwargs).crop(img, e_left, e_top, img.size[0] - e_right, img.size[1] - e_bottom)
return img
return self.execute(src, fn, options=options)
def border(self, src, options=None, **o_kwargs):
if not options and o_kwargs:
options = o_kwargs
def fn(img, kwargs):
border = int(kwargs['border']) if 'border' in kwargs else 0
border_color = kwargs['border_color'] if 'border_color' in kwargs else '#000000'
if not border:
return img
if '_org' in kwargs and 'radius' in kwargs and kwargs['radius']:
return img
img = self._driver(options=kwargs).border(img, border, border_color)
return img
return self.execute(src, fn, options=options)
def radius(self, src, options=None, **o_kwargs):
if not options and o_kwargs:
options = o_kwargs
def fn(img, kwargs):
radius = int(kwargs['radius'] or 0) if 'radius' in kwargs else None
border = int(kwargs['border']) if 'border' in kwargs else 0
border_color = kwargs['border_color'] if 'border_color' in kwargs else '#000000'
if not radius:
return img
elif '__radius_processed__' in img.__dict__:
return img
img = self._driver(options=kwargs).radius(img, radius, border, border_color)
img.__dict__['__radius_processed__'] = True
return img
return self.execute(src, fn, options=options)
def colorize(self, src, options=None, **o_kwargs):
if not options and o_kwargs:
options = o_kwargs
def fn(img, kwargs):
colorize = kwargs['colorize'] if 'colorize' in kwargs else None
if not colorize:
return img
img = self._driver(options=kwargs).colorize(img, colorize)
return img
return self.execute(src, fn, options=options)
def resize(self, src, options=None, **o_kwargs):
if not options and o_kwargs:
options = o_kwargs
def fn(img, kwargs):
size = kwargs['size'] if 'size' in kwargs else None
mode = kwargs['mode'] if 'mode' in kwargs else None
scale = int(kwargs['scale']) if 'scale' in kwargs else 1
limit = True if 'limit' in kwargs and kwargs['limit'] else False
border = int(kwargs['border']) if 'border' in kwargs else 0
if not size:
return img
width_new, height_new = size
width_origin, height_origin = img.size
if scale > 1:
if limit:
scale_max_width = float(width_origin) / float(width_new)
scale_max_height = float(height_origin) / float(height_new)
scale_max = min(scale, scale_max_width, scale_max_height)
else:
scale_max = scale
if scale_max > 1:
width_new = int(width_new * scale_max)
height_new = int(height_new * scale_max)
if not width_new:
width_new = width_origin * height_new / height_origin
mode = self.helper.io.image.mode.resize
if not height_new:
height_new = height_origin * width_new / width_origin
mode = self.helper.io.image.mode.resize
if border:
width_new -= border * 2
height_new -= border * 2
if not mode:
mode = self.helper.io.image.mode.resize
if mode not in self.helper.io.image.mode.modes:
raise Exception('The specified mode is not supported.')
seqs = []
for i, im in self._driver(options=kwargs).iter_seqs(img, kwargs):
# Image Resizing
if mode == self.helper.io.image.mode.center:
im = self._driver(options=kwargs).resize(im, width_new, height_new, kwargs)
elif mode == self.helper.io.image.mode.fill:
ratio_origin = float(width_origin) / float(height_origin)
ratio_new = float(width_new) / float(height_new)
if ratio_origin > ratio_new:
tw = int(round(height_new * ratio_origin))
im = self._driver(options=kwargs).resize(im, tw, height_new)
left = int(round((tw - width_new) / 2.0))
im = self._driver(options=kwargs).crop(im, left, 0, left + width_new, height_new)
elif ratio_origin < ratio_new:
th = int(round(width_new / ratio_origin))
im = self._driver(options=kwargs).resize(im, width_new, th)
top = int(round((th - height_new) / 2.0))
im = self._driver(options=kwargs).crop(im, 0, top, width_new, top + height_new)
else:
im = self._driver(options=kwargs).resize(im, width_new, height_new)
elif mode == self.helper.io.image.mode.resize:
if width_new > width_origin or height_new > height_origin:
width_new = width_origin
height_new = height_origin
im = self._driver(options=kwargs).resize(im, width_new, height_new)
seqs.append(im)
img = seqs[0]
seqs.remove(img)
img.__dict__['__frames__'] = seqs
return img
return self.execute(src, fn, options=options)
def save(self, src, options=None, **o_kwargs):
if not options and o_kwargs:
options = o_kwargs
def fn(img, kwargs):
ext = kwargs['format'] if 'format' in kwargs else None
dest = kwargs['dest'] if 'dest' in kwargs else None
if not dest:
return None
if not ext and self.helper.misc.type.check.string(dest):
ext = self.helper.io.path.ext(dest, dot='').lower()
if not ext and self.helper.misc.type.check.string(src):
ext = self.helper.io.path.ext(src, dot='').lower()
if not ext and '_org' in kwargs and kwargs['_org'] and self.helper.misc.type.check.string(kwargs['_org']):
ext = self.helper.io.path.ext(kwargs['_org'], dot='').lower()
if dest == 's3':
# TODO
return False
if not self._driver(options=kwargs).save(img, ext, dest, kwargs):
return False
return True
return self.execute(src, fn, options=options)
def manipulate(self, src, options=None, **kwargs):
if not options and kwargs:
options = kwargs
options['_org'] = src
try:
img = self.load(src, options=options)
# Crop
img = self.crop(img, options=options)
if not img:
return False
# Resize
img = self.resize(img, options=options)
if not img:
return False
# Radius
img = self.radius(img, options=options)
if not img:
return False
# Border
img = self.border(img, options=options)
if not img:
return False
# Colorize
img = self.colorize(img, options=options)
if not img:
return False
# Save
saved = self.save(img, options=options)
if saved is None:
return img
elif saved is False:
return False
return True
except Exception as e:
self.logging.exception(e)
return False | en | 0.656029 | # -*- coding: utf-8 -*- # Image Resizing # TODO # Crop # Resize # Radius # Border # Colorize # Save | 2.065348 | 2 |
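The resize() method in the dp_tornado row above implements three modes; its "fill" branch scales the image to cover the target box and then center-crops the overflow. The stand-alone Pillow sketch below is not dp_tornado code; it only restates that ratio-and-crop arithmetic for clarity.

# Plain-Pillow restatement of the fill-mode math above; assumes only Pillow.
from PIL import Image

def resize_fill(img, width_new, height_new):
    width_origin, height_origin = img.size
    ratio_origin = width_origin / height_origin
    ratio_new = width_new / height_new
    if ratio_origin > ratio_new:            # source wider: match height, crop the sides
        tw = int(round(height_new * ratio_origin))
        img = img.resize((tw, height_new))
        left = int(round((tw - width_new) / 2.0))
        return img.crop((left, 0, left + width_new, height_new))
    if ratio_origin < ratio_new:            # source taller: match width, crop top/bottom
        th = int(round(width_new / ratio_origin))
        img = img.resize((width_new, th))
        top = int(round((th - height_new) / 2.0))
        return img.crop((0, top, width_new, top + height_new))
    return img.resize((width_new, height_new))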
script.py | triethyl/wbut-results-parsed | 1 | 10699 | import requests
from bs4 import BeautifulSoup
import json
import re
# Range of Roll Number - User Input
start_roll = int(input("Starting Roll Number: "))
end_roll = int(input("Ending Roll Number: "))
# Semester - User Input
sem = int(input("Which Semester[1-8]: "))
# Verbosity
verbose = int(input("Verbosity Level (1 for just data, 2 for detailed data): "))
# Roll Number Tuple
roll_tuple = tuple(range(start_roll, end_roll+1))
# Getting the Websites
result_url = 'https://makaut.ucanapply.com/smartexam/public/result-details'
get_result_details = 'https://makaut.ucanapply.com/smartexam/public//get-result-details'
# Semester Codes
semcode = ('SM01', 'SM02', 'SM03', 'SM04', 'SM05', 'SM06', 'SM07', 'SM08')
def get_marks_of(rollNo, semester):
# Handle session cookies appropriately
s = requests.Session()
with s.get(result_url) as r:
while r.status_code != 200:
r = s.get(result_url)
# Parse CSRF-Token
soup = BeautifulSoup(r.text, 'html.parser')
csrf_token = soup.find("meta", {"name":"csrf-token"})['content']
# Create dict for post request
form_data = {'_token': csrf_token, 'p1':'', 'ROLLNO':str(rollNo), 'SEMCODE':semcode[semester-1], 'examtype':'result-details', 'all':''}
# Get Result Data
with s.post(get_result_details, data=form_data) as r:
while r.status_code != 200:
r = s.post(get_result_details, data=form_data)
result_data = json.loads(r.text)['html']
soup = BeautifulSoup(result_data, 'html.parser')
result_data = soup.find("div", {"id":"page-wrap"})
try:
result_data = result_data.get_text()
except AttributeError:
# This result has not yet been published
return
# Basic Data
name = re.findall("Name[^a-zA-Z]*([a-zA-Z ]+)", result_data)[0]
stream = re.findall("B.Tech[^A-Z]*([A-Z]+)", result_data)[0]
roll_num = re.findall("Roll[^0-9]*([0-9]+)", result_data)[0]
reg_num, batch = re.findall("Registration[^0-9]*([0-9]+) OF ([0-9-]+)", result_data)[0]
# Subject Data
def get_subject_data(result_data):
re_mp_fl = [ i for i in filter(lambda x: x!='', [i for i in map(lambda x: x.strip(), re.findall("([^\n]+)", result_data))])]
for i in range(re_mp_fl.index("Subject Code")+6, re_mp_fl.index("Total"),6):
yield(tuple([re_mp_fl[j] for j in range(i, i+6)]))
subject_data = get_subject_data(result_data)
# SGPA YGPA MAR - Prone to errors for odd and even sem
sgpa_odd, odd_year, sgpa_even, even_year, ygpa, cgpa = -1, -1, -1, -1, -1, -1
try:
sgpa_odd = re.findall("ODD\.*\s*\(.*\)[^0-9.]*([0-9.]+)", result_data)[0]
odd_year = re.findall("ODD[^0-9]*([0-9])", result_data)[0]
sgpa_even = re.findall("EVEN\s*\(.*\)[^0-9.]*([0-9.]+)", result_data)[0]
even_year = re.findall("EVEN[^0-9]*([0-9])", result_data)[0]
ygpa = re.findall("YGPA[^0-9]*([0-9.]+)", result_data)[0]
cgpa = re.findall("DGPA[^EVEN]*EVEN\s*\(.*\)[^0-9.]*[0-9.]+\s*([0-9.]+)[^YGPA]*YGPA", result_data)[0]
except IndexError:
pass
return {
'name': name,
'stream': stream,
'roll': roll_num,
'reg_num': reg_num,
'batch': batch,
'marks_per_subject': subject_data,
'sgpa_odd': sgpa_odd,
'odd_year': odd_year,
'sgpa_even': None if sgpa_even == -1 else sgpa_even,
'even_year': None if even_year == -1 else even_year,
'ygpa': None if ygpa == -1 else ygpa,
'cgpa': None if cgpa == -1 else cgpa
}
def print_marks_properly(roll, sem):
data = get_marks_of(roll, sem)
    if data is not None:  # get_marks_of() returns None when the result is unpublished
for key, value in data.items():
if key == 'marks_per_subject':
print(key,"->")
for x in value:
print(x)
else:
print(key, "->", value)
if verbose == 1:
    # Display most recent
for roll in roll_tuple:
data = get_marks_of(roll, sem)
try:
print(f"({data['name']}, {data['sgpa_odd' if sem%2!=0 else 'sgpa_even']})")
except:
pass
elif verbose == 2:
for roll in roll_tuple:
print_marks_properly(roll, sem)
else:
print("[!] Verbosity Level Wrong!")
| import requests
from bs4 import BeautifulSoup
import json
import re
# Range of Roll Number - User Input
start_roll = int(input("Starting Roll Number: "))
end_roll = int(input("Ending Roll Number: "))
# Semester - User Input
sem = int(input("Which Semester[1-8]: "))
# Verbosity
verbose = int(input("Verbosity Level (1 for just data, 2 for detailed data): "))
# Roll Number Tuple
roll_tuple = tuple(range(start_roll, end_roll+1))
# Getting the Websites
result_url = 'https://makaut.ucanapply.com/smartexam/public/result-details'
get_result_details = 'https://makaut.ucanapply.com/smartexam/public//get-result-details'
# Semester Codes
semcode = ('SM01', 'SM02', 'SM03', 'SM04', 'SM05', 'SM06', 'SM07', 'SM08')
def get_marks_of(rollNo, semester):
# Handle session cookies appropriately
s = requests.Session()
with s.get(result_url) as r:
while r.status_code != 200:
r = s.get(result_url)
# Parse CSRF-Token
soup = BeautifulSoup(r.text, 'html.parser')
csrf_token = soup.find("meta", {"name":"csrf-token"})['content']
# Create dict for post request
form_data = {'_token': csrf_token, 'p1':'', 'ROLLNO':str(rollNo), 'SEMCODE':semcode[semester-1], 'examtype':'result-details', 'all':''}
# Get Result Data
with s.post(get_result_details, data=form_data) as r:
while r.status_code != 200:
r = s.post(get_result_details, data=form_data)
result_data = json.loads(r.text)['html']
soup = BeautifulSoup(result_data, 'html.parser')
result_data = soup.find("div", {"id":"page-wrap"})
try:
result_data = result_data.get_text()
except AttributeError:
# This result has not yet been published
return
# Basic Data
name = re.findall("Name[^a-zA-Z]*([a-zA-Z ]+)", result_data)[0]
stream = re.findall("B.Tech[^A-Z]*([A-Z]+)", result_data)[0]
roll_num = re.findall("Roll[^0-9]*([0-9]+)", result_data)[0]
reg_num, batch = re.findall("Registration[^0-9]*([0-9]+) OF ([0-9-]+)", result_data)[0]
# Subject Data
def get_subject_data(result_data):
re_mp_fl = [ i for i in filter(lambda x: x!='', [i for i in map(lambda x: x.strip(), re.findall("([^\n]+)", result_data))])]
for i in range(re_mp_fl.index("Subject Code")+6, re_mp_fl.index("Total"),6):
yield(tuple([re_mp_fl[j] for j in range(i, i+6)]))
subject_data = get_subject_data(result_data)
# SGPA YGPA MAR - Prone to errors for odd and even sem
sgpa_odd, odd_year, sgpa_even, even_year, ygpa, cgpa = -1, -1, -1, -1, -1, -1
try:
sgpa_odd = re.findall("ODD\.*\s*\(.*\)[^0-9.]*([0-9.]+)", result_data)[0]
odd_year = re.findall("ODD[^0-9]*([0-9])", result_data)[0]
sgpa_even = re.findall("EVEN\s*\(.*\)[^0-9.]*([0-9.]+)", result_data)[0]
even_year = re.findall("EVEN[^0-9]*([0-9])", result_data)[0]
ygpa = re.findall("YGPA[^0-9]*([0-9.]+)", result_data)[0]
cgpa = re.findall("DGPA[^EVEN]*EVEN\s*\(.*\)[^0-9.]*[0-9.]+\s*([0-9.]+)[^YGPA]*YGPA", result_data)[0]
except IndexError:
pass
return {
'name': name,
'stream': stream,
'roll': roll_num,
'reg_num': reg_num,
'batch': batch,
'marks_per_subject': subject_data,
'sgpa_odd': sgpa_odd,
'odd_year': odd_year,
'sgpa_even': None if sgpa_even == -1 else sgpa_even,
'even_year': None if even_year == -1 else even_year,
'ygpa': None if ygpa == -1 else ygpa,
'cgpa': None if cgpa == -1 else cgpa
}
def print_marks_properly(roll, sem):
data = get_marks_of(roll, sem)
    if data is not None:  # get_marks_of() returns None when the result is unpublished
for key, value in data.items():
if key == 'marks_per_subject':
print(key,"->")
for x in value:
print(x)
else:
print(key, "->", value)
if verbose == 1:
    # Display most recent
for roll in roll_tuple:
data = get_marks_of(roll, sem)
try:
print(f"({data['name']}, {data['sgpa_odd' if sem%2!=0 else 'sgpa_even']})")
except:
pass
elif verbose == 2:
for roll in roll_tuple:
print_marks_properly(roll, sem)
else:
print("[!] Verbosity Level Wrong!")
| en | 0.740917 | # Range of Roll Number - User Input # Semester - User Input # Verbosity # Roll Number Tuple # Getting the Websites # Semester Codes # Handle session cookies appropriately # Parse CSRF-Token # Create dict for post request # Get Result Data # This result has not yet been published # Basic Data # Subject Data # SGPA YGPA MAR - Prone to errors for odd and even sem # Disply most recent | 2.981308 | 3 |
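The script.py row above fetches a CSRF token, posts the roll/semester form, and regex-parses the returned HTML. A hedged refactor of just the fetch step with a bounded retry count in place of the open-ended while loops; result_url and get_result_details are assumed to be the module-level URLs defined in that script.

# Sketch only; reuses the requests/BeautifulSoup calls already used above.
import requests
from bs4 import BeautifulSoup

def fetch_result_html(roll_no, sem_code, retries=5):
    s = requests.Session()
    r = None
    for _ in range(retries):
        r = s.get(result_url)
        if r.status_code == 200:
            break
    else:
        return None               # could not load the result page
    token = BeautifulSoup(r.text, "html.parser").find("meta", {"name": "csrf-token"})["content"]
    form = {"_token": token, "p1": "", "ROLLNO": str(roll_no), "SEMCODE": sem_code,
            "examtype": "result-details", "all": ""}
    for _ in range(retries):
        r = s.post(get_result_details, data=form)
        if r.status_code == 200:
            return r.json()["html"]
    return None                   # POST kept failing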