ext (stringclasses, 9 values) | sha (stringlengths, 40-40) | content (stringlengths, 3-1.04M)
---|---|---|
py | b412891ab067b388c83f9c07351d2ea4eb52327c | '''
TODO:
* syntax checking (paren count, etc)
* in parse or in get_expr?
* argparse, argv, cli for flags?
'''
from parse import parse
from reg import fetch, assign, EXPR, VAL
from reg import clear_registers
from stack import clear_stack
from mem import clear_memory
from env import initialize_env
from run import run
from stats import display_stats
INTERPRETER_PROMPT = '<<< '
INTERPRETER_EXIT = '.quit', '.exit'
EXIT_MESSAGE = 'Byeeeeeeee!'
def repl():
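    # Read-eval-print loop: read an expression into the EXPR register,
    # run the register machine, then display the value left in VAL.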
info_flag = 0
stats_flag = 1
initialize()
while True:
try:
get_expr()
run(info_flag=info_flag)
display_result(stats_flag=stats_flag)
        except KeyboardInterrupt:
            print()
            break
        except Exception as e:
            # .quit/.exit are signalled by get_expr() raising with EXIT_MESSAGE
            print(e)
            if str(e) == EXIT_MESSAGE:
                break
def ecio_eval(expr):
'''Evaluates an expression without invoking the repl'''
initialize()
parse_and_set_expr(expr)
run()
return get_result()
def initialize():
# optional
clear_registers()
clear_stack()
clear_memory()
# required
initialize_env()
def get_expr():
expr = input(INTERPRETER_PROMPT)
if expr in INTERPRETER_EXIT:
raise Exception(EXIT_MESSAGE)
else:
parse_and_set_expr(expr)
def display_result(stats_flag=1):
print(get_result())
print()
display_stats(stats_flag)
print()
def parse_and_set_expr(lisp_expr):
parsed = parse(lisp_expr)
assign(EXPR, parsed)
def get_result():
return fetch(VAL)
if __name__ == '__main__':
repl()
|
py | b41289c03a352edd7f864ef1cf96a8fd851074d9 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from .. import models
class LoadBalancerBackendAddressPoolsOperations(object):
"""LoadBalancerBackendAddressPoolsOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: Client API version. Constant value: "2017-11-01".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2017-11-01"
self.config = config
def list(
self, resource_group_name, load_balancer_name, custom_headers=None, raw=False, **operation_config):
"""Gets all the load balancer backed address pools.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of BackendAddressPool
:rtype:
~azure.mgmt.network.v2017_11_01.models.BackendAddressPoolPaged[~azure.mgmt.network.v2017_11_01.models.BackendAddressPool]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
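            # First page: build the request URL from the operation metadata;
            # later pages simply follow the service-provided next_link.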
if not next_link:
# Construct URL
url = self.list.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.BackendAddressPoolPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.BackendAddressPoolPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/backendAddressPools'}
def get(
self, resource_group_name, load_balancer_name, backend_address_pool_name, custom_headers=None, raw=False, **operation_config):
"""Gets load balancer backend address pool.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:param backend_address_pool_name: The name of the backend address
pool.
:type backend_address_pool_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: BackendAddressPool or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.network.v2017_11_01.models.BackendAddressPool or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.get.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'backendAddressPoolName': self._serialize.url("backend_address_pool_name", backend_address_pool_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('BackendAddressPool', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/backendAddressPools/{backendAddressPoolName}'}
|
py | b41289da1d2d1fea5bd3d77748ac9ad438a89851 | # dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
# augmentation strategy originates from DETR, except for size_divisor=32
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(
type='AutoAugment',
policies=[[
dict(
type='Resize',
img_scale=[(480, 1333), (512, 1333), (544, 1333), (576, 1333),
(608, 1333), (640, 1333), (672, 1333), (704, 1333),
(736, 1333), (768, 1333), (800, 1333)],
multiscale_mode='value',
keep_ratio=True)
],
[
dict(
type='Resize',
img_scale=[(400, 1333), (500, 1333), (600, 1333)],
multiscale_mode='value',
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=(384, 600),
allow_negative_crop=True),
dict(
type='Resize',
img_scale=[(480, 1333), (512, 1333), (544, 1333),
(576, 1333), (608, 1333), (640, 1333),
(672, 1333), (704, 1333), (736, 1333),
(768, 1333), (800, 1333)],
multiscale_mode='value',
override=True,
keep_ratio=True)
]]),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks'])
]
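# Test-time pipeline: single scale (1333, 800), no flipping, same
# normalization and padding as training.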
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img'])
])
]
data = dict(
samples_per_gpu=2,
workers_per_gpu=2,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline))
evaluation = dict(metric=['bbox', 'segm'])
|
py | b4128a2f18e8d9a19c425a737b55bacef2924a39 | # -*- coding: utf-8 -*-
""" Shelter (Camp) Registry, model
@copyright: 2009-2014 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ("S3ShelterModel",
"S3ShelterRegistrationModel",
"cr_shelter_rheader",
"cr_update_shelter_population",
"cr_update_housing_unit_population",
"cr_update_capacity_from_housing_units",
"cr_check_population_availability",
"cr_notification_dispatcher",
)
try:
# try stdlib (Python 2.6)
import json
except ImportError:
try:
# try external module
import simplejson as json
except:
# fallback to pure-Python module
import gluon.contrib.simplejson as json
from gluon import *
from gluon.storage import Storage
from ..s3 import *
from s3layouts import S3AddResourceLink
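# Presence types for shelter registrations (cr_shelter_registration.day_or_night)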
NIGHT = 1
DAY_AND_NIGHT = 2
# =============================================================================
class S3ShelterModel(S3Model):
names = ("cr_shelter_type",
"cr_shelter_service",
"cr_shelter",
"cr_shelter_id",
"cr_shelter_status",
"cr_shelter_person",
"cr_shelter_allocation",
"cr_shelter_unit",
)
# Define a function model() which takes no parameters (except self):
def model(self):
T = current.T
db = current.db
s3 = current.response.s3
settings = current.deployment_settings
configure = self.configure
crud_strings = s3.crud_strings
define_table = self.define_table
messages = current.messages
super_link = self.super_link
set_method = self.set_method
NAME = T("Name")
# -------------------------------------------------------------------------
# Shelter types
# e.g. NGO-operated, Government evacuation center, School, Hospital -- see Agasti opt_camp_type.)
tablename = "cr_shelter_type"
define_table(tablename,
Field("name", notnull=True,
label = NAME,
requires = IS_NOT_ONE_OF(db,
"%s.name" % tablename),
),
s3_comments(),
*s3_meta_fields())
# CRUD strings
if settings.get_ui_label_camp():
ADD_SHELTER_TYPE = T("Add Camp Type")
SHELTER_TYPE_LABEL = T("Camp Type")
crud_strings[tablename] = Storage(
label_create = ADD_SHELTER_TYPE,
title_display = T("Camp Type Details"),
title_list = T("Camp Types"),
title_update = T("Edit Camp Type"),
label_list_button = T("List Camp Types"),
msg_record_created = T("Camp Type added"),
msg_record_modified = T("Camp Type updated"),
msg_record_deleted = T("Camp Type deleted"),
msg_list_empty = T("No Camp Types currently registered"))
else:
ADD_SHELTER_TYPE = T("Create Shelter Type")
SHELTER_TYPE_LABEL = T("Shelter Type")
crud_strings[tablename] = Storage(
label_create = ADD_SHELTER_TYPE,
title_display = T("Shelter Type Details"),
title_list = T("Shelter Types"),
title_update = T("Edit Shelter Type"),
label_list_button = T("List Shelter Types"),
msg_record_created = T("Shelter Type added"),
msg_record_modified = T("Shelter Type updated"),
msg_record_deleted = T("Shelter Type deleted"),
msg_list_empty = T("No Shelter Types currently registered"))
configure(tablename,
deduplicate = self.cr_shelter_type_duplicate,
)
represent = S3Represent(lookup=tablename)
shelter_type_id = S3ReusableField("shelter_type_id", "reference %s" % tablename,
label = SHELTER_TYPE_LABEL,
ondelete = "RESTRICT",
represent = represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "cr_shelter_type.id",
represent)),
comment=S3AddResourceLink(c="cr",
f="shelter_type",
label=ADD_SHELTER_TYPE),
)
# -------------------------------------------------------------------------
# Shelter services
# e.g. medical, housing, food, ...
tablename = "cr_shelter_service"
define_table(tablename,
Field("name", notnull=True,
label = NAME,
),
s3_comments(),
*s3_meta_fields())
# CRUD strings
if settings.get_ui_label_camp():
ADD_SHELTER_SERVICE = T("Add Camp Service")
SHELTER_SERVICE_LABEL = T("Camp Service")
crud_strings[tablename] = Storage(
label_create = ADD_SHELTER_SERVICE,
title_display = T("Camp Service Details"),
title_list = T("Camp Services"),
title_update = T("Edit Camp Service"),
label_list_button = T("List Camp Services"),
msg_record_created = T("Camp Service added"),
msg_record_modified = T("Camp Service updated"),
msg_record_deleted = T("Camp Service deleted"),
msg_list_empty = T("No Camp Services currently registered"))
else:
ADD_SHELTER_SERVICE = T("Create Shelter Service")
SHELTER_SERVICE_LABEL = T("Shelter Service")
crud_strings[tablename] = Storage(
label_create = ADD_SHELTER_SERVICE,
title_display = T("Shelter Service Details"),
title_list = T("Shelter Services"),
title_update = T("Edit Shelter Service"),
label_list_button = T("List Shelter Services"),
msg_record_created = T("Shelter Service added"),
msg_record_modified = T("Shelter Service updated"),
msg_record_deleted = T("Shelter Service deleted"),
msg_list_empty = T("No Shelter Services currently registered"))
service_represent = S3Represent(lookup=tablename)
service_multirepresent = S3Represent(lookup=tablename,
multiple=True
)
shelter_service_id = S3ReusableField("shelter_service_id",
"list:reference cr_shelter_service",
label = SHELTER_SERVICE_LABEL,
ondelete = "RESTRICT",
represent = self.cr_shelter_service_multirepresent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db,
"cr_shelter_service.id",
self.cr_shelter_service_represent,
multiple=True)),
sortby = "name",
comment = S3AddResourceLink(c="cr",
f="shelter_service",
label=ADD_SHELTER_SERVICE),
widget = S3MultiSelectWidget(header=False,
)
)
# -------------------------------------------------------------------------
# Shelter Environmental Characteristics
# e.g. Lake, Mountain, ground type.
tablename = "cr_shelter_environment"
define_table(tablename,
Field("name", notnull=True,
label = NAME,
),
s3_comments(),
*s3_meta_fields())
environment_represent = S3Represent(lookup=tablename)
environment_multirepresent = S3Represent(lookup=tablename,
multiple=True
)
shelter_environment_id = S3ReusableField("cr_shelter_environment_id",
"list:reference cr_shelter_environment",
label = "Environmental Characteristics",
ondelete = "RESTRICT",
represent = environment_multirepresent,
requires = IS_EMPTY_OR(IS_ONE_OF(db,
"cr_shelter_environment.id",
environment_represent,
multiple=True)),
sortby = "name",
widget = S3MultiSelectWidget()
)
# -------------------------------------------------------------------------
cr_shelter_opts = {1 : T("Closed"),
2 : T("Open")
}
dynamic = settings.get_cr_shelter_population_dynamic()
if not settings.get_cr_shelter_housing_unit_management():
capacity_day_comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Capacity (Day and Night)"),
T("Capacity of the shelter for people who need to stay both day and night")))
            capacity_night_comment = DIV(_class="tooltip",
                                         _title="%s|%s" % (T("Capacity (Night only)"),
                                                           T("Capacity of the shelter for people who need to stay for night only")))
else:
capacity_day_comment = DIV(_class="tooltip",
_title="%s|%s|%s" % (T("Capacity (Day and Night)"),
T("Capacity of the shelter for people who need to stay both day and night"),
T("Capacity evaluated adding all defined housing unit capacities")))
capacity_night_comment = DIV(_class="tooltip",
_title="%s|%s|%s" % (T("Capacity (Night only)"),
T("Capacity of the shelter for people who need to stay for night only"),
T("Capacity evaluated adding all defined housing unit capacities")))
tablename = "cr_shelter"
define_table(tablename,
super_link("doc_id", "doc_entity"),
super_link("pe_id", "pr_pentity"),
super_link("site_id", "org_site"),
#Field("code",
# length=10, # Mayon compatibility
# notnull=True,
# unique=True, label=T("Code")),
Field("name", notnull=True,
length=64, # Mayon compatibility
label = T("Shelter Name"),
requires = IS_NOT_EMPTY(),
),
self.org_organisation_id(
requires = self.org_organisation_requires(updateable=True),
),
shelter_type_id(), # e.g. NGO-operated, Government evacuation center, School, Hospital -- see Agasti opt_camp_type.)
shelter_service_id(), # e.g. medical, housing, food, ...
shelter_environment_id(readable = False,
writable = False,),# Enable in template if-required
self.gis_location_id(),
Field("phone",
label = T("Phone"),
requires = IS_EMPTY_OR(s3_phone_requires),
),
Field("email", "string",
label = T("Email"),
),
self.pr_person_id(label = T("Contact Person / Camp Owner")),
#Static field
Field("population", "integer",
label = T("Estimated Population"),
represent = lambda v: IS_INT_AMOUNT.represent(v),
requires = IS_EMPTY_OR(
IS_INT_IN_RANGE(0, 999999)),
readable = not dynamic,
writable = not dynamic,
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Current estimated population"),
T("Current estimated population in shelter. Staff, Volunteers and Evacuees."))),
),
Field("capacity_day", "integer",
default = 0,
label = T("Evacuees Capacity (Day and Night)"),
represent = lambda v: IS_INT_AMOUNT.represent(v),
requires = IS_EMPTY_OR(
IS_INT_IN_RANGE(0, 999999)),
comment = capacity_day_comment,
),
Field("capacity_night", "integer",
default = 0,
label = T("Evacuees Capacity (Night only)"),
represent = lambda v: IS_INT_AMOUNT.represent(v),
requires = IS_EMPTY_OR(
IS_INT_IN_RANGE(0, 999999)),
comment = capacity_night_comment,
),
# Dynamic field
Field("available_capacity_day", "integer",
default = 0,
label = T("Evacuees Available Capacity (Day and Night)"),
represent = lambda v: IS_INT_AMOUNT.represent(v),
requires = IS_EMPTY_OR(
IS_INT_IN_RANGE(0, 999999)),
readable = dynamic,
# Automatically updated
writable = False,
),
# Dynamic field
Field("available_capacity_night", "integer",
default = 0,
label = T("Evacuees Available Capacity (Night only)"),
represent = lambda v: IS_INT_AMOUNT.represent(v),
requires = IS_EMPTY_OR(
IS_INT_IN_RANGE(0, 999999)),
readable = dynamic,
# Automatically updated
writable = False,
),
# Dynamic field
Field("population_day", "integer",
default = 0,
label = T("Evacuees Current Population (Day and Night)"),
represent = lambda v: IS_INT_AMOUNT.represent(v),
requires = IS_EMPTY_OR(
IS_INT_IN_RANGE(0, 999999)),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Population (Day)"),
T("Number of evacuees registered in the shelter for day and night"))),
readable = dynamic,
# Automatically updated
writable = False
),
# Dynamic field
Field("population_night", "integer",
default = 0,
label = T("Evacuues Current Population (Night only)"),
represent = lambda v: IS_INT_AMOUNT.represent(v),
requires = IS_EMPTY_OR(
IS_INT_IN_RANGE(0, 999999)),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Population (Night)"),
T("Number of people registered in the shelter for night only"))),
readable = dynamic,
# Automatically updated
writable = False
),
Field("status", "integer",
label = T("Status"),
represent = lambda opt: \
cr_shelter_opts.get(opt, messages.UNKNOWN_OPT),
requires = IS_EMPTY_OR(
IS_IN_SET(cr_shelter_opts)
),
),
Field("source",
label = T("Source"),
readable = False,
writable = False,
),
s3_comments(),
Field("obsolete", "boolean",
default = False,
label = T("Obsolete"),
represent = lambda opt: \
(opt and [T("Obsolete")] or [messages["NONE"]])[0],
readable = False,
writable = False,
),
*s3_meta_fields())
# CRUD strings
if settings.get_ui_label_camp():
ADD_SHELTER = T("Add Camp")
SHELTER_LABEL = T("Camp")
SHELTER_HELP = T("The Camp this Request is from")
crud_strings[tablename] = Storage(
label_create = ADD_SHELTER,
title_display = T("Camp Details"),
title_list = T("Camps"),
title_update = T("Edit Camp"),
label_list_button = T("List Camps"),
msg_record_created = T("Camp added"),
msg_record_modified = T("Camp updated"),
msg_record_deleted = T("Camp deleted"),
msg_list_empty = T("No Camps currently registered"))
else:
ADD_SHELTER = T("Create Shelter")
SHELTER_LABEL = T("Shelter")
SHELTER_HELP = T("The Shelter this Request is from")
crud_strings[tablename] = Storage(
label_create = ADD_SHELTER,
title_display = T("Shelter Details"),
title_list = T("Shelters"),
title_update = T("Edit Shelter"),
label_list_button = T("List Shelters"),
msg_record_created = T("Shelter added"),
msg_record_modified = T("Shelter updated"),
msg_record_deleted = T("Shelter deleted"),
msg_list_empty = T("No Shelters currently registered"))
# Which levels of Hierarchy are we using?
levels = current.gis.get_relevant_hierarchy_levels()
report_fields = ["name",
"shelter_type_id",
#"organisation_id",
"status",
]
if dynamic:
report_fields.extend(("population_day",
"population_night",
))
else:
# Manual
report_fields.append("population")
text_fields = ["name",
"code",
"comments",
"organisation_id$name",
"organisation_id$acronym",
"location_id$name",
]
list_fields = ["id",
"name",
"status",
"shelter_type_id",
#"shelter_service_id",
]
if dynamic:
list_fields.extend(("capacity_day",
"capacity_night",
"population_day",
"population_night",
))
else:
# Manual
list_fields.append("population")
list_fields.append("location_id$addr_street")
#list_fields.append("person_id")
for level in levels:
lfield = "location_id$%s" % level
report_fields.append(lfield)
text_fields.append(lfield)
list_fields.append(lfield)
cr_shelter_status_filter_opts = dict(cr_shelter_opts)
cr_shelter_status_filter_opts[None] = T("Unspecified")
if settings.get_org_branches():
org_filter = S3HierarchyFilter("organisation_id",
leafonly = False,
)
else:
org_filter = S3OptionsFilter("organisation_id",
filter = True,
header = "",
#hidden = True,
)
filter_widgets = [
S3TextFilter(text_fields,
label = T("Name"),
_class = "filter-search",
),
S3OptionsFilter("shelter_type_id",
label = T("Type"),
# Doesn't translate
#represent = "%(name)s",
),
org_filter,
S3LocationFilter("location_id",
label = T("Location"),
levels = levels,
),
S3OptionsFilter("status",
label = T("Status"),
options = cr_shelter_status_filter_opts,
none = True,
),
]
if dynamic:
filter_widgets.append(S3RangeFilter("available_capacity_night",
label = T("Available Capacity (Night)"),
))
filter_widgets.append(S3RangeFilter("capacity_night",
label = T("Total Capacity (Night)"),
))
if settings.get_cr_shelter_people_registration():
# Go to People check-in for this shelter after creation
create_next = URL(c="cr", f="shelter",
args=["[id]", "shelter_registration"])
else:
create_next = None
configure(tablename,
create_next = create_next,
deduplicate = self.cr_shelter_duplicate,
filter_widgets = filter_widgets,
list_fields = list_fields,
onaccept = self.cr_shelter_onaccept,
report_options = Storage(
rows=report_fields,
cols=report_fields,
fact=report_fields,
defaults=Storage(rows = lfield, # Lowest-level of hierarchy
cols="status",
fact="count(name)",
totals=True)
),
super_entity = ("org_site", "doc_entity", "pr_pentity"),
)
# Reusable field
represent = S3Represent(lookup=tablename)
shelter_id = S3ReusableField("shelter_id", "reference %s" % tablename,
label = SHELTER_LABEL,
ondelete = "RESTRICT",
represent = represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "cr_shelter.id",
represent,
sort=True)),
comment = S3AddResourceLink(c="cr",
f="shelter",
label=ADD_SHELTER,
title=SHELTER_LABEL,
tooltip="%s (%s)." % (SHELTER_HELP,
T("optional"))),
widget = S3AutocompleteWidget("cr", "shelter")
)
self.add_components(tablename,
cr_shelter_allocation = "shelter_id",
cr_shelter_registration = "shelter_id",
cr_shelter_unit = "shelter_id",
cr_shelter_status = {"name": "status",
"joinby": "shelter_id",
},
event_event_shelter = "shelter_id",
evr_case = "shelter_id",
)
# Custom Method to Assign HRs
set_method("cr", "shelter",
method = "assign",
action = self.hrm_AssignMethod(component="human_resource_site"))
set_method("cr", "shelter",
method = "dispatch",
action = cr_notification_dispatcher)
# -------------------------------------------------------------------------
# Shelter statuses
# - a historical record of shelter status: opening/closing dates & populations
#
tablename = "cr_shelter_status"
define_table(tablename,
shelter_id(ondelete = "CASCADE"),
s3_date(),
Field("status", "integer",
label = T("Status"),
represent = lambda opt: \
cr_shelter_opts.get(opt, messages.UNKNOWN_OPT),
requires = IS_EMPTY_OR(
IS_IN_SET(cr_shelter_opts)
),
),
Field("population", "integer",
label = T("Population"),
represent = lambda v: IS_INT_AMOUNT.represent(v),
requires = IS_EMPTY_OR(
IS_INT_IN_RANGE(0, 999999)),
),
s3_comments(),
*s3_meta_fields())
# CRUD strings
if settings.get_ui_label_camp():
crud_strings[tablename] = Storage(
label_create = T("Add Camp Status"),
title_display = T("Camp Status Details"),
title_list = T("Camp Statuses"),
title_update = T("Edit Camp Status"),
label_list_button = T("List Camp Statuses"),
msg_record_created = T("Camp Status added"),
msg_record_modified = T("Camp Status updated"),
msg_record_deleted = T("Camp Status deleted"),
msg_list_empty = T("No Camp Statuses currently registered"))
else:
crud_strings[tablename] = Storage(
label_create = T("Create Shelter Status"),
title_display = T("Shelter Status Details"),
title_list = T("Shelter Statuses"),
title_update = T("Edit Shelter Status"),
label_list_button = T("List Shelter Statuses"),
msg_record_created = T("Shelter Status added"),
msg_record_modified = T("Shelter Status updated"),
msg_record_deleted = T("Shelter Status deleted"),
msg_list_empty = T("No Shelter Statuses currently registered"))
cr_housing_unit_opts = {1 : T("Available"),
2 : T("Not Available"),
}
cr_housing_unit_handicap_facilities = {1: T("Available"),
2: T("Suitable"),
3: T("Not Available")
}
tablename = "cr_shelter_unit"
define_table(tablename,
Field("name", notnull=True,
length=64,
label = T("Housing Unit Name"),
requires = IS_NOT_EMPTY(),
),
shelter_id(ondelete="CASCADE"),
Field("status", "integer",
default = 1,
label = T("Status"),
represent = lambda opt: \
cr_housing_unit_opts.get(opt, messages.UNKNOWN_OPT),
requires = IS_EMPTY_OR(
IS_IN_SET(cr_housing_unit_opts))
),
Field("bath", "boolean",
default = True,
label = T("Available Bath"),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Bath Availability"),
T("Integrated bath within housing unit"))),
),
Field("handicap_bath", "integer",
default = 1,
label = T("Bath with handicap facilities"),
represent = lambda opt: \
cr_housing_unit_handicap_facilities.get(opt, messages.UNKNOWN_OPT),
requires = IS_EMPTY_OR(
IS_IN_SET(cr_housing_unit_handicap_facilities)),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Bath Handicap Facilities"),
T("Availability of bath handicap facilities"))),
),
Field("shower", "boolean",
default = True,
label = T("Available Shower"),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Shower Availability"),
T("Integrated shower within housing unit"))),
),
Field("handicap_shower", "integer",
default = 1,
label = T("Shower with handicap facilities"),
represent = lambda opt: \
cr_housing_unit_handicap_facilities.get(opt, messages.UNKNOWN_OPT),
requires = IS_EMPTY_OR(
IS_IN_SET(cr_housing_unit_handicap_facilities)),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Shower Handicap Facilities"),
T("Availability of shower handicap facilities"))),
),
Field("capacity_day", "integer",
default = 0,
label = T("Housing Unit Capacity (Day and Night)"),
represent = lambda v: IS_INT_AMOUNT.represent(v),
requires = IS_EMPTY_OR(
IS_INT_IN_RANGE(0, 999999)),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Module Day and Night Capacity"),
T("Capacity of the housing unit for people who need to stay both day and night"))),
),
Field("capacity_night", "integer",
default = 0,
label = T("Housing Unit Capacity (Night)"),
represent = lambda v: IS_INT_AMOUNT.represent(v),
requires = IS_EMPTY_OR(
IS_INT_IN_RANGE(0, 999999)),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Module Night Capacity"),
T("Capacity of the housing unit for people who need to stay both day and night"))),
),
Field("available_capacity_day", "integer",
default = 0,
label = T("Population Availability (Day and Night)"),
represent = lambda v: IS_INT_AMOUNT.represent(v),
requires = IS_EMPTY_OR(
IS_INT_IN_RANGE(0, 999999)),
comment = DIV(_class="tooltip",
_title="%s" % (T("Current Population Availability (Day and Night)"))),
# Automatically updated
readable = dynamic,
writable = False
),
Field("available_capacity_night", "integer",
default = 0,
label = T("Population Availability (Night)"),
represent = lambda v: IS_INT_AMOUNT.represent(v),
requires = IS_EMPTY_OR(
IS_INT_IN_RANGE(0, 999999)),
comment = DIV(_class="tooltip",
_title="%s" % (T("Current Population Availability (Night)"))),
# Automatically updated
readable = dynamic,
writable = False
),
Field("population_day", "integer",
default = 0,
label = T("Current Population (Day and Night)"),
represent = lambda v: IS_INT_AMOUNT.represent(v),
requires = IS_EMPTY_OR(
IS_INT_IN_RANGE(0, 999999)),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Housing Unit Current Population"),
T("Number of evacuees registered in this housing unit (Day and Night)"))),
# Automatically updated
readable = False,
writable = False
),
Field("population_night", "integer",
default = 0,
label = T("Current Population (Night)"),
represent = lambda v: IS_INT_AMOUNT.represent(v),
requires = IS_EMPTY_OR(
IS_INT_IN_RANGE(0, 999999)),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Housing Unit Current Population"),
T("Number of evacuees registered in this housing unit (Night)"))),
readable = False,
# Automatically updated
writable = False
),
Field("domestic_animals", "boolean",
default = False,
label = T("Free for domestic animals"),
),
s3_comments(),
*s3_meta_fields())
list_fields = ["id",
"name",
"status",
"handicap_bath",
"capacity_day",
"capacity_night",
"population_day",
"population_night",
]
population_onaccept = lambda form: \
S3ShelterRegistrationModel.shelter_population_onaccept(form,
tablename="cr_shelter_unit")
configure(tablename,
#deduplicate = self.cr_shelter_unit_duplicate,
list_fields = list_fields,
onaccept = population_onaccept,
ondelete = population_onaccept,
)
represent = S3Represent(lookup="cr_shelter_unit")
housing_unit_id = S3ReusableField("shelter_unit_id", db.cr_shelter_unit,
label = "Housing Unit Name",
ondelete = "RESTRICT",
represent = represent,
requires = IS_NULL_OR(IS_ONE_OF(db, "cr_shelter_unit.id",
represent,
orderby="shelter_id",
#sort=True
)),
#widget = S3AutocompleteWidget("cr", "shelter_unit")
)
# ---------------------------------------------------------------------
# Pass variables back to global scope (response.s3.*)
return dict(ADD_SHELTER = ADD_SHELTER,
SHELTER_LABEL = SHELTER_LABEL,
cr_shelter_id = shelter_id,
cr_housing_unit_id = housing_unit_id,
)
# -------------------------------------------------------------------------
@staticmethod
def defaults():
"""
Return safe defaults in case the model has been deactivated.
"""
dummy = S3ReusableField("dummy_id", "integer",
readable = False,
writable = False)
return dict(cr_shelter_id = lambda **attr: dummy("shelter_id"),
)
# -------------------------------------------------------------------------
@staticmethod
def cr_shelter_onaccept(form):
"""
After DB I/O
"""
form_vars = form.vars
# Update Affiliation, record ownership and component ownership
current.s3db.org_update_affiliations("cr_shelter", form_vars)
if current.deployment_settings.get_cr_shelter_population_dynamic():
# Update population and available capacity
cr_update_shelter_population(form_vars.id)
# @ToDo: Update/Create a cr_shelter_status record
return
# -------------------------------------------------------------------------
@staticmethod
def cr_shelter_status_onaccept(form):
"""
After DB I/O
"""
# @ToDo: Update the cr_shelter record
# Status & Population
return
# -------------------------------------------------------------------------
@staticmethod
def cr_shelter_duplicate(item):
"""
Shelter record duplicate detection, used for the deduplicate hook
@param item: the S3ImportItem to check
"""
if item.tablename == "cr_shelter":
data = item.data
#org = "organisation_id" in data and data.organisation_id
address = "address" in data and data.address
table = item.table
query = (table.name == data.name)
#if org:
# query = query & (table.organisation_id == org)
if address:
query = query & (table.address == address)
row = current.db(query).select(table.id,
limitby=(0, 1)).first()
if row:
item.id = row.id
item.method = item.METHOD.UPDATE
# -------------------------------------------------------------------------
@staticmethod
def cr_shelter_type_duplicate(item):
"""
Shelter Type record duplicate detection, used for the deduplicate hook
@param item: the S3ImportItem to check
"""
if item.tablename == "cr_shelter_type":
table = item.table
query = (table.name == item.data.name)
row = current.db(query).select(table.id,
limitby=(0, 1)).first()
if row:
item.id = row.id
item.method = item.METHOD.UPDATE
# -------------------------------------------------------------------------
@staticmethod
def cr_shelter_unit_duplicate(item):
"""
Shelter housing unit record duplicate detection, used for the deduplicate hook
@param item: the S3ImportItem to check
"""
if item.tablename == "cr_shelter_unit":
table = item.table
query = (table.name == item.data.name)
row = current.db(query).select(table.id,
limitby=(0, 1)).first()
if row:
item.id = row.id
item.method = item.METHOD.UPDATE
# -------------------------------------------------------------------------
@staticmethod
def cr_shelter_service_represent(id, row=None):
""" FK representation """
if row:
return row.name
elif not id:
return current.messages["NONE"]
db = current.db
table = db.cr_shelter_service
r = db(table.id == id).select(table.name,
limitby = (0, 1)).first()
try:
return r.name
except:
return current.messages.UNKNOWN_OPT
# -----------------------------------------------------------------------------
@staticmethod
def cr_shelter_service_multirepresent(shelter_service_ids):
"""
"""
if not shelter_service_ids:
return current.messages["NONE"]
db = current.db
table = db.cr_shelter_service
if isinstance(shelter_service_ids, (list, tuple)):
query = (table.id.belongs(shelter_service_ids))
shelter_services = db(query).select(table.name)
return ", ".join([s.name for s in shelter_services])
else:
query = (table.id == shelter_service_ids)
shelter_service = db(query).select(table.name,
limitby=(0, 1)).first()
try:
return shelter_service.name
except:
return current.messages.UNKNOWN_OPT
# =============================================================================
class S3ShelterRegistrationModel(S3Model):
names = ("cr_shelter_allocation",
"cr_shelter_registration",
)
def model(self):
T = current.T
define_table = self.define_table
configure = self.configure
settings = current.deployment_settings
# ---------------------------------------------------------------------
# Shelter Allocation: table to allocate shelter capacity to a group
#
allocation_status_opts = {1: T("requested"),
2: T("available"),
3: T("allocated"),
4: T("occupied"),
5: T("departed"),
6: T("obsolete"),
7: T("unavailable"),
}
tablename = "cr_shelter_allocation"
define_table(tablename,
self.cr_shelter_id(ondelete="CASCADE"),
self.pr_group_id(comment = None),
Field("status", "integer",
requires = IS_IN_SET(allocation_status_opts),
represent = S3Represent(options = allocation_status_opts),
default = 3),
Field("group_size_day", "integer",
default = 0),
Field("group_size_night", "integer",
default = 0),
*s3_meta_fields())
population_onaccept = lambda form: \
self.shelter_population_onaccept(form,
tablename="cr_shelter_allocation")
configure(tablename,
onaccept = population_onaccept,
ondelete = population_onaccept,
)
# ---------------------------------------------------------------------
# Shelter Registration: table to register a person to a shelter
#
cr_day_or_night_opts = {NIGHT: T("Night only"),
DAY_AND_NIGHT: T("Day and Night")
}
cr_registration_status_opts = {1: T("Planned"),
2: T("Checked-in"),
3: T("Checked-out"),
}
housing_unit = settings.get_cr_shelter_housing_unit_management()
tablename = "cr_shelter_registration"
self.define_table(tablename,
self.cr_shelter_id(empty = False,
ondelete = "CASCADE",
),
# The comment explains how to register a new person
# it should not be done in a popup
self.pr_person_id(
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Person"),
T("Type the name of a registered person \
or to add an unregistered person to this \
shelter click on Evacuees")
)
),
),
self.cr_housing_unit_id(readable = housing_unit,
writable = housing_unit,
),
Field("day_or_night", "integer",
label = T("Presence in the shelter"),
represent = S3Represent(
options=cr_day_or_night_opts
),
requires = IS_IN_SET(cr_day_or_night_opts,
zero=None
),
),
Field("registration_status", "integer",
label = T("Status"),
represent = S3Represent(
options=cr_registration_status_opts,
),
requires = IS_IN_SET(cr_registration_status_opts,
zero=None
),
),
s3_datetime("check_in_date",
label = T("Check-in date"),
default = "now",
#empty = False,
future = 0,
),
s3_datetime("check_out_date",
label = T("Check-out date"),
),
s3_comments(),
*s3_meta_fields())
population_onaccept = lambda form: \
self.shelter_population_onaccept(form,
tablename="cr_shelter_registration")
if housing_unit:
configure(tablename,
onvalidation = self.unit_onvalidation,
onaccept = population_onaccept,
ondelete = population_onaccept,
)
else:
configure(tablename,
onaccept = population_onaccept,
ondelete = population_onaccept,
)
# ---------------------------------------------------------------------
# Pass variables back to global scope (response.s3.*)
return dict()
# -------------------------------------------------------------------------
@staticmethod
def unit_onvalidation(form):
"""
Check if the housing unit belongs to the requested shelter
"""
db = current.db
T = current.T
htable = db.cr_shelter_unit
if type(form) is Row:
if current.request.controller == "evr":
shelter_id = form.shelter_id
unit_id = form.shelter_unit_id
elif current.request.controller == "cr":
shelter_id = current.request.args[0]
unit_id = form.shelter_unit_id
else:
if current.request.controller == "evr":
shelter_id = form.vars.shelter_id
unit_id = form.vars.shelter_unit_id
elif current.request.controller == "cr":
shelter_id = current.request.args[0]
unit_id = form.vars.shelter_unit_id
if unit_id == None:
warning = T("Warning: No housing unit selected")
current.response.warning = warning
else:
record = db(htable.id == unit_id).select(htable.shelter_id).first()
shelter_value = str(record.shelter_id)
if shelter_value != shelter_id:
error = T("You have to select a housing unit belonged to the shelter")
form.errors["branch_id"] = error
current.response.error = error
return
# -------------------------------------------------------------------------
@staticmethod
def shelter_population_onaccept(form, tablename=None):
db = current.db
if not tablename:
return
table = current.s3db[tablename]
try:
if type(form) is Row:
record_id = form.id
else:
record_id = form.vars.id
except:
# Nothing we can do
return
row = db(table._id == record_id).select(table._id,
table.shelter_id,
table.deleted,
table.deleted_fk,
limitby=(0, 1)).first()
if row:
if row.deleted:
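                # For deleted records the FKs survive only in deleted_fk (JSON),
                # so recover the shelter_id from there before updating totals.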
if row.deleted_fk:
deleted_fk = json.loads(row.deleted_fk)
else:
return
shelter_id = deleted_fk.get("shelter_id")
else:
shelter_id = row.shelter_id
if shelter_id:
if current.deployment_settings.get_cr_shelter_housing_unit_management():
cr_update_capacity_from_housing_units(shelter_id)
cr_update_shelter_population(shelter_id)
return
# =============================================================================
def cr_shelter_rheader(r, tabs=[]):
""" Resource Headers """
if r.representation != "html":
# RHeaders only used in interactive views
return None
rheader = None
tablename, record = s3_rheader_resource(r)
if tablename == "cr_shelter" and record:
T = current.T
s3db = current.s3db
if not tabs:
settings = current.deployment_settings
tabs = [(T("Basic Details"), None),
(T("Status Reports"), "status"),
]
if settings.get_cr_shelter_people_registration():
tabs.extend([(T("People Reservation"), "shelter_allocation"),
(T("People Registration"), "shelter_registration"),
])
if settings.has_module("hrm"):
STAFF = settings.get_hrm_staff_label()
tabs.append((STAFF, "human_resource"))
permit = current.auth.s3_has_permission
if permit("update", tablename, r.id) and \
permit("create", "hrm_human_resource_site"):
tabs.append((T("Assign %(staff)s") % dict(staff=STAFF), "assign"))
if settings.get_cr_shelter_housing_unit_management():
tabs.append((T("Housing Units"), "shelter_unit"))
#tabs.append((T("Events"), "event_shelter"))
#if settings.has_module("assess"):
# tabs.append((T("Assessments"), "rat"))
try:
tabs = tabs + s3db.req_tabs(r, match=False)
except:
pass
try:
tabs = tabs + s3db.inv_tabs(r)
except:
pass
if settings.has_module("msg"):
tabs.append((T("Send Notification"), "dispatch"))
rheader_tabs = s3_rheader_tabs(r, tabs)
if r.name == "shelter":
location = r.table.location_id.represent(record.location_id)
rheader = DIV(TABLE(TR(TH("%s: " % T("Name")), record.name
),
TR(TH("%s: " % T("Location")), location
),
),
rheader_tabs)
else:
rheader = DIV(TABLE(TR(TH("%s: " % T("Name")), record.name
),
),
rheader_tabs)
return rheader
# =============================================================================
def cr_update_housing_unit_population(shelter_id):
"""
Update housing unit population number.
To be called onaccept/ondelete of cr_shelter_registration and
cr_shelter_allocation.
        @param shelter_id: the shelter record ID
"""
db = current.db
htable = db.cr_shelter_unit
rtable = db.cr_shelter_registration
query = (htable.shelter_id == shelter_id) & \
(htable.status == 1) & \
(htable.deleted != True)
rows = db(query).select(htable.id, htable.capacity_day, htable.capacity_night)
# all housing units need to be updated. This is necessary because evacuees
# (or groups) could be moved within the same shelter.
for row in rows:
capacity_day = row.capacity_day
capacity_night = row.capacity_night
unit_id = row.id
query_d = (rtable.shelter_id == shelter_id) & \
(rtable.shelter_unit_id == unit_id) & \
(rtable.registration_status != 3) & \
(rtable.day_or_night == 2) & \
(rtable.deleted != True)
population_day = db(query_d).count()
query_n = (rtable.shelter_id == shelter_id) & \
(rtable.shelter_unit_id == unit_id) & \
(rtable.registration_status != 3) & \
(rtable.day_or_night == 1) & \
(rtable.deleted != True)
population_night = db(query_n).count()
if capacity_day:
available_capacity_day = capacity_day - population_day
else:
capacity_day = 0
available_capacity_day = 0
if capacity_night:
available_capacity_night = capacity_night - \
population_night
else:
            capacity_night = 0
available_capacity_night = 0
db(htable._id==unit_id).update(available_capacity_day=available_capacity_day,
available_capacity_night = available_capacity_night,
population_day=population_day,
population_night=population_night)
cr_check_population_availability(unit_id, htable)
return
# =============================================================================
def cr_update_shelter_population(shelter_id):
"""
Update population and available capacity numbers, to be
called onaccept/ondelete of cr_shelter_registration and
cr_shelter_allocation.
@param shelter_id: the shelter record ID
"""
db = current.db
s3db = current.s3db
stable = s3db.cr_shelter
atable = s3db.cr_shelter_allocation
rtable = db.cr_shelter_registration
# Get the shelter record
record = db(stable._id == shelter_id).select(stable.id,
stable.capacity_day,
stable.capacity_night,
limitby=(0, 1)).first()
# Get population numbers
query = (rtable.shelter_id == shelter_id) & \
(rtable.registration_status != 3) & \
(rtable.deleted != True)
cnt = rtable._id.count()
rows = db(query).select(rtable.day_or_night, cnt,
groupby=rtable.day_or_night,
orderby=rtable.day_or_night)
population_day = population_night = 0
for row in rows:
reg_type = row[rtable.day_or_night]
number = row[cnt]
if reg_type == NIGHT and number:
population_night = number
elif reg_type == DAY_AND_NIGHT and number:
population_day = number
# Get allocation numbers
query = (atable.shelter_id == shelter_id) & \
(atable.status.belongs((1,2,3,4))) & \
(atable.deleted != True)
dcnt = atable.group_size_day.sum()
ncnt = atable.group_size_night.sum()
row = db(query).select(dcnt, ncnt, limitby=(0, 1), orderby=dcnt).first()
if row:
if row[dcnt] is not None:
allocated_capacity_day = row[dcnt]
else:
allocated_capacity_day = 0
if row[ncnt] is not None:
allocated_capacity_night = row[ncnt]
else:
allocated_capacity_night = 0
else:
allocated_capacity_day = allocated_capacity_night = 0
# Compute available capacity
capacity_day = record.capacity_day
if capacity_day:
available_capacity_day = capacity_day - \
population_day - \
allocated_capacity_day
else:
available_capacity_day = 0
capacity_night = record.capacity_night
if capacity_night:
available_capacity_night = record.capacity_night - \
population_night - \
allocated_capacity_night
else:
available_capacity_night = 0
if current.deployment_settings.get_cr_shelter_housing_unit_management():
cr_update_housing_unit_population(shelter_id)
# Update record
record.update_record(population_day=population_day,
population_night=population_night,
available_capacity_day=available_capacity_day,
available_capacity_night=available_capacity_night)
cr_check_population_availability(shelter_id, stable)
return
# =============================================================================
def cr_check_population_availability(unit_id, table):
"""
Evaluate the population capacity availability.
        Show a non-blocking warning when the population of the shelter/housing unit exceeds its capacity.
        @param unit_id: the shelter ID / housing unit ID
        @param table: the related table (cr_shelter or cr_shelter_unit)
"""
db = current.db
response = current.response
T = current.T
record = db(table.id == unit_id).select(table.capacity_day,
table.population_day,
table.capacity_night,
table.population_night,
limitby=(0, 1)
).first()
capacity_day = record.capacity_day
population_day = record.population_day
if (capacity_day is not None) and (population_day > capacity_day):
if table._tablename == "cr_shelter":
response.warning = T("Warning: this shelter is full for daytime")
elif table._tablename == "cr_shelter_unit":
response.warning = T("Warning: this housing unit is full for daytime")
capacity_night = record.capacity_night
population_night = record.population_night
if (capacity_night is not None) and (population_night > capacity_night):
if table._tablename == "cr_shelter":
response.warning = T("Warning: this shelter is full for the night")
elif table._tablename == "cr_shelter_unit":
response.warning = T("Warning: this housing unit is full for the night")
return
# =============================================================================
def cr_update_capacity_from_housing_units(shelter_id):
"""
Update shelter capacity numbers, new capacity numbers are evaluated
adding together all housing unit capacities.
To be called onaccept/ondelete of cr_shelter_registration and
cr_shelter_allocation.
@param shelter_id: the shelter record ID
"""
db = current.db
stable = db.cr_shelter
htable = db.cr_shelter_unit
query = (htable.shelter_id == shelter_id) & \
(htable.status == 1) & \
(htable.deleted != True)
total_capacity_day = htable.capacity_day.sum()
total_capacity_night = htable.capacity_night.sum()
capacity_count = db(query).select(total_capacity_day, total_capacity_night)
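    # Aggregate results are exposed in the Row's _extra dict, keyed by the
    # SUM expressions built above.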
if capacity_count:
total_capacity_day = capacity_count[0]._extra[total_capacity_day]
total_capacity_night = capacity_count[0]._extra[total_capacity_night]
else:
total_capacity_day = total_capacity_night = 0
db(stable._id==shelter_id).update(capacity_day=total_capacity_day,
capacity_night = total_capacity_night)
return
# =============================================================================
def cr_notification_dispatcher(r, **attr):
"""
Send a notification.
"""
if r.representation == "html" and \
r.name == "shelter" and r.id and not r.component:
T = current.T
msg = current.msg
s3db = current.s3db
record = r.record
ctable = s3db.pr_contact
stable = s3db.cr_shelter
message = ""
text = ""
s_id = record.id
s_name = record.name
s_phone = record.phone
s_email = record.email
s_status = record.status
if s_phone in ("", None):
s_phone = T("Not Defined")
if s_email in ("", None):
            s_email = T("Not Defined")
if s_status in ("", None):
s_status = T("Not Defined")
else:
            if s_status == 1:
                s_status = "Closed"
            elif s_status == 2:
                s_status = "Open"
else:
s_status = "Unassigned Shelter Status"
text += "************************************************"
text += "\n%s " % T("Automatic Message")
text += "\n%s: %s " % (T("Shelter ID"), s_id)
text += " %s: %s" % (T("Shelter name"), s_name)
text += "\n%s: %s " % (T("Email"), s_email)
text += " %s: %s" % (T("Phone"), s_phone)
text += "\n%s: %s " % (T("Working Status"), s_status)
text += "\n************************************************\n"
# Encode the message as an OpenGeoSMS
#message = msg.prepare_opengeosms(record.location_id,
# code="ST",
# map="google",
# text=text)
# URL to redirect to after message sent
url = URL(c="cr", f="shelter", args=r.id)
# Create the form
opts = dict(type="SMS",
# @ToDo: deployment_setting
subject = T("Deployment Request"),
message = message + text,
url = url,
)
output = msg.compose(**opts)
# Maintain RHeader for consistency
if attr.get("rheader"):
rheader = attr["rheader"](r)
if rheader:
output["rheader"] = rheader
output["title"] = T("Send Notification")
current.response.view = "msg/compose.html"
return output
else:
raise HTTP(501, current.messages.BADMETHOD)
# END =========================================================================
|
py | b4128aa921ceb3b2d89f7eb264cdf8512294f377 | # Generated by Django 3.1.12 on 2021-06-13 04:42
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('job', '0006_auto_20210613_0442'),
]
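    # These defaults are fixed timestamps captured when the migration was generated.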
operations = [
migrations.AlterField(
model_name='job',
name='create_at',
field=models.DateTimeField(default=datetime.datetime(2021, 6, 13, 4, 42, 26, 325262, tzinfo=utc)),
),
migrations.AlterField(
model_name='job',
name='update_at',
field=models.DateTimeField(default=datetime.datetime(2021, 6, 13, 4, 42, 26, 325285, tzinfo=utc)),
),
]
|
py | b4128c83b7ca6d31d87e9f71d910ebdb6adc9e2f | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from album_metadata import *
from random import choice
from yt_fetch import *
import string
from spotify_playlist import SpotifyEmbed
def markup(userRequest, albumInfo, contentSite, parseFunc, encoding):
loadergif = "<img class=\"loader\" src=\"{{ url_for('static', filename='loader.gif') }}\" alt=\"Publishing...\" />"
linebreak = "<br />"
hrline = "<hr />"
htmlfoo = albumInfo.search(userRequest, contentSite)
parseFunc(htmlfoo)
contentSitename = contentSite.lower()
if contentSitename == 'allmusic'.lower():
metadata = albumInfo.allmusicMetadata
elif contentSitename == 'rateyourmusic'.lower():
metadata = albumInfo.rymMetadata
elif contentSitename == 'discogs'.lower():
metadata = albumInfo.discogsMetadata
elif contentSitename == 'itunes'.lower():
metadata = albumInfo.itunesMetadata
elif contentSitename == 'pitchfork'.lower():
metadata = albumInfo.pitchforkMetadata
elif contentSitename == 'sputnikmusic'.lower():
metadata = albumInfo.sputnikmusicMetadata
elif contentSitename == 'rollingstone'.lower():
metadata = albumInfo.rsMetadata
elif contentSitename == 'metacritic'.lower():
metadata = albumInfo.metacriticMetadata
try:
if metadata['rating']:
ratingMarkup = "<a href=\"" + albumInfo.pageUrl + '" target="_blank">' + "<b>" + contentSite.title() + "</b>" + "</a>" + " - " + metadata['rating'].decode(encoding) + linebreak
ratingMarkedup = True
else:
if not albumInfo.pageUrl:
ratingMarkup = "<a href=\"" + albumInfo.searchUrl.strip("&btnI") + '" target="_blank">' + "<b>" + contentSite.title() + "</b>" + "</a>" + linebreak
ratingMarkedup = False
else:
ratingMarkup = "<a href=\"" + albumInfo.pageUrl + '" target="_blank">' + "<b>" + contentSite.title() + "</b>" + "</a>" + linebreak
ratingMarkedup = True
if not metadata['review'][0]:
reviewMarkup = ""
reviewMarkedup = False
else:
reviewMarkup = ""
for eachReview in metadata['review']:
reviewMarkup = reviewMarkup + linebreak + "<i>" + '"' + eachReview.decode(encoding) + '"' + "</i>" + linebreak
reviewMarkedup = True
if not ratingMarkedup and not reviewMarkedup:
markup = ratingMarkup
else:
markup = ratingMarkup + reviewMarkup
except:
markup = "<i>Oops, content not found.</i>"
if not albumInfo.pageUrl:
html = markup + "<br/><i>Album not found.</i>"
else:
html = markup
if contentSitename == 'allmusic'.lower():
try:
info = make_tracklist(albumInfo.songList, albumInfo.albumart, albumInfo.genre, albumInfo.styles).decode('utf-8')
except:
info = ""
if info:
html = "<div class=\"info\">" + info + "</div>" + hrline + "<p>" + html + "</p>"
else:
html = info + "<p>" + html + "</p>"
return html
def make_tracklist(songList, imageFile, genre, styles):
tracklisting = "<b><i>Track Listing:</b></i><br/>"
if genre:
if styles:
albumGenre = "<b><i>Genre:</b></i> " + "<i>" + genre + " (" + styles + ")</i><br /><br />"
else:
albumGenre = "<b><i>Genre:</b></i> " + "<i>" + genre + "</i><br /><br />"
else:
albumGenre = ""
if songList:
for eachSong in songList:
if eachSong != songList[-1]:
tracklisting = tracklisting + "<i>" + eachSong + "</i>" + " - "
else:
tracklisting = tracklisting + "<i>" + eachSong + "</i>"
else:
tracklisting = ""
if imageFile:
albumpic = "<img class=\"albumart\" width=\"200\" height=\"200\" src=\"" + imageFile + \
"\" alt=\"Album Art\" /><br /><br />"
else:
albumpic = ""
html = str(albumpic) + str(albumGenre) + str(tracklisting)
return html
def make_html(userRequest, urlCount):
albumInfo = album_metadata()
loadergif = "<img class=\"loader\" src=\"{{ url_for('static', filename='loader.gif') }}\" alt=\"Publishing...\" />"
linebreak = "<br />"
hrline = "<hr />"
segmented = False
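    # urlCount selects the content source rendered for this segment:
    # 1 AllMusic (with track listing), 2 YouTube embed, 3 Spotify embed,
    # 4 Rate Your Music, 5 Discogs, 6 iTunes, 7 Pitchfork, 8 Sputnikmusic,
    # 9 Rolling Stone, 10 Metacritic.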
if urlCount == 1:
html = "<p>" + markup(userRequest, albumInfo, 'allmusic', albumInfo.allmusic_parse, 'utf-8') + "</p>"
elif urlCount == 2:
htmlfoo = albumInfo.search(userRequest, 'allmusic')
albumInfo.allmusic_parse(htmlfoo, getAlbumArt = False, getGenre = False, getStyles = False)
if not albumInfo.songList:
try:
randomSongChosen = ytMetadata().SearchAndPrint(userRequest.encode('utf-8'))
except:
randomSongChosen = ""
else:
for i in range(0, 3):
try:
randomSongChosen = ytMetadata().SearchAndPrint(choice(albumInfo.songList) + " " + userRequest.encode('utf-8'))
break
except:
randomSongChosen = ""
continue
if not randomSongChosen:
return "<i>Youtube Video not found.</i>"
youtubeEmbed = '<iframe title="Youtube video player" width="50%" height="380" ' + \
'src="http://www.youtube.com/embed/' + randomSongChosen + \
'" frameborder="0" allowfullscreen></iframe>'
html = youtubeEmbed
segmented = True
elif urlCount == 3:
album = SpotifyEmbed(userRequest)
segmented = True
try:
album_uri = album.get_album_uri()
html = album.generate_embed_code(album_uri) + hrline
except:
html = hrline + "<i>Album not found on Spotify.</i>" + hrline
elif urlCount == 4:
html = "<p>" + markup(userRequest, albumInfo, 'rateyourmusic', albumInfo.rym_parse, 'utf-8') + "</p>"
elif urlCount == 5:
html = "<p>" + markup(userRequest, albumInfo, 'discogs', albumInfo.discogs_parse, 'utf-8') + "</p>"
elif urlCount == 6:
html = "<p>" + markup(userRequest, albumInfo, 'itunes', albumInfo.itunes_parse, 'utf-8') + "</p>"
elif urlCount == 7:
html = "<p>" + markup(userRequest, albumInfo, 'pitchfork', albumInfo.pitchfork_parse, 'utf-8') + "</p>"
elif urlCount == 8:
html = "<p>" + markup(userRequest, albumInfo, 'sputnikmusic', albumInfo.sputnikmusic_parse, 'utf-8') + "</p>"
elif urlCount == 9:
html = "<p>" + markup(userRequest, albumInfo, 'rollingstone', albumInfo.rs_parse, 'utf-8') + "</p>"
elif urlCount == 10:
html = "<p>" + markup(userRequest, albumInfo, 'metacritic', albumInfo.metacritic_parse, 'utf-8') + "</p>"
segmented = True
#print albumInfo.allmusicMetadata
#print
#print albumInfo.rymMetadata
#print
#print albumInfo.discogsMetadata
#html = allmusicMarkup + hrline + rymMarkup + hrline + discogsMarkup + hrline + youtubeEmbed
if segmented:
return html
else:
return html + hrline
if __name__ == "__main__":
make_html('live', 4)
|
py | b4128cd9c23beb9c6c50c08f31949d9a4dcd5cae | try:
n = float(input("Enter a Number: "))
result = n ** 0.5
print(result)
except ValueError:
print("\nInvalid Input")
|
py | b4128d69bf61a47550d960a6abc9def1c021c9cd | from invoke import task
@task
def next_version(ctx, version_part):
"""Prepares the next release.
version_part: Part of the version to bump (patch, minor, major).
"""
res = ctx.run(
'bumpversion --dry-run --allow-dirty --list {} | grep new_version | sed s,"^.*=",,'.format(version_part),
hide=True
)
print("Next version is ", res.stdout)
ctx.run(
"bumpversion {} && git diff".format(version_part)
)
print("Review your version changes first")
print("Accept your version: invoke release.accept-version")
print("Accept your version: invoke release.revoke-version")
@task
def accept_version(ctx):
"""Accepts the staged version."""
ctx.run("git push && git push --tags")
@task
def revoke_version(ctx):
"""Rollback the staged version."""
# Remove the tag and rollback the commit
ctx.run(
"git tag -d `git describe --tags --abbrev=0` && git reset --hard HEAD~1 "
)
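# Typical flow sketched from the messages above (assuming these tasks are
# mounted in an invoke collection named "release"):
#   invoke release.next-version patch   # stage a bumpversion commit and tag
#   invoke release.accept-version       # push the commit and tags
#   invoke release.revoke-version       # or drop the staged tag and commit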
@task
def dist(ctx):
"""Create a python distribution (source and wheel)."""
ctx.run("poetry build")
@task(dist)
def testpypi(ctx):
"""Publishes the package to testpypi."""
ctx.run("poetry publish --repository testpypi")
@task(dist)
def pypi(ctx):
"""Publishes the package to pypi."""
ctx.run("poetry publish")
|
py | b4128dee19b42c70fc6daf2aa7bac0cb8278640c | # Python libraries
import argparse, os
import torch
# Lib files
import lib.utils as utils
import lib.medloaders as medical_loaders
import lib.medzoo as medzoo
import lib.train as train
from lib.losses3D import DiceLoss
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
seed = 1777777
torch.manual_seed(seed)
def main():
args = get_arguments()
utils.reproducibility(args, seed)
utils.make_dirs(args.save)
training_generator, val_generator, full_volume, affine = medical_loaders.generate_datasets(args,
path=args.dataset_path)
model, optimizer = medzoo.create_model(args)
criterion = DiceLoss(classes=11, skip_index_after=args.classes)
if args.cuda:
model = model.cuda()
trainer = train.Trainer(args, model, criterion, optimizer, train_data_loader=training_generator,
valid_data_loader=val_generator, lr_scheduler=None)
trainer.training()
def get_arguments():
parser = argparse.ArgumentParser()
parser.add_argument('--batchSz', type=int, default=4)
parser.add_argument('--dataset_name', type=str, default="mrbrains4")
parser.add_argument('--dataset_path', type=str, default="datasets")
parser.add_argument('--dim', nargs="+", type=int, default=(128, 128, 48))
parser.add_argument('--nEpochs', type=int, default=200)
parser.add_argument('--inChannels', type=int, default=3)
parser.add_argument('--inModalities', type=int, default=3)
parser.add_argument('--terminal_show_freq', default=50)
parser.add_argument('--samples_train', type=int, default=10)
parser.add_argument('--samples_val', type=int, default=10)
parser.add_argument('--classes', type=int, default=2)
parser.add_argument('--threshold', default=0.1, type=float)
    parser.add_argument('--augmentation', default='no', type=str,
                        help='Apply data augmentation (default: no)')
parser.add_argument('--normalization', default='global_mean', type=str,
help='Tensor normalization: options max, mean, global')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
    parser.add_argument('--split', default=0.9, type=float, help='Select percentage of training data (default: 0.9)')
parser.add_argument('--lr', default=1e-3, type=float,
help='learning rate (default: 1e-3)')
parser.add_argument('--cuda', action='store_true', default=False)
parser.add_argument('--model', type=str, default='UNET3D',
choices=('VNET', 'VNET2', 'UNET3D', 'DENSENET1', 'DENSENET2', 'DENSENET3', 'HYPERDENSENET'))
parser.add_argument('--opt', type=str, default='sgd',
choices=('sgd', 'adam', 'rmsprop'))
parser.add_argument('--log_dir', type=str,
default='../runs/')
args = parser.parse_args()
args.save = '../saved_models/' + args.model + '_checkpoints/' + args.model + '_{}_{}_'.format(
utils.datestr(), args.dataset_name)
return args
if __name__ == '__main__':
main()
|
py | b4128e8f5e96418794befb34605dcec4f2080877 | import json
import logging
import os
import re
import unittest2
import yaml
from drheader import Drheader
class TestBase(unittest2.TestCase):
def setUp(self):
self.logger = logging.Logger
def tearDown(self):
with open(os.path.join(os.path.dirname(__file__), '../../drheader/rules.yml')) as rules_file:
default_rules = yaml.safe_load(rules_file.read())
with open(os.path.join(os.path.dirname(__file__), '../test_resources/default_rules.yml'), 'w') as rules_file:
yaml.dump(default_rules, rules_file, sort_keys=False)
def process_test(self, url=None, method="GET", headers=None, status_code=None):
with open(os.path.join(os.path.dirname(__file__), '../test_resources/default_rules.yml')) as rules_file:
rules = yaml.safe_load(rules_file.read())['Headers']
self.instance = Drheader(url=url, method=method, headers=headers, status_code=status_code)
self.instance.analyze(rules=rules)
@staticmethod
def get_headers():
with open(os.path.join(os.path.dirname(__file__), '../test_resources/headers_ok.json')) as headers_file:
return json.loads(headers_file.read())
@staticmethod
def add_or_modify_header(header_name, update_value, headers=None):
headers = TestBase.get_headers() if not headers else headers
headers[header_name] = update_value
return headers
@staticmethod
def delete_header(header_name, headers=None):
headers = TestBase.get_headers() if not headers else headers
if header_name in headers:
headers.pop(header_name)
return headers
@staticmethod
def modify_directive(header_name, update_value, pattern, headers=None):
headers = TestBase.get_headers() if not headers else headers
if header_name in headers:
search_result = re.search(pattern, headers[header_name])
if search_result:
headers[header_name] = headers[header_name].replace(search_result.group(), update_value)
else:
headers[header_name] = headers[header_name] + '; ' + update_value
else:
headers[header_name] = update_value
return headers
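    # Illustrative use of modify_directive, assuming a hypothetical CSP header:
    # given headers = {'Content-Security-Policy': "default-src 'none'; upgrade-insecure-requests"},
    # modify_directive('Content-Security-Policy', "default-src 'self'",
    #                  r"default-src [^;]*") replaces the matched directive,
    # appends "; <update_value>" when the pattern is absent, and creates the
    # header outright when it is missing.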
@staticmethod
def build_error_message(report, expected_report=None, rule=None, append_text=None):
if expected_report is None:
expected_report = []
elif type(expected_report) is dict:
expected_report = expected_report.items()
unexpected_items = []
for item in report:
if rule and item['rule'] == rule and item not in expected_report:
unexpected_items.append(item)
elif not rule and item not in expected_report:
unexpected_items.append(item)
missing_items = []
for item in expected_report:
if item not in report:
missing_items.append(item)
error_message = "\n"
if len(unexpected_items) > 0:
error_message += "\nThe following items were found but were not expected in the report: \n"
error_message += json.dumps(unexpected_items, indent=2)
if len(missing_items) > 0:
error_message += "\nThe following items were not found but were expected in the report: \n"
error_message += json.dumps(missing_items, indent=2)
if append_text:
error_message = '%s\n\n%s' % (error_message, append_text)
return error_message
# start unittest2 to run these tests
if __name__ == "__main__":
unittest2.main()
|
py | b4128eb581607a54d0ad6ca9666e1c1e17ef7e4e | from ackward import (Class,
method,
Namespace,
TranslationUnit)
def tunit():
return TranslationUnit(
guard='INCLUDE_ACKWARD_RE_REGEX_OBJECT_HPP',
forward_declarations=[('ackward', 're', 'class MatchObject')],
header_includes=[('string',)],
impl_includes=[('ackward', 're', 'RegexObject.hpp'),
('ackward', 're', 'MatchObject.hpp')])
def definition(env):
t = tunit()
ns = Namespace('ackward', 're', parent=t)
cls = Class(name='RegexObject',
wrapped_class='_sre.SRE_Pattern',
parent=ns)
method('MatchObject match(std::wstring s) const',
parent=cls)
method('MatchObject match(std::wstring s, int pos)',
parent=cls)
method('MatchObject match(std::wstring s, int pos, int endpos)',
parent=cls)
method('MatchObject search(std::wstring s) const',
parent=cls)
method('MatchObject search(std::wstring s, int pos)',
parent=cls)
method('MatchObject search(std::wstring s, int pos, int endpos)',
parent=cls)
method('boost::python::list split(std::wstring s) const',
parent=cls)
method('boost::python::list split(std::wstring s, int maxsplit) const',
parent=cls)
# RegexObject.findall(string[, pos[, endpos]])
# Identical to the findall() function, using the compiled pattern.
# RegexObject.finditer(string[, pos[, endpos]])
# Identical to the finditer() function, using the compiled pattern.
# RegexObject.sub(repl, string[, count=0])
# Identical to the sub() function, using the compiled pattern.
# RegexObject.subn(repl, string[, count=0])
# Identical to the subn() function, using the compiled pattern.
# RegexObject.flags
# The flags argument used when the RE object was compiled, or 0 if no flags were provided.
# RegexObject.groups
# The number of capturing groups in the pattern.
# RegexObject.groupindex
# A dictionary mapping any symbolic group names defined by (?P<id>) to group numbers. The dictionary is empty if no symbolic groups were used in the pattern.
# RegexObject.pattern
# The pattern string from which the RE object was compiled.
return t
|
py | b4128f0f9fe36004537e424757c82a2afe4f3c9e | # pylint:disable=line-too-long
import logging
from ...sim_type import SimTypeFunction, SimTypeShort, SimTypeInt, SimTypeLong, SimTypeLongLong, SimTypeDouble, SimTypeFloat, SimTypePointer, SimTypeChar, SimStruct, SimTypeFixedSizeArray, SimTypeBottom, SimUnion, SimTypeBool
from ...calling_conventions import SimCCStdcall, SimCCMicrosoftAMD64
from .. import SIM_PROCEDURES as P
from . import SimLibrary
_l = logging.getLogger(name=__name__)
lib = SimLibrary()
lib.set_default_cc('X86', SimCCStdcall)
lib.set_default_cc('AMD64', SimCCMicrosoftAMD64)
lib.set_library_names("slcext.dll")
prototypes = \
{
#
'SLActivateProduct': SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypePointer(SimTypeBottom(label="Guid"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypePointer(SimStruct({"cbSize": SimTypeInt(signed=False, label="UInt32"), "type": SimTypeInt(signed=False, label="SL_ACTIVATION_TYPE")}, name="SL_ACTIVATION_INFO_HEADER", pack=False, align=None), offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypeShort(signed=False, label="UInt16")], SimTypeInt(signed=True, label="Int32"), arg_names=["hSLC", "pProductSkuId", "cbAppSpecificData", "pvAppSpecificData", "pActivationInfo", "pwszProxyServer", "wProxyPort"]),
#
'SLGetServerStatus': SimTypeFunction([SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypeShort(signed=False, label="UInt16"), SimTypePointer(SimTypeInt(signed=True, label="Int32"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["pwszServerURL", "pwszAcquisitionType", "pwszProxyServer", "wProxyPort", "phrStatus"]),
#
'SLAcquireGenuineTicket': SimTypeFunction([SimTypePointer(SimTypePointer(SimTypeBottom(label="Void"), offset=0), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["ppTicketBlob", "pcbTicketBlob", "pwszTemplateId", "pwszServerUrl", "pwszClientToken"]),
#
'SLGetReferralInformation': SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeInt(signed=False, label="SLREFERRALTYPE"), SimTypePointer(SimTypeBottom(label="Guid"), offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimTypePointer(SimTypeChar(label="Char"), offset=0), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["hSLC", "eReferralType", "pSkuOrAppId", "pwszValueName", "ppwszValue"]),
}
lib.set_prototypes(prototypes)
|
py | b4128f32e72c3932d9ae475b10d8b34b86263fec | # -*- coding: utf-8 -*-
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# #
# BSD 2-Clause License #
# #
# Copyright (c) 2020, Patrick Hohenecker #
# All rights reserved. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions are met: #
# #
# 1. Redistributions of source code must retain the above copyright notice, this #
# list of conditions and the following disclaimer. #
# #
# 2. Redistributions in binary form must reproduce the above copyright notice, #
# this list of conditions and the following disclaimer in the documentation #
# and/or other materials provided with the distribution. #
# #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" #
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE #
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE #
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE #
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL #
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR #
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER #
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, #
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE #
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #
# #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
import typing
import argmagiq.parsers.data_type_parser as data_type_parser
__author__ = "Patrick Hohenecker"
__copyright__ = "Copyright (c) 2020, Patrick Hohenecker"
__license__ = "BSD-2-Clause"
__version__ = "0.1.0"
__date__ = "29 Jun 2020"
__maintainer__ = "Patrick Hohenecker"
__email__ = "[email protected]"
__status__ = "Development"
class StrParser(data_type_parser.DataTypeParser):
"""A parser for configuration values of type ``str``."""
def _parse(self, argv: typing.Tuple[str, ...]) -> typing.Tuple[typing.Any, typing.Tuple[str, ...]]:
if len(argv) < 2:
raise ValueError(f"Option {self._arg_name} requires an argument")
return argv[1], argv[2:]
def parse_json(self, json_value: typing.Any) -> typing.Any:
return str(json_value)
|
py | b4128f56af545ba376447d9ccba1d779fbdc4128 | #!/usr/bin/env python
from Bio.Seq import Seq
from Bio import SeqIO
import sys
trimmed_reads = []
infile = sys.argv[1]
outfile = sys.argv[2]
size = int(sys.argv[3])
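# Slice each read to the window [100, 101 + size), i.e. size + 1 bases starting at position 100.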
for seq_record in SeqIO.parse(infile, "fastq"):
trimmed_reads.append(seq_record[100:(101 + size)])
SeqIO.write(trimmed_reads, outfile, "fastq")
|
py | b41290dd139a4ee07c95b521c86f4f5b83fa0d70 | import enum
from typing import Optional
class TreeView:
def __init__(self, id, name, contextual_title, menus):
self.id = id
self.name = name
self.contextual_title = contextual_title
self.menus = menus
class TreeViewContainer:
def __init__(self, id, title, icon, tree_views):
self.id = id
self.title = title
self.icon = icon
self.tree_views = tree_views
class MenuGroup(enum.Enum):
NAVIGATION = "navigation"
class Menu:
def __init__(
self, command_id, group: Optional[MenuGroup] = None, when: Optional[str] = None
):
self.command_id = command_id
self.group = group
self.when = when
TREE_VIEW_CONTAINERS = [
TreeViewContainer(
id="robocorp-robots",
title="Robocorp Code",
icon="images/robocorp-outline.svg",
tree_views=[
TreeView(
id="robocorp-robots-tree",
name="Robots",
contextual_title="Robots",
menus=[
Menu(
"robocorp.robotsViewTaskRun",
MenuGroup.NAVIGATION,
"robocorp-code:single-task-selected",
),
Menu(
"robocorp.robotsViewTaskDebug",
MenuGroup.NAVIGATION,
"robocorp-code:single-task-selected",
),
Menu("robocorp.refreshRobotsView", MenuGroup.NAVIGATION),
],
),
# TreeView(id="robocorp-tasks-tree", name="Tasks", contextual_title="Tasks"),
],
)
]
def get_views_containers():
activity_bar_contents = [
{
"id": tree_view_container.id,
"title": tree_view_container.title,
"icon": tree_view_container.icon,
}
for tree_view_container in TREE_VIEW_CONTAINERS
]
return {"activitybar": activity_bar_contents}
def get_tree_views():
ret = {}
for tree_view_container in TREE_VIEW_CONTAINERS:
ret[tree_view_container.id] = [
{"id": tree.id, "name": tree.name, "contextualTitle": tree.contextual_title}
for tree in tree_view_container.tree_views
]
return ret
def get_activation_events_for_json():
activation_events = []
for tree_view_container in TREE_VIEW_CONTAINERS:
for tree_viewer in tree_view_container.tree_views:
activation_events.append("onView:" + tree_viewer.id)
return activation_events
def get_menus():
menus = []
for tree_view_container in TREE_VIEW_CONTAINERS:
for tree_viewer in tree_view_container.tree_views:
menu: Menu
for menu in tree_viewer.menus:
when = f"view == {tree_viewer.id}"
if menu.when:
when += f" && {menu.when}"
item = {"command": menu.command_id, "when": when}
if menu.group:
item["group"] = menu.group.value
menus.append(item)
return menus
|
py | b4129174614f38d8ac4c780ca1ca55d217f1236c | from converter.qiskit import QuantumCircuit, ClassicalRegister, QuantumRegister
qr = QuantumRegister(2)
cr = ClassicalRegister(2)
qc = QuantumCircuit(qr, cr)
qc.cx(qr[0], qr[1])
qc.x(qr[1])
qc.cx(qr[0], qr[1])
qc.x(qr[0])
qc.cx(qr[0], qr[1])
qc.cx(qr[0], qr[1])
qc.measure(qr, cr)
|
py | b41292a15fc1ba1880365396bfccbcd1a714cf36 | """Test the Z-Wave JS diagnostics."""
from unittest.mock import patch
import pytest
from zwave_js_server.const import CommandClass
from zwave_js_server.event import Event
from zwave_js_server.model.value import _get_value_id_from_dict, get_value_id
from homeassistant.components.zwave_js.diagnostics import async_get_device_diagnostics
from homeassistant.components.zwave_js.helpers import get_device_id
from homeassistant.helpers.device_registry import async_get
from .common import PROPERTY_ULTRAVIOLET
from tests.components.diagnostics import (
get_diagnostics_for_config_entry,
get_diagnostics_for_device,
)
async def test_config_entry_diagnostics(hass, hass_client, integration):
"""Test the config entry level diagnostics data dump."""
with patch(
"homeassistant.components.zwave_js.diagnostics.dump_msgs",
return_value=[{"hello": "world"}, {"second": "msg"}],
):
assert await get_diagnostics_for_config_entry(
hass, hass_client, integration
) == [{"hello": "world"}, {"second": "msg"}]
async def test_device_diagnostics(
hass,
client,
multisensor_6,
integration,
hass_client,
version_state,
):
"""Test the device level diagnostics data dump."""
dev_reg = async_get(hass)
device = dev_reg.async_get_device({get_device_id(client, multisensor_6)})
assert device
# Update a value and ensure it is reflected in the node state
value_id = get_value_id(
multisensor_6, CommandClass.SENSOR_MULTILEVEL, PROPERTY_ULTRAVIOLET
)
event = Event(
type="value updated",
data={
"source": "node",
"event": "value updated",
"nodeId": multisensor_6.node_id,
"args": {
"commandClassName": "Multilevel Sensor",
"commandClass": 49,
"endpoint": 0,
"property": PROPERTY_ULTRAVIOLET,
"newValue": 1,
"prevValue": 0,
"propertyName": PROPERTY_ULTRAVIOLET,
},
},
)
multisensor_6.receive_event(event)
diagnostics_data = await get_diagnostics_for_device(
hass, hass_client, integration, device
)
assert diagnostics_data["versionInfo"] == {
"driverVersion": version_state["driverVersion"],
"serverVersion": version_state["serverVersion"],
"minSchemaVersion": 0,
"maxSchemaVersion": 0,
}
# Assert that the data returned doesn't match the stale node state data
assert diagnostics_data["state"] != multisensor_6.data
# Replace data for the value we updated and assert the new node data is the same
# as what's returned
updated_node_data = multisensor_6.data.copy()
for idx, value in enumerate(updated_node_data["values"]):
if _get_value_id_from_dict(multisensor_6, value) == value_id:
updated_node_data["values"][idx] = multisensor_6.values[
value_id
].data.copy()
assert diagnostics_data["state"] == updated_node_data
async def test_device_diagnostics_error(hass, integration):
"""Test the device diagnostics raises exception when an invalid device is used."""
dev_reg = async_get(hass)
device = dev_reg.async_get_or_create(
config_entry_id=integration.entry_id, identifiers={("test", "test")}
)
with pytest.raises(ValueError):
await async_get_device_diagnostics(hass, integration, device)
|
py | b41292bb1851846a11eda8c64fc3c8b69fea304e | ''' network '''
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.optimizers import RMSprop, Adam
''' visualize '''
import matplotlib.pyplot as plt
plt.style.use('seaborn')
def NeuralNetwork(entrada, num_classes, ta):
model = Sequential()
model.add(Dense(10, activation='relu', input_shape=(entrada,)))
model.add(Dropout(0.2))
model.add(Dense(10, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(num_classes, activation='sigmoid'))
model.compile(optimizer=Adam(lr=ta),
loss='binary_crossentropy',
metrics=['accuracy'])
return model
def normalization(data):
from sklearn import preprocessing
return preprocessing.normalize([data])
def standardization(data):
from sklearn import preprocessing
X_scaled = preprocessing.scale(data)
return X_scaled
def standardization_scalar(data):
'''
    Standardization (StandardScaler)
:param data:
:return: data scaled
'''
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
data_scaled = scaler.fit_transform(data)
return data_scaled
def normalize_max_min(data):
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
data_scaled = scaler.fit_transform(data)
return data_scaled
def proportion_split(total, verbose=False):
p_train = int(total/2)
p_test = int(p_train/2)
p_lim_test = int((p_train+p_test))
if verbose:
        print('Train(:%i) Test(%i:%i) Validation(%i:)' % (p_train, p_train, p_lim_test, p_lim_test))
print('-'*30)
return p_train, p_lim_test
else:
return p_train, p_lim_test
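# Illustrative split, assuming 100 samples: proportion_split(100) returns (50, 75),
# so callers can slice train = data[:50], test = data[50:75], validation = data[75:].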
def print_score(metricas, evaluation):
print('_' * 20)
print('Model Evaluate')
print('-' * 20)
for i in range(len(evaluation)):
print(metricas[i] + ' = %.2f' % (evaluation[i]))
print('-' * 20)
def plot_log_train(log):
chaves = list(log.keys())
print(chaves)
plt.figure(figsize=(15, 6))
for i in range(len(chaves)):
plt.plot(log[chaves[i]], '-o', label=chaves[i])
plt.legend()
plt.show()
def plots_log_train(log, save=None):
chaves = list(log.keys())
fig = plt.figure(figsize=(18, 5))
ax = fig.add_subplot(121)
ax.plot(log[chaves[0]], '-o', label=chaves[0])
ax.plot(log[chaves[2]], '-o', label=chaves[2])
ax.set_title('Loss')
ax.legend()
ax = fig.add_subplot(122)
ax.set_title('Accuracy')
ax.plot(log[chaves[1]], '-o', label=chaves[1])
ax.plot(log[chaves[3]], '-o', label=chaves[3])
ax.legend()
if save:
plt.savefig(save)
plt.show()
def plot_ROC(fpr, tpr, label, save=None):
''' plot Curve ROC '''
plt.plot([0, 1], [0, 1], 'k--')
plt.plot(fpr, tpr, label=label)
plt.xlabel('False positive rate')
plt.ylabel('True positive rate')
plt.title('ROC curve')
plt.legend(loc='best')
if save:
plt.savefig(save)
plt.show() |
py | b412935333299bdd2aea94e8eb6de97c439f393c | # qubit number=3
# total number=2
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
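# Worked example with the values used below (a = "11", b = "1"): for x = "10"
# the dot product is 1*1 + 1*0 = 1 (mod 2), so f("10") = xor("1", "1") = "0".
# Note bitwise_xor returns its result bit-reversed, which is harmless here
# because it only ever receives single-character strings.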
def build_oracle(n: int, f: Callable[[str], str]) -> QuantumCircuit:
# implement the oracle O_f
# NOTE: use multi_control_toffoli_gate ('noancilla' mode)
# https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
# https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
# https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
# oracle.draw('mpl', filename=(kernel + '-oracle.png'))
return oracle
def build_circuit(n: int, f: Callable[[str], str]) -> QuantumCircuit:
# implement the Bernstein-Vazirani circuit
zero = np.binary_repr(0, n)
b = f(zero)
# initial n + 1 bits
input_qubit = QuantumRegister(n+1, "qc")
classicals = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classicals)
# inverse last one (can be omitted if using O_f^\pm)
prog.x(input_qubit[n])
# circuit begin
prog.h(input_qubit[1]) # number=1
# apply H to get superposition
for i in range(n):
prog.h(input_qubit[i])
prog.h(input_qubit[n])
prog.barrier()
# apply oracle O_f
oracle = build_oracle(n, f)
prog.append(
oracle.to_gate(),
[input_qubit[i] for i in range(n)] + [input_qubit[n]])
# apply H back (QFT on Z_2^n)
for i in range(n):
prog.h(input_qubit[i])
prog.barrier()
# measure
return prog
def get_statevector(prog: QuantumCircuit) -> Any:
state_backend = Aer.get_backend('statevector_simulator')
statevec = execute(prog, state_backend).result()
quantum_state = statevec.get_statevector()
qubits = round(log2(len(quantum_state)))
quantum_state = {
"|" + np.binary_repr(i, qubits) + ">": quantum_state[i]
for i in range(2 ** qubits)
}
return quantum_state
def evaluate(backend_str: str, prog: QuantumCircuit, shots: int, b: str) -> Any:
# Q: which backend should we use?
# get state vector
quantum_state = get_statevector(prog)
# get simulate results
# provider = IBMQ.load_account()
# backend = provider.get_backend(backend_str)
# qobj = compile(prog, backend, shots)
# job = backend.run(qobj)
# job.result()
backend = Aer.get_backend(backend_str)
# transpile/schedule -> assemble -> backend.run
results = execute(prog, backend, shots=shots).result()
counts = results.get_counts()
a = Counter(counts).most_common(1)[0][0][::-1]
return {
"measurements": counts,
# "state": statevec,
"quantum_state": quantum_state,
"a": a,
"b": b
}
def bernstein_test_1(rep: str):
"""011 . x + 1"""
a = "011"
b = "1"
return bitwise_xor(bitwise_dot(a, rep), b)
def bernstein_test_2(rep: str):
"""000 . x + 0"""
a = "000"
b = "0"
return bitwise_xor(bitwise_dot(a, rep), b)
def bernstein_test_3(rep: str):
"""111 . x + 1"""
a = "111"
b = "1"
return bitwise_xor(bitwise_dot(a, rep), b)
if __name__ == "__main__":
n = 2
a = "11"
b = "1"
f = lambda rep: \
bitwise_xor(bitwise_dot(a, rep), b)
prog = build_circuit(n, f)
sample_shot =4000
writefile = open("../data/startQiskit_noisy0.csv", "w")
# prog.draw('mpl', filename=(kernel + '.png'))
backend = FakeYorktown()
circuit1 = transpile(prog, FakeYorktown())
circuit1.h(qubit=2)
circuit1.x(qubit=3)
circuit1.measure_all()
info = execute(circuit1,backend=backend, shots=sample_shot).result().get_counts()
print(info, file=writefile)
print("results end", file=writefile)
print(circuit1.depth(), file=writefile)
print(circuit1, file=writefile)
writefile.close()
|
py | b4129389fd27287a608503b9f45351ff918aea4e | # -*- coding: utf-8 -*-
# Copyright (c) 2006-2007, 2009-2014 LOGILAB S.A. (Paris, FRANCE) <[email protected]>
# Copyright (c) 2009 Mads Kiilerich <[email protected]>
# Copyright (c) 2010 Daniel Harding <[email protected]>
# Copyright (c) 2012-2014 Google, Inc.
# Copyright (c) 2012 FELD Boris <[email protected]>
# Copyright (c) 2013-2018 Claudiu Popa <[email protected]>
# Copyright (c) 2014 Brett Cannon <[email protected]>
# Copyright (c) 2014 Ricardo Gemignani <[email protected]>
# Copyright (c) 2014 Arun Persaud <[email protected]>
# Copyright (c) 2015 Dmitry Pribysh <[email protected]>
# Copyright (c) 2015 Florian Bruhin <[email protected]>
# Copyright (c) 2015 Radu Ciorba <[email protected]>
# Copyright (c) 2015 Ionel Cristian Maries <[email protected]>
# Copyright (c) 2016, 2018 Ashley Whetter <[email protected]>
# Copyright (c) 2016-2017 Łukasz Rogalski <[email protected]>
# Copyright (c) 2016-2017 Moises Lopez <[email protected]>
# Copyright (c) 2016 Brian C. Lane <[email protected]>
# Copyright (c) 2017-2018 hippo91 <[email protected]>
# Copyright (c) 2017 ttenhoeve-aa <[email protected]>
# Copyright (c) 2018 Bryce Guinta <[email protected]>
# Copyright (c) 2018 Bryce Guinta <[email protected]>
# Copyright (c) 2018 Ville Skyttä <[email protected]>
# Copyright (c) 2018 Brian Shaginaw <[email protected]>
# Copyright (c) 2018 Caio Carrara <[email protected]>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/COPYING
"""some functions that may be useful for various checkers
"""
import builtins
from functools import lru_cache, partial
import itertools
import numbers
import re
import sys
import string
from typing import Optional, Iterable, Tuple, Callable, Set, Union, Match, Dict, List
import _string # pylint: disable=wrong-import-position, wrong-import-order
import astroid
from astroid.exceptions import _NonDeducibleTypeHierarchy
from astroid import bases as _bases
from astroid import scoped_nodes
BUILTINS_NAME = builtins.__name__
COMP_NODE_TYPES = (
astroid.ListComp,
astroid.SetComp,
astroid.DictComp,
astroid.GeneratorExp,
)
PY3K = sys.version_info[0] == 3
if not PY3K:
EXCEPTIONS_MODULE = "exceptions"
else:
EXCEPTIONS_MODULE = "builtins"
ABC_METHODS = {
"abc.abstractproperty",
"abc.abstractmethod",
"abc.abstractclassmethod",
"abc.abstractstaticmethod",
}
ITER_METHOD = "__iter__"
AITER_METHOD = "__aiter__"
NEXT_METHOD = "__next__"
GETITEM_METHOD = "__getitem__"
CLASS_GETITEM_METHOD = "__class_getitem__"
SETITEM_METHOD = "__setitem__"
DELITEM_METHOD = "__delitem__"
CONTAINS_METHOD = "__contains__"
KEYS_METHOD = "keys"
# Dictionary which maps the number of expected parameters a
# special method can have to a set of special methods.
# The following keys are used to denote the parameters restrictions:
#
# * None: variable number of parameters
# * number: exactly that number of parameters
# * tuple: this are the odd ones. Basically it means that the function
# can work with any number of arguments from that tuple,
# although it's best to implement it in order to accept
# all of them.
_SPECIAL_METHODS_PARAMS = {
None: ("__new__", "__init__", "__call__"),
0: (
"__del__",
"__repr__",
"__str__",
"__bytes__",
"__hash__",
"__bool__",
"__dir__",
"__len__",
"__length_hint__",
"__iter__",
"__reversed__",
"__neg__",
"__pos__",
"__abs__",
"__invert__",
"__complex__",
"__int__",
"__float__",
"__neg__",
"__pos__",
"__abs__",
"__complex__",
"__int__",
"__float__",
"__index__",
"__enter__",
"__aenter__",
"__getnewargs_ex__",
"__getnewargs__",
"__getstate__",
"__reduce__",
"__copy__",
"__unicode__",
"__nonzero__",
"__await__",
"__aiter__",
"__anext__",
"__fspath__",
),
1: (
"__format__",
"__lt__",
"__le__",
"__eq__",
"__ne__",
"__gt__",
"__ge__",
"__getattr__",
"__getattribute__",
"__delattr__",
"__delete__",
"__instancecheck__",
"__subclasscheck__",
"__getitem__",
"__missing__",
"__delitem__",
"__contains__",
"__add__",
"__sub__",
"__mul__",
"__truediv__",
"__floordiv__",
"__mod__",
"__divmod__",
"__lshift__",
"__rshift__",
"__and__",
"__xor__",
"__or__",
"__radd__",
"__rsub__",
"__rmul__",
"__rtruediv__",
"__rmod__",
"__rdivmod__",
"__rpow__",
"__rlshift__",
"__rrshift__",
"__rand__",
"__rxor__",
"__ror__",
"__iadd__",
"__isub__",
"__imul__",
"__itruediv__",
"__ifloordiv__",
"__imod__",
"__ilshift__",
"__irshift__",
"__iand__",
"__ixor__",
"__ior__",
"__ipow__",
"__setstate__",
"__reduce_ex__",
"__deepcopy__",
"__cmp__",
"__matmul__",
"__rmatmul__",
"__div__",
),
2: ("__setattr__", "__get__", "__set__", "__setitem__", "__set_name__"),
3: ("__exit__", "__aexit__"),
(0, 1): ("__round__",),
}
SPECIAL_METHODS_PARAMS = {
name: params
for params, methods in _SPECIAL_METHODS_PARAMS.items()
for name in methods # type: ignore
}
PYMETHODS = set(SPECIAL_METHODS_PARAMS)
class NoSuchArgumentError(Exception):
pass
def is_inside_except(node):
"""Returns true if node is inside the name of an except handler."""
current = node
while current and not isinstance(current.parent, astroid.ExceptHandler):
current = current.parent
return current and current is current.parent.name
def is_inside_lambda(node: astroid.node_classes.NodeNG) -> bool:
"""Return true if given node is inside lambda"""
parent = node.parent
while parent is not None:
if isinstance(parent, astroid.Lambda):
return True
parent = parent.parent
return False
def get_all_elements(
node: astroid.node_classes.NodeNG
) -> Iterable[astroid.node_classes.NodeNG]:
"""Recursively returns all atoms in nested lists and tuples."""
if isinstance(node, (astroid.Tuple, astroid.List)):
for child in node.elts:
for e in get_all_elements(child):
yield e
else:
yield node
def clobber_in_except(
node: astroid.node_classes.NodeNG
) -> Tuple[bool, Tuple[str, str]]:
"""Checks if an assignment node in an except handler clobbers an existing
variable.
Returns (True, args for W0623) if assignment clobbers an existing variable,
(False, None) otherwise.
"""
if isinstance(node, astroid.AssignAttr):
return True, (node.attrname, "object %r" % (node.expr.as_string(),))
if isinstance(node, astroid.AssignName):
name = node.name
if is_builtin(name):
return (True, (name, "builtins"))
stmts = node.lookup(name)[1]
if stmts and not isinstance(
stmts[0].assign_type(),
(astroid.Assign, astroid.AugAssign, astroid.ExceptHandler),
):
return True, (name, "outer scope (line %s)" % stmts[0].fromlineno)
return False, None
def is_super(node: astroid.node_classes.NodeNG) -> bool:
"""return True if the node is referencing the "super" builtin function
"""
if getattr(node, "name", None) == "super" and node.root().name == BUILTINS_NAME:
return True
return False
def is_error(node: astroid.node_classes.NodeNG) -> bool:
"""return true if the function does nothing but raising an exception"""
for child_node in node.get_children():
if isinstance(child_node, astroid.Raise):
return True
return False
builtins = builtins.__dict__.copy() # type: ignore
SPECIAL_BUILTINS = ("__builtins__",) # '__path__', '__file__')
def is_builtin_object(node: astroid.node_classes.NodeNG) -> bool:
"""Returns True if the given node is an object from the __builtin__ module."""
return node and node.root().name == BUILTINS_NAME
def is_builtin(name: str) -> bool:
"""return true if <name> could be considered as a builtin defined by python
"""
return name in builtins or name in SPECIAL_BUILTINS # type: ignore
def is_defined_in_scope(
var_node: astroid.node_classes.NodeNG,
varname: str,
scope: astroid.node_classes.NodeNG,
) -> bool:
if isinstance(scope, astroid.If):
for node in scope.body:
if (
isinstance(node, astroid.Assign)
and any(
isinstance(target, astroid.AssignName) and target.name == varname
for target in node.targets
)
) or (isinstance(node, astroid.Nonlocal) and varname in node.names):
return True
elif isinstance(scope, (COMP_NODE_TYPES, astroid.For)):
for ass_node in scope.nodes_of_class(astroid.AssignName):
if ass_node.name == varname:
return True
elif isinstance(scope, astroid.With):
for expr, ids in scope.items:
if expr.parent_of(var_node):
break
if ids and isinstance(ids, astroid.AssignName) and ids.name == varname:
return True
elif isinstance(scope, (astroid.Lambda, astroid.FunctionDef)):
if scope.args.is_argument(varname):
# If the name is found inside a default value
# of a function, then let the search continue
# in the parent's tree.
if scope.args.parent_of(var_node):
try:
scope.args.default_value(varname)
scope = scope.parent
is_defined_in_scope(var_node, varname, scope)
except astroid.NoDefault:
pass
return True
if getattr(scope, "name", None) == varname:
return True
elif isinstance(scope, astroid.ExceptHandler):
if isinstance(scope.name, astroid.AssignName):
ass_node = scope.name
if ass_node.name == varname:
return True
return False
def is_defined_before(var_node: astroid.node_classes.NodeNG) -> bool:
"""return True if the variable node is defined by a parent node (list,
set, dict, or generator comprehension, lambda) or in a previous sibling
node on the same line (statement_defining ; statement_using)
"""
varname = var_node.name
_node = var_node.parent
while _node:
if is_defined_in_scope(var_node, varname, _node):
return True
_node = _node.parent
# possibly multiple statements on the same line using semi colon separator
stmt = var_node.statement()
_node = stmt.previous_sibling()
lineno = stmt.fromlineno
while _node and _node.fromlineno == lineno:
for assign_node in _node.nodes_of_class(astroid.AssignName):
if assign_node.name == varname:
return True
for imp_node in _node.nodes_of_class((astroid.ImportFrom, astroid.Import)):
if varname in [name[1] or name[0] for name in imp_node.names]:
return True
_node = _node.previous_sibling()
return False
def is_default_argument(node: astroid.node_classes.NodeNG) -> bool:
"""return true if the given Name node is used in function or lambda
default argument's value
"""
parent = node.scope()
if isinstance(parent, (astroid.FunctionDef, astroid.Lambda)):
for default_node in parent.args.defaults:
for default_name_node in default_node.nodes_of_class(astroid.Name):
if default_name_node is node:
return True
return False
def is_func_decorator(node: astroid.node_classes.NodeNG) -> bool:
"""return true if the name is used in function decorator"""
parent = node.parent
while parent is not None:
if isinstance(parent, astroid.Decorators):
return True
if parent.is_statement or isinstance(
parent,
(astroid.Lambda, scoped_nodes.ComprehensionScope, scoped_nodes.ListComp),
):
break
parent = parent.parent
return False
def is_ancestor_name(
frame: astroid.node_classes.NodeNG, node: astroid.node_classes.NodeNG
) -> bool:
"""return True if `frame` is an astroid.Class node with `node` in the
subtree of its bases attribute
"""
try:
bases = frame.bases
except AttributeError:
return False
for base in bases:
if node in base.nodes_of_class(astroid.Name):
return True
return False
def assign_parent(node: astroid.node_classes.NodeNG) -> astroid.node_classes.NodeNG:
"""return the higher parent which is not an AssignName, Tuple or List node
"""
while node and isinstance(node, (astroid.AssignName, astroid.Tuple, astroid.List)):
node = node.parent
return node
def overrides_a_method(class_node: astroid.node_classes.NodeNG, name: str) -> bool:
"""return True if <name> is a method overridden from an ancestor"""
for ancestor in class_node.ancestors():
if name in ancestor and isinstance(ancestor[name], astroid.FunctionDef):
return True
return False
def check_messages(*messages: str) -> Callable:
"""decorator to store messages that are handled by a checker method"""
def store_messages(func):
func.checks_msgs = messages
return func
return store_messages
class IncompleteFormatString(Exception):
"""A format string ended in the middle of a format specifier."""
class UnsupportedFormatCharacter(Exception):
"""A format character in a format string is not one of the supported
format characters."""
def __init__(self, index):
Exception.__init__(self, index)
self.index = index
def parse_format_string(
format_string: str
) -> Tuple[Set[str], int, Dict[str, str], List[str]]:
"""Parses a format string, returning a tuple of (keys, num_args), where keys
is the set of mapping keys in the format string, and num_args is the number
of arguments required by the format string. Raises
IncompleteFormatString or UnsupportedFormatCharacter if a
parse error occurs."""
keys = set()
key_types = dict()
pos_types = []
num_args = 0
def next_char(i):
i += 1
if i == len(format_string):
raise IncompleteFormatString
return (i, format_string[i])
i = 0
while i < len(format_string):
char = format_string[i]
if char == "%":
i, char = next_char(i)
# Parse the mapping key (optional).
key = None
if char == "(":
depth = 1
i, char = next_char(i)
key_start = i
while depth != 0:
if char == "(":
depth += 1
elif char == ")":
depth -= 1
i, char = next_char(i)
key_end = i - 1
key = format_string[key_start:key_end]
# Parse the conversion flags (optional).
while char in "#0- +":
i, char = next_char(i)
# Parse the minimum field width (optional).
if char == "*":
num_args += 1
i, char = next_char(i)
else:
while char in string.digits:
i, char = next_char(i)
# Parse the precision (optional).
if char == ".":
i, char = next_char(i)
if char == "*":
num_args += 1
i, char = next_char(i)
else:
while char in string.digits:
i, char = next_char(i)
# Parse the length modifier (optional).
if char in "hlL":
i, char = next_char(i)
# Parse the conversion type (mandatory).
if PY3K:
flags = "diouxXeEfFgGcrs%a"
else:
flags = "diouxXeEfFgGcrs%"
if char not in flags:
raise UnsupportedFormatCharacter(i)
if key:
keys.add(key)
key_types[key] = char
elif char != "%":
num_args += 1
pos_types.append(char)
i += 1
return keys, num_args, key_types, pos_types
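# Illustrative call: parse_format_string("%(name)s is %d years old") returns
# ({'name'}, 1, {'name': 's'}, ['d']) -- one mapping key, one positional
# argument, and their respective conversion types.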
def split_format_field_names(format_string) -> Tuple[str, Iterable[Tuple[bool, str]]]:
try:
return _string.formatter_field_name_split(format_string)
except ValueError:
raise IncompleteFormatString()
def collect_string_fields(format_string) -> Iterable[Optional[str]]:
""" Given a format string, return an iterator
of all the valid format fields. It handles nested fields
as well.
"""
formatter = string.Formatter()
try:
parseiterator = formatter.parse(format_string)
for result in parseiterator:
if all(item is None for item in result[1:]):
# not a replacement format
continue
name = result[1]
nested = result[2]
yield name
if nested:
for field in collect_string_fields(nested):
yield field
except ValueError as exc:
# Probably the format string is invalid.
if exc.args[0].startswith("cannot switch from manual"):
# On Jython, parsing a string with both manual
# and automatic positions will fail with a ValueError,
# while on CPython it will simply return the fields,
# the validation being done in the interpreter (?).
# We're just returning two mixed fields in order
# to trigger the format-combined-specification check.
yield ""
yield "1"
return
raise IncompleteFormatString(format_string)
def parse_format_method_string(
format_string: str
) -> Tuple[List[Tuple[str, List[Tuple[bool, str]]]], int, int]:
"""
Parses a PEP 3101 format string, returning a tuple of
(keyword_arguments, implicit_pos_args_cnt, explicit_pos_args),
where keyword_arguments is the set of mapping keys in the format string, implicit_pos_args_cnt
is the number of arguments required by the format string and
explicit_pos_args is the number of arguments passed with the position.
"""
keyword_arguments = []
implicit_pos_args_cnt = 0
explicit_pos_args = set()
for name in collect_string_fields(format_string):
if name and str(name).isdigit():
explicit_pos_args.add(str(name))
elif name:
keyname, fielditerator = split_format_field_names(name)
if isinstance(keyname, numbers.Number):
# In Python 2 it will return long which will lead
# to different output between 2 and 3
explicit_pos_args.add(str(keyname))
keyname = int(keyname)
try:
keyword_arguments.append((keyname, list(fielditerator)))
except ValueError:
raise IncompleteFormatString()
else:
implicit_pos_args_cnt += 1
return keyword_arguments, implicit_pos_args_cnt, len(explicit_pos_args)
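# Illustrative call: parse_format_method_string("{0} {} {name}") returns
# ([('name', [])], 1, 1) -- one keyword field, one implicit positional field
# and one explicit positional field.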
def is_attr_protected(attrname: str) -> bool:
"""return True if attribute name is protected (start with _ and some other
details), False otherwise.
"""
return (
attrname[0] == "_"
and attrname != "_"
and not (attrname.startswith("__") and attrname.endswith("__"))
)
def node_frame_class(
node: astroid.node_classes.NodeNG
) -> Optional[astroid.node_classes.NodeNG]:
"""return klass node for a method node (or a staticmethod or a
classmethod), return null otherwise
"""
klass = node.frame()
while klass is not None and not isinstance(klass, astroid.ClassDef):
if klass.parent is None:
klass = None
else:
klass = klass.parent.frame()
return klass
def is_attr_private(attrname: str) -> Optional[Match[str]]:
"""Check that attribute name is private (at least two leading underscores,
at most one trailing underscore)
"""
regex = re.compile("^_{2,}.*[^_]+_?$")
return regex.match(attrname)
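# For illustration: is_attr_private("__secret") matches, while "_secret" (a
# single leading underscore) and "__dunder__" (two trailing underscores) do not.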
def get_argument_from_call(
call_node: astroid.Call, position: int = None, keyword: str = None
) -> astroid.Name:
"""Returns the specified argument from a function call.
:param astroid.Call call_node: Node representing a function call to check.
:param int position: position of the argument.
:param str keyword: the keyword of the argument.
:returns: The node representing the argument, None if the argument is not found.
:rtype: astroid.Name
:raises ValueError: if both position and keyword are None.
:raises NoSuchArgumentError: if no argument at the provided position or with
the provided keyword.
"""
if position is None and keyword is None:
raise ValueError("Must specify at least one of: position or keyword.")
if position is not None:
try:
return call_node.args[position]
except IndexError:
pass
if keyword and call_node.keywords:
for arg in call_node.keywords:
if arg.arg == keyword:
return arg.value
raise NoSuchArgumentError
def inherit_from_std_ex(node: astroid.node_classes.NodeNG) -> bool:
"""
Return true if the given class node is subclass of
exceptions.Exception.
"""
if (
node.name in ("Exception", "BaseException")
and node.root().name == EXCEPTIONS_MODULE
):
return True
if not hasattr(node, "ancestors"):
return False
return any(inherit_from_std_ex(parent) for parent in node.ancestors(recurs=True))
def error_of_type(handler: astroid.ExceptHandler, error_type) -> bool:
"""
Check if the given exception handler catches
the given error_type.
The *handler* parameter is a node, representing an ExceptHandler node.
The *error_type* can be an exception, such as AttributeError,
the name of an exception, or it can be a tuple of errors.
The function will return True if the handler catches any of the
given errors.
"""
def stringify_error(error):
if not isinstance(error, str):
return error.__name__
return error
if not isinstance(error_type, tuple):
error_type = (error_type,) # type: ignore
expected_errors = {stringify_error(error) for error in error_type} # type: ignore
if not handler.type:
return True
return handler.catch(expected_errors)
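# For illustration (handler nodes would normally come from an astroid-parsed
# module): for a handler built from "except (OSError, ValueError):",
# error_of_type(handler, OSError) and error_of_type(handler, "ValueError") are
# both true, and a bare "except:" handler matches any error_type because
# handler.type is None.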
def decorated_with_property(node: astroid.FunctionDef) -> bool:
""" Detect if the given function node is decorated with a property. """
if not node.decorators:
return False
for decorator in node.decorators.nodes:
if not isinstance(decorator, astroid.Name):
continue
try:
if _is_property_decorator(decorator):
return True
except astroid.InferenceError:
pass
return False
def _is_property_decorator(decorator: astroid.Name) -> bool:
for infered in decorator.infer():
if isinstance(infered, astroid.ClassDef):
if infered.root().name == BUILTINS_NAME and infered.name == "property":
return True
for ancestor in infered.ancestors():
if (
ancestor.name == "property"
and ancestor.root().name == BUILTINS_NAME
):
return True
return False
def decorated_with(func: astroid.FunctionDef, qnames: Iterable[str]) -> bool:
"""Determine if the `func` node has a decorator with the qualified name `qname`."""
decorators = func.decorators.nodes if func.decorators else []
for decorator_node in decorators:
try:
if any(
i is not None and i.qname() in qnames for i in decorator_node.infer()
):
return True
except astroid.InferenceError:
continue
return False
@lru_cache(maxsize=1024)
def unimplemented_abstract_methods(
node: astroid.node_classes.NodeNG, is_abstract_cb: astroid.FunctionDef = None
) -> Dict[str, astroid.node_classes.NodeNG]:
"""
Get the unimplemented abstract methods for the given *node*.
A method can be considered abstract if the callback *is_abstract_cb*
returns a ``True`` value. The check defaults to verifying that
a method is decorated with abstract methods.
The function will work only for new-style classes. For old-style
classes, it will simply return an empty dictionary.
For the rest of them, it will return a dictionary of abstract method
names and their inferred objects.
"""
if is_abstract_cb is None:
is_abstract_cb = partial(decorated_with, qnames=ABC_METHODS)
visited = {} # type: Dict[str, astroid.node_classes.NodeNG]
try:
mro = reversed(node.mro())
except NotImplementedError:
# Old style class, it will not have a mro.
return {}
except astroid.ResolveError:
        # Probably inconsistent hierarchy, don't try
# to figure this out here.
return {}
for ancestor in mro:
for obj in ancestor.values():
infered = obj
if isinstance(obj, astroid.AssignName):
infered = safe_infer(obj)
if not infered:
# Might be an abstract function,
# but since we don't have enough information
# in order to take this decision, we're taking
# the *safe* decision instead.
if obj.name in visited:
del visited[obj.name]
continue
if not isinstance(infered, astroid.FunctionDef):
if obj.name in visited:
del visited[obj.name]
if isinstance(infered, astroid.FunctionDef):
# It's critical to use the original name,
# since after inferring, an object can be something
# else than expected, as in the case of the
# following assignment.
#
# class A:
# def keys(self): pass
# __iter__ = keys
abstract = is_abstract_cb(infered)
if abstract:
visited[obj.name] = infered
elif not abstract and obj.name in visited:
del visited[obj.name]
return visited
def find_try_except_wrapper_node(
node: astroid.node_classes.NodeNG
) -> Union[astroid.ExceptHandler, astroid.TryExcept]:
"""Return the ExceptHandler or the TryExcept node in which the node is."""
current = node
ignores = (astroid.ExceptHandler, astroid.TryExcept)
while current and not isinstance(current.parent, ignores):
current = current.parent
if current and isinstance(current.parent, ignores):
return current.parent
return None
def is_from_fallback_block(node: astroid.node_classes.NodeNG) -> bool:
"""Check if the given node is from a fallback import block."""
context = find_try_except_wrapper_node(node)
if not context:
return False
if isinstance(context, astroid.ExceptHandler):
other_body = context.parent.body
handlers = context.parent.handlers
else:
other_body = itertools.chain.from_iterable(
handler.body for handler in context.handlers
)
handlers = context.handlers
has_fallback_imports = any(
isinstance(import_node, (astroid.ImportFrom, astroid.Import))
for import_node in other_body
)
ignores_import_error = _except_handlers_ignores_exception(handlers, ImportError)
return ignores_import_error or has_fallback_imports
def _except_handlers_ignores_exception(
handlers: astroid.ExceptHandler, exception
) -> bool:
func = partial(error_of_type, error_type=(exception,))
return any(map(func, handlers))
def get_exception_handlers(
node: astroid.node_classes.NodeNG, exception=Exception
) -> List[astroid.ExceptHandler]:
"""Return the collections of handlers handling the exception in arguments.
Args:
node (astroid.NodeNG): A node that is potentially wrapped in a try except.
exception (builtin.Exception or str): exception or name of the exception.
Returns:
list: the collection of handlers that are handling the exception or None.
"""
context = find_try_except_wrapper_node(node)
if isinstance(context, astroid.TryExcept):
return [
handler for handler in context.handlers if error_of_type(handler, exception)
]
return None
def is_node_inside_try_except(node: astroid.Raise) -> bool:
"""Check if the node is directly under a Try/Except statement.
(but not under an ExceptHandler!)
Args:
node (astroid.Raise): the node raising the exception.
Returns:
bool: True if the node is inside a try/except statement, False otherwise.
"""
context = find_try_except_wrapper_node(node)
return isinstance(context, astroid.TryExcept)
def node_ignores_exception(
node: astroid.node_classes.NodeNG, exception=Exception
) -> bool:
"""Check if the node is in a TryExcept which handles the given exception.
If the exception is not given, the function is going to look for bare
excepts.
"""
managing_handlers = get_exception_handlers(node, exception)
if not managing_handlers:
return False
return any(managing_handlers)
def class_is_abstract(node: astroid.ClassDef) -> bool:
"""return true if the given class node should be considered as an abstract
class
"""
for method in node.methods():
if method.parent.frame() is node:
if method.is_abstract(pass_is_abstract=False):
return True
return False
def _supports_protocol_method(value: astroid.node_classes.NodeNG, attr: str) -> bool:
try:
attributes = value.getattr(attr)
except astroid.NotFoundError:
return False
first = attributes[0]
if isinstance(first, astroid.AssignName):
if isinstance(first.parent.value, astroid.Const):
return False
return True
def is_comprehension(node: astroid.node_classes.NodeNG) -> bool:
comprehensions = (
astroid.ListComp,
astroid.SetComp,
astroid.DictComp,
astroid.GeneratorExp,
)
return isinstance(node, comprehensions)
def _supports_mapping_protocol(value: astroid.node_classes.NodeNG) -> bool:
return _supports_protocol_method(
value, GETITEM_METHOD
) and _supports_protocol_method(value, KEYS_METHOD)
def _supports_membership_test_protocol(value: astroid.node_classes.NodeNG) -> bool:
return _supports_protocol_method(value, CONTAINS_METHOD)
def _supports_iteration_protocol(value: astroid.node_classes.NodeNG) -> bool:
return _supports_protocol_method(value, ITER_METHOD) or _supports_protocol_method(
value, GETITEM_METHOD
)
def _supports_async_iteration_protocol(value: astroid.node_classes.NodeNG) -> bool:
return _supports_protocol_method(value, AITER_METHOD)
def _supports_getitem_protocol(value: astroid.node_classes.NodeNG) -> bool:
return _supports_protocol_method(value, GETITEM_METHOD)
def _supports_setitem_protocol(value: astroid.node_classes.NodeNG) -> bool:
return _supports_protocol_method(value, SETITEM_METHOD)
def _supports_delitem_protocol(value: astroid.node_classes.NodeNG) -> bool:
return _supports_protocol_method(value, DELITEM_METHOD)
def _is_abstract_class_name(name: str) -> bool:
lname = name.lower()
is_mixin = lname.endswith("mixin")
is_abstract = lname.startswith("abstract")
is_base = lname.startswith("base") or lname.endswith("base")
return is_mixin or is_abstract or is_base
def is_inside_abstract_class(node: astroid.node_classes.NodeNG) -> bool:
while node is not None:
if isinstance(node, astroid.ClassDef):
if class_is_abstract(node):
return True
name = getattr(node, "name", None)
if name is not None and _is_abstract_class_name(name):
return True
node = node.parent
return False
def _supports_protocol(
value: astroid.node_classes.NodeNG, protocol_callback: astroid.FunctionDef
) -> bool:
if isinstance(value, astroid.ClassDef):
if not has_known_bases(value):
return True
# classobj can only be iterable if it has an iterable metaclass
meta = value.metaclass()
if meta is not None:
if protocol_callback(meta):
return True
if isinstance(value, astroid.BaseInstance):
if not has_known_bases(value):
return True
if value.has_dynamic_getattr():
return True
if protocol_callback(value):
return True
# TODO: this is not needed in astroid 2.0, where we can
# check the type using a virtual base class instead.
if (
isinstance(value, _bases.Proxy)
and isinstance(value._proxied, astroid.BaseInstance)
and has_known_bases(value._proxied)
):
value = value._proxied
return protocol_callback(value)
return False
def is_iterable(value: astroid.node_classes.NodeNG, check_async: bool = False) -> bool:
if check_async:
protocol_check = _supports_async_iteration_protocol
else:
protocol_check = _supports_iteration_protocol
return _supports_protocol(value, protocol_check)
def is_mapping(value: astroid.node_classes.NodeNG) -> bool:
return _supports_protocol(value, _supports_mapping_protocol)
def supports_membership_test(value: astroid.node_classes.NodeNG) -> bool:
supported = _supports_protocol(value, _supports_membership_test_protocol)
return supported or is_iterable(value)
def supports_getitem(value: astroid.node_classes.NodeNG) -> bool:
if isinstance(value, astroid.ClassDef):
if _supports_protocol_method(value, CLASS_GETITEM_METHOD):
return True
return _supports_protocol(value, _supports_getitem_protocol)
def supports_setitem(value: astroid.node_classes.NodeNG) -> bool:
return _supports_protocol(value, _supports_setitem_protocol)
def supports_delitem(value: astroid.node_classes.NodeNG) -> bool:
return _supports_protocol(value, _supports_delitem_protocol)
# TODO(cpopa): deprecate these or leave them as aliases?
@lru_cache(maxsize=1024)
def safe_infer(
node: astroid.node_classes.NodeNG, context=None
) -> Optional[astroid.node_classes.NodeNG]:
"""Return the inferred value for the given node.
Return None if inference failed or if there is some ambiguity (more than
one node has been inferred).
"""
try:
inferit = node.infer(context=context)
value = next(inferit)
except astroid.InferenceError:
return None
try:
next(inferit)
return None # None if there is ambiguity on the inferred node
except astroid.InferenceError:
return None # there is some kind of ambiguity
except StopIteration:
return value
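# Usage sketch added for clarity (illustrative, assumed example -- not part of the
# original module): safe_infer returns the single inferred node when inference is
# unambiguous and None otherwise, e.g.:
#
#     node = astroid.extract_node("1 + 1")
#     safe_infer(node)        # -> the inferred Const node (value 2)
#     node = astroid.extract_node("1 if unknown_flag else 'a'")
#     safe_infer(node)        # -> None, more than one value can be inferred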
def has_known_bases(klass: astroid.ClassDef, context=None) -> bool:
"""Return true if all base classes of a class could be inferred."""
try:
return klass._all_bases_known
except AttributeError:
pass
for base in klass.bases:
result = safe_infer(base, context=context)
# TODO: check for A->B->A->B pattern in class structure too?
if (
not isinstance(result, astroid.ClassDef)
or result is klass
or not has_known_bases(result, context=context)
):
klass._all_bases_known = False
return False
klass._all_bases_known = True
return True
def is_none(node: astroid.node_classes.NodeNG) -> bool:
return (
node is None
or (isinstance(node, astroid.Const) and node.value is None)
or (isinstance(node, astroid.Name) and node.name == "None")
)
def node_type(node: astroid.node_classes.NodeNG) -> Optional[type]:
"""Return the inferred type for `node`
If there is more than one possible type, or if inferred type is Uninferable or None,
return None
"""
# check there is only one possible type for the assign node. Else we
# don't handle it for now
types = set()
try:
for var_type in node.infer():
if var_type == astroid.Uninferable or is_none(var_type):
continue
types.add(var_type)
if len(types) > 1:
return None
except astroid.InferenceError:
return None
return types.pop() if types else None
def is_registered_in_singledispatch_function(node: astroid.FunctionDef) -> bool:
"""Check if the given function node is a singledispatch function."""
singledispatch_qnames = (
"functools.singledispatch",
"singledispatch.singledispatch",
)
if not isinstance(node, astroid.FunctionDef):
return False
decorators = node.decorators.nodes if node.decorators else []
for decorator in decorators:
# func.register are function calls
if not isinstance(decorator, astroid.Call):
continue
func = decorator.func
if not isinstance(func, astroid.Attribute) or func.attrname != "register":
continue
try:
func_def = next(func.expr.infer())
except astroid.InferenceError:
continue
if isinstance(func_def, astroid.FunctionDef):
# pylint: disable=redundant-keyword-arg; some flow inference goes wrong here
return decorated_with(func_def, singledispatch_qnames)
return False
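# Illustrative sketch (assumed example, added for clarity): the decorator pattern this
# helper recognizes is a function registered on a singledispatch function, e.g.:
#
#     from functools import singledispatch
#
#     @singledispatch
#     def process(arg):
#         ...
#
#     @process.register(int)      # <- a FunctionDef decorated like this returns True
#     def _process_int(arg):
#         ...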
def get_node_last_lineno(node: astroid.node_classes.NodeNG) -> int:
"""
Get the last lineno of the given node. For a simple statement this will just be node.lineno,
but for a node that has child statements (e.g. a method) this will be the lineno of the last
child statement recursively.
"""
# 'finalbody' is always the last clause in a try statement, if present
if getattr(node, "finalbody", False):
return get_node_last_lineno(node.finalbody[-1])
# For if, while, and for statements 'orelse' is always the last clause.
# For try statements 'orelse' is the last in the absence of a 'finalbody'
if getattr(node, "orelse", False):
return get_node_last_lineno(node.orelse[-1])
# try statements have the 'handlers' last if there is no 'orelse' or 'finalbody'
if getattr(node, "handlers", False):
return get_node_last_lineno(node.handlers[-1])
# All compound statements have a 'body'
if getattr(node, "body", False):
return get_node_last_lineno(node.body[-1])
# Not a compound statement
return node.lineno
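# Illustrative sketch (assumed example, added for clarity): for the statement below the
# helper returns 6, the lineno of the last statement of 'finalbody', which takes
# precedence over 'handlers' and 'body':
#
#     1  try:
#     2      pass
#     3  except ValueError:
#     4      pass
#     5  finally:
#     6      pass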
def is_postponed_evaluation_enabled(node: astroid.node_classes.NodeNG) -> bool:
"""Check if the postponed evaluation of annotations is enabled"""
name = "annotations"
module = node.root()
stmt = module.locals.get(name)
return (
stmt
and isinstance(stmt[0], astroid.ImportFrom)
and stmt[0].modname == "__future__"
)
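# Illustrative note (assumed example, added for clarity): the check is truthy only for
# nodes whose module contains the future import, i.e. a module starting with
#
#     from __future__ import annotations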
def is_subclass_of(child: astroid.ClassDef, parent: astroid.ClassDef) -> bool:
"""
Check if first node is a subclass of second node.
:param child: Node to check for subclass.
:param parent: Node to check for superclass.
:returns: True if child is derived from parent. False otherwise.
"""
if not all(isinstance(node, astroid.ClassDef) for node in (child, parent)):
return False
for ancestor in child.ancestors():
try:
if astroid.helpers.is_subtype(ancestor, parent):
return True
except _NonDeducibleTypeHierarchy:
continue
return False
|
py | b41293ff2e4141d56c039f57652ac0626690c239 | import os
import shutil
import re
for f in os.listdir():
new_name = re.sub("^q", "", f)
if new_name != f:
print(f"{f} -> {new_name}")
shutil.copy(f,new_name)
|
py | b4129425b6c7128c3a1dc5acf376d67cf4da6333 | from __future__ import unicode_literals
import boto
import six
import sure # noqa
from moto import mock_sns
from moto.sns.models import DEFAULT_TOPIC_POLICY, DEFAULT_EFFECTIVE_DELIVERY_POLICY
@mock_sns
def test_create_and_delete_topic():
conn = boto.connect_sns()
conn.create_topic("some-topic")
topics_json = conn.get_all_topics()
topics = topics_json["ListTopicsResponse"]["ListTopicsResult"]["Topics"]
topics.should.have.length_of(1)
topics[0]['TopicArn'].should.equal("arn:aws:sns:us-east-1:123456789012:some-topic")
# Delete the topic
conn.delete_topic(topics[0]['TopicArn'])
# And there should now be 0 topics
topics_json = conn.get_all_topics()
topics = topics_json["ListTopicsResponse"]["ListTopicsResult"]["Topics"]
topics.should.have.length_of(0)
@mock_sns
def test_topic_attributes():
conn = boto.connect_sns()
conn.create_topic("some-topic")
topics_json = conn.get_all_topics()
topic_arn = topics_json["ListTopicsResponse"]["ListTopicsResult"]["Topics"][0]['TopicArn']
attributes = conn.get_topic_attributes(topic_arn)['GetTopicAttributesResponse']['GetTopicAttributesResult']['Attributes']
attributes["TopicArn"].should.equal("arn:aws:sns:us-east-1:123456789012:some-topic")
attributes["Owner"].should.equal(123456789012)
attributes["Policy"].should.equal(DEFAULT_TOPIC_POLICY)
attributes["DisplayName"].should.equal("")
attributes["SubscriptionsPending"].should.equal(0)
attributes["SubscriptionsConfirmed"].should.equal(0)
attributes["SubscriptionsDeleted"].should.equal(0)
attributes["DeliveryPolicy"].should.equal("")
attributes["EffectiveDeliveryPolicy"].should.equal(DEFAULT_EFFECTIVE_DELIVERY_POLICY)
# boto can't handle prefix-mandatory strings:
# i.e. unicode on Python 2 -- u"foobar"
# and bytes on Python 3 -- b"foobar"
if six.PY2:
policy = {b"foo": b"bar"}
displayname = b"My display name"
delivery = {b"http": {b"defaultHealthyRetryPolicy": {b"numRetries": 5}}}
else:
policy = {u"foo": u"bar"}
displayname = u"My display name"
delivery = {u"http": {u"defaultHealthyRetryPolicy": {u"numRetries": 5}}}
conn.set_topic_attributes(topic_arn, "Policy", policy)
conn.set_topic_attributes(topic_arn, "DisplayName", displayname)
conn.set_topic_attributes(topic_arn, "DeliveryPolicy", delivery)
attributes = conn.get_topic_attributes(topic_arn)['GetTopicAttributesResponse']['GetTopicAttributesResult']['Attributes']
attributes["Policy"].should.equal("{'foo': 'bar'}")
attributes["DisplayName"].should.equal("My display name")
attributes["DeliveryPolicy"].should.equal("{'http': {'defaultHealthyRetryPolicy': {'numRetries': 5}}}")
|
py | b4129440dac87f7df5b5c728379bec08f0cfd2de | import argparse
import asyncio
from pathlib import Path
from typing import Dict
import yaml
from qzemoji.orm import AsyncEnginew, EmojiOrm, EmojiTable
DB_PATH = Path("data/emoji.db")
def prepare(source: Path):
# exist_ok added in 3.5, god
if not source.exists():
raise FileNotFoundError(source)
DB_PATH.parent.mkdir(parents=True, exist_ok=True)
if DB_PATH.exists():
# missing_ok added in 3.8, so test manually
DB_PATH.unlink()
def item_stream(p: Path):
with open(p, encoding="utf8") as f:
d: Dict[int, str] = yaml.safe_load(f)
yield from d.items()
async def dump_items(source: Path):
async with AsyncEnginew.sqlite3(DB_PATH) as engine:
tbl = EmojiTable(engine)
await tbl.create()
async with tbl.sess() as sess:
async with sess.begin():
for eid, text in item_stream(source):
if not text:
print(f"{eid} null value. Skipped.")
continue
sess.add(EmojiOrm(eid=eid, text=text))
await sess.commit()
if __name__ == "__main__":
psr = argparse.ArgumentParser()
psr.add_argument("-D", "--debug", help="asyncio debug mode", action="store_true")
psr.add_argument("-f", "--file", help="source file", default="data/emoji.yml", type=Path)
arg = psr.parse_args()
prepare(arg.file)
asyncio.run(dump_items(arg.file), debug=arg.debug)
|
py | b412944eec601ed590e5a8c02b07c802fa45a5ec | from waitress import serve
from config import wsgi
open("/opt/python/log/app-initialized", "w").close()
serve(wsgi.application, unix_socket="/opt/python/log/nginx.socket")
|
py | b41294b598d20a4decd0abab2bb0afa145f9ec5a | # -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
#
"""
Tests for explorer.py
"""
# Test library imports
import pytest
# Local imports
from spyder.widgets.explorer import FileExplorerTest, ProjectExplorerTest
@pytest.fixture
def setup_file_explorer(qtbot):
"""Set up FileExplorerTest."""
widget = FileExplorerTest()
qtbot.addWidget(widget)
return widget
@pytest.fixture
def setup_project_explorer(qtbot):
"""Set up FileExplorerTest."""
widget = ProjectExplorerTest()
qtbot.addWidget(widget)
return widget
def test_file_explorer(qtbot):
"""Run FileExplorerTest."""
fe = setup_file_explorer(qtbot)
fe.resize(640, 480)
fe.show()
assert fe
def test_project_explorer(qtbot):
"""Run ProjectExplorerTest."""
pe = setup_project_explorer(qtbot)
pe.resize(640, 480)
pe.show()
assert pe
if __name__ == "__main__":
pytest.main()
|
py | b41294bdddaeb4a9bd6a5e199d50692b4b540c58 | """
utility.py
Provides a number of simple commands for working with strings.
Created By:
- Luke Rogers <https://github.com/lukeroge>
- Dabo Ross <https://github.com/daboross>
Special Thanks:
- Fletcher Boyd <https://github.com/thenoodle68>
License: GPL v3
"""
import base64
import hashlib
import collections
import re
import os
import json
import codecs
import urllib.parse
import random
import binascii
from cloudbot import hook
from cloudbot.util import formatting, web, colors
COLORS = collections.OrderedDict([
('red', '\x0304'),
('orange', '\x0307'),
('yellow', '\x0308'),
('green', '\x0309'),
('cyan', '\x0303'),
('ltblue', '\x0310'),
('rylblue', '\x0312'),
('blue', '\x0302'),
('magenta', '\x0306'),
('pink', '\x0313'),
('maroon', '\x0305')
])
# helper functions
strip_re = re.compile(r"(\x03|\x02|\x1f|\x0f)(?:,?\d{1,2}(?:,\d{1,2})?)?")
def strip(string):
return strip_re.sub('', string)
def translate(text, dic):
for i, j in dic.items():
text = text.replace(i, j)
return text
# on_start
@hook.on_start()
def load_text(bot):
"""
:type bot: cloudbot.bot.CloudBot
"""
global leet
with codecs.open(os.path.join(bot.data_dir, "leet.json"), encoding="utf-8") as f:
leet = json.load(f)
# misc
@hook.command("qrcode", "qr")
def qrcode(text):
"""<link> - returns a link to a QR code image for <link>"""
args = {
"cht": "qr", # chart type (QR)
"chs": "200x200", # dimensions
"chl": text # data
}
argstring = urllib.parse.urlencode(args)
link = "http://chart.googleapis.com/chart?{}".format(argstring)
return web.try_shorten(link)
# basic text tools
@hook.command("capitalize", "capitalise")
def capitalize(text):
"""<string> -- Capitalizes <string>.
:type text: str
"""
return ". ".join([sentence.capitalize() for sentence in text.split(". ")])
@hook.command
def upper(text):
"""<string> -- Convert string to uppercase."""
return text.upper()
@hook.command
def lower(text):
"""<string> -- Convert string to lowercase."""
return text.lower()
@hook.command
def titlecase(text):
"""<string> -- Convert string to title case."""
return text.title()
@hook.command
def swapcase(text):
"""<string> -- Swaps the capitalization of <string>."""
return text.swapcase()
# encoding
@hook.command("rot13")
def rot13_encode(text):
"""<string> -- Encode <string> with rot13."""
encoder = codecs.getencoder("rot-13")
return encoder(text)[0]
@hook.command("base64")
def base64_encode(text):
"""<string> -- Encode <string> with base64."""
return base64.b64encode(text.encode()).decode()
@hook.command("debase64", "unbase64")
def base64_decode(text, notice):
"""<string> -- Decode <string> with base64."""
try:
return base64.b64decode(text.encode()).decode()
except binascii.Error:
notice("Invalid base64 string '{}'".format(text))
@hook.command("isbase64", "checkbase64")
def base64_check(text):
"""<string> -- Checks if <string> is a valid base64 encoded string"""
try:
base64.b64decode(text.encode())
except binascii.Error:
return "'{}' is not a valid base64 encoded string".format(text)
else:
return "'{}' is a valid base64 encoded string".format(text)
@hook.command
def unescape(text):
"""<string> -- Unicode unescapes <string>."""
decoder = codecs.getdecoder("unicode_escape")
return decoder(text)[0]
@hook.command
def escape(text):
"""<string> -- Unicode escapes <string>."""
encoder = codecs.getencoder("unicode_escape")
return encoder(text)[0].decode()
# length
@hook.command
def length(text):
"""<string> -- Gets the length of <string>"""
return "The length of that string is {} characters.".format(len(text))
# reverse
@hook.command
def reverse(text):
"""<string> -- Reverses <string>."""
return text[::-1]
# hashing
@hook.command("hash")
def hash_command(text):
"""<string> -- Returns hashes of <string>."""
return ', '.join(x + ": " + getattr(hashlib, x)(text.encode("utf-8")).hexdigest()
for x in ['md5', 'sha1', 'sha256'])
# novelty
@hook.command
def munge(text):
"""<text> -- Munges up <text>."""
return formatting.munge(text)
@hook.command
def leet(text):
"""<text> -- Makes <text> more 1337h4x0rz."""
output = ''.join(random.choice(leet[ch]) if ch.isalpha() else ch for ch in text.lower())
return output
# Based on plugin by FurCode - <https://github.com/FurCode/RoboCop2>
@hook.command
def derpify(text):
"""<text> - returns some amusing responses from your input."""
string = text.upper()
pick_the = random.choice(["TEH", "DA"])
pick_e = random.choice(["E", "3", "A"])
pick_qt = random.choice(["?!?!??", "???!!!!??", "?!??!?", "?!?!?!???"])
pick_ex = random.choice(["1111!11", "1!11", "!!1!", "1!!!!111", "!1!111!1", "!11!111"])
pick_end = random.choice(["", "OMG", "LOL", "WTF", "WTF LOL", "OMG LOL"])
rules = {"YOU'RE": "UR", "YOUR": "UR", "YOU": "U", "WHAT THE HECK": "WTH", "WHAT THE HELL": "WTH",
"WHAT THE FUCK": "WTF",
"WHAT THE": "WT", "WHAT": "WUT", "ARE": "R", "WHY": "Y", "BE RIGHT BACK": "BRB", "BECAUSE": "B/C",
"OH MY GOD": "OMG", "O": "OH", "THE": pick_the, "TOO": "2", "TO": "2", "BE": "B", "CK": "K", "ING": "NG",
"PLEASE": "PLS", "SEE YOU": "CYA", "SEE YA": "CYA", "SCHOOL": "SKOOL", "AM": "M",
"AM GOING TO": "IAM GOING TO", "THAT": "DAT", "ICK": "IK",
"LIKE": "LIEK", "HELP": "HALP", "KE": "EK", "E": pick_e, "!": pick_ex, "?": pick_qt}
output = translate(string, rules) + " " + pick_end
return output
# colors
@hook.command
def color_parse(text):
return colors.parse(text)
# colors - based on code by Reece Selwood - <https://github.com/hitzler/homero>
@hook.command
def rainbow(text):
"""<text> -- Gives <text> rainbow colors."""
text = str(text)
text = strip(text)
col = list(COLORS.items())
out = ""
l = len(COLORS)
for i, t in enumerate(text):
if t == " ":
out += t
else:
out += col[i % l][1] + t
return out
@hook.command
def wrainbow(text):
"""<text> -- Gives each word in <text> rainbow colors."""
text = str(text)
col = list(COLORS.items())
text = strip(text).split(' ')
out = []
l = len(COLORS)
for i, t in enumerate(text):
out.append(col[i % l][1] + t)
return ' '.join(out)
@hook.command
def usa(text):
"""<text> -- Makes <text> more patriotic."""
text = strip(text)
c = [COLORS['red'], '\x0300', COLORS['blue']]
l = len(c)
out = ''
for i, t in enumerate(text):
out += c[i % l] + t
return out
@hook.command
def superscript(text):
"""<text> -- Makes <text> superscript."""
regular = "abcdefghijklmnoprstuvwxyzABDEGHIJKLMNOPRTUVW0123456789+-=()"
super_script = "ᵃᵇᶜᵈᵉᶠᵍʰⁱʲᵏˡᵐⁿᵒᵖʳˢᵗᵘᵛʷˣʸᶻᴬᴮᴰᴱᴳᴴᴵᴶᴷᴸᴹᴺᴼᴾᴿᵀᵁⱽᵂ⁰¹²³⁴⁵⁶⁷⁸⁹⁺⁻⁼⁽⁾"
result = []
for char in text:
index = regular.find(char)
if index != -1:
result.append(super_script[index])
else:
result.append(char)
return "".join(result)
|
py | b412952d51614cb9e10cc85fc4772b0e51f21b58 | import jinja2
setup_template_env = jinja2.Environment(
loader=jinja2.FileSystemLoader("./templates"),
autoescape=True
)
|
py | b4129562aa59a49ff42dc40d091f9a8e2f577a08 | __all__ = []
# The following pattern is used below for importing sub-modules:
#
# 1. "from foo import *". This imports all the names from foo.__all__ into
# this module. But, this does not put those names into the __all__ of
# this module. This enables "from sympy.physics.mechanics import kinematics" to
# work.
# 2. "import foo; __all__.extend(foo.__all__)". This adds all the names in
# foo.__all__ to the __all__ of this module. The names in __all__
# determine which names are imported when
# "from sympy.physics.mechanics import *" is done.
import kane
from kane import *
__all__.extend(kane.__all__)
import rigidbody
from rigidbody import *
__all__.extend(rigidbody.__all__)
import functions
from functions import *
__all__.extend(functions.__all__)
import particle
from particle import *
__all__.extend(particle.__all__)
import point
from point import *
__all__.extend(point.__all__)
import essential
from essential import *
__all__.extend(essential.__all__)
|
py | b41295a988524270ad3c46774161e7597a5d3590 | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/xavier_ssd/TrekBot/TrekBot_WS/src/voxblox/voxblox/include;/xavier_ssd/TrekBot/TrekBot_WS/build_isolated/voxblox".split(';') if "/xavier_ssd/TrekBot/TrekBot_WS/src/voxblox/voxblox/include;/xavier_ssd/TrekBot/TrekBot_WS/build_isolated/voxblox" != "" else []
PROJECT_CATKIN_DEPENDS = "eigen_catkin;eigen_checks;gflags_catkin;glog_catkin;minkindr".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-lvoxblox_proto;-lvoxblox;/usr/local/lib/libprotobuf.a".split(';') if "-lvoxblox_proto;-lvoxblox;/usr/local/lib/libprotobuf.a" != "" else []
PROJECT_NAME = "voxblox"
PROJECT_SPACE_DIR = "/xavier_ssd/TrekBot/TrekBot_WS/devel_isolated/voxblox"
PROJECT_VERSION = "0.0.0"
|
py | b4129617fba75c22ab38f1592e43616f3fe69966 | """
Manipulate a cv2 video with shadertoy.
Dependencies:
pip install opencv-python
"""
import arcade
from arcade.experimental.shadertoy import Shadertoy
import cv2 # type: ignore
SCREEN_WIDTH = 400
SCREEN_HEIGHT = 300
SCREEN_TITLE = "ShaderToy Video"
class ShadertoyVideo(arcade.Window):
def __init__(self, width, height, title):
super().__init__(width, height, title, resizable=True)
self.shadertoy = Shadertoy(
self.get_framebuffer_size(),
"""
void mainImage( out vec4 fragColor, in vec2 fragCoord )
{
// Calculate the texture coordinate of the current fragment.
// This interpolates from 0,0 to 1,1 from lower left to upper right
vec2 uv = fragCoord.xy / iResolution;
// Alter texture coordinates to make some waves
vec2 pos = uv - vec2(0.5);
float dist = length(pos) - iTime / 5.0;
vec2 direction = normalize(pos);
vec2 uv2 = uv + (direction * (sin(dist * 50.0 - iTime) - 0.5)) * 0.02;
fragColor = texture(iChannel0, uv2);
}
""",
)
# INSERT YOUR OWN VIDEO HERE
self.video = cv2.VideoCapture("C:/Users/efors/Desktop/BigBuckBunny.mp4")
width, height = (
int(self.video.get(cv2.CAP_PROP_FRAME_WIDTH)),
int(self.video.get(cv2.CAP_PROP_FRAME_HEIGHT)),
)
self.video_texture = self.ctx.texture((width, height), components=3)
self.video_texture.wrap_x = self.ctx.CLAMP_TO_EDGE
self.video_texture.wrap_y = self.ctx.CLAMP_TO_EDGE
self.video_texture.swizzle = "BGR1"
self.shadertoy.channel_0 = self.video_texture
self.set_size(width, height)
def on_draw(self):
self.clear()
self.shadertoy.render()
def on_update(self, delta_time: float):
self.shadertoy.time += delta_time
self.next_frame()
def on_resize(self, width: float, height: float):
super().on_resize(width, height)
self.shadertoy.resize(self.get_framebuffer_size())
def next_frame(self):
exists, frame = self.video.read()
frame = cv2.flip(frame, 0)
if exists:
self.video_texture.write(frame)
if __name__ == "__main__":
ShadertoyVideo(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE)
arcade.run()
|
py | b412978488e2da1bb2e799f8deb9f77361083a94 | import sys
import shlex
import gc
import signal
import random
from subprocess import PIPE
from os.path import isfile
from argparse import ArgumentParser
from math import log
from collections import defaultdict
import shared.param as param
from shared.utils import subprocess_popen, IUPAC_base_to_ACGT_base_dict as BASE2ACGT
from shared.interval_tree import bed_tree_from, is_region_in
is_pypy = '__pypy__' in sys.builtin_module_names
RATIO_OF_NON_VARIANT_TO_VARIANT = 2.0
def PypyGCCollect(signum, frame):
gc.collect()
signal.alarm(60)
def evc_base_from(base):
return base if base == "N" else BASE2ACGT[base]
def variants_map_from(variant_file_path):
"""
variants map with 1-based position as key
"""
    if variant_file_path is None:
return {}
variants_map = {}
f = subprocess_popen(shlex.split("gzip -fdc %s" % (variant_file_path)))
while True:
row = f.stdout.readline()
is_finish_reading_output = row == '' and f.poll() is not None
if is_finish_reading_output:
break
if row:
columns = row.split(maxsplit=2)
ctg_name, position_str = columns[0], columns[1]
key = ctg_name + ":" + position_str
variants_map[key] = True
f.stdout.close()
f.wait()
return variants_map
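# Illustrative note (assumed example, added for clarity): keys have the form
# "<ctg_name>:<1-based position>", so a variant row for chr20 at position 1234 adds the
# entry {"chr20:1234": True}.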
def non_variants_map_near_variants_from(
variants_map,
lower_limit_to_non_variants=15,
upper_limit_to_non_variants=16
):
"""
non variants map with 1-based position as key
"""
non_variants_map = {}
non_variants_map_to_exclude = {}
for key in variants_map.keys():
ctg_name, position_str = key.split(':')
position = int(position_str)
for i in range(upper_limit_to_non_variants * 2 + 1):
position_offset = -upper_limit_to_non_variants + i
temp_position = position + position_offset
if temp_position <= 0:
continue
temp_key = ctg_name + ":" + str(temp_position)
can_add_to_non_variants_map = (
temp_key not in variants_map and
temp_key not in non_variants_map and
(
-upper_limit_to_non_variants <= position_offset <= -lower_limit_to_non_variants or
lower_limit_to_non_variants <= position_offset <= upper_limit_to_non_variants
)
)
can_add_to_non_variants_map_to_exclude = (
lower_limit_to_non_variants > position_offset > -lower_limit_to_non_variants
)
if can_add_to_non_variants_map:
non_variants_map[temp_key] = True
if can_add_to_non_variants_map_to_exclude:
non_variants_map_to_exclude[temp_key] = True
for key in non_variants_map_to_exclude.keys():
if key in non_variants_map:
del non_variants_map[key]
return non_variants_map
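# Illustrative note (assumed numbers, added for clarity): with the default limits
# (15, 16), a single variant at chr20:100 produces non-variant keys at positions
# 84, 85, 115 and 116; any position within +/-14 of some variant is removed again by the
# exclusion pass above, so windows of nearby variants do not leak in closer positions.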
class CandidateStdout(object):
def __init__(self, handle):
self.stdin = handle
def __del__(self):
self.stdin.close()
def region_from(ctg_name, ctg_start=None, ctg_end=None):
"""
1-based region string [start, end]
"""
if ctg_name is None:
return ""
if (ctg_start is None) != (ctg_end is None):
return ""
if ctg_start is None and ctg_end is None:
return "{}".format(ctg_name)
return "{}:{}-{}".format(ctg_name, ctg_start, ctg_end)
def reference_sequence_from(samtools_execute_command, fasta_file_path, regions):
refernce_sequences = []
region_value_for_faidx = " ".join(regions)
samtools_faidx_process = subprocess_popen(
shlex.split("{} faidx {} {}".format(samtools_execute_command, fasta_file_path, region_value_for_faidx))
)
while True:
row = samtools_faidx_process.stdout.readline()
is_finish_reading_output = row == '' and samtools_faidx_process.poll() is not None
if is_finish_reading_output:
break
if row:
refernce_sequences.append(row.rstrip())
    # the first line is the reference name ">xxxx" and needs to be ignored
reference_sequence = "".join(refernce_sequences[1:])
# uppercase for masked sequences
reference_sequence = reference_sequence.upper()
samtools_faidx_process.stdout.close()
samtools_faidx_process.wait()
if samtools_faidx_process.returncode != 0:
return None
return reference_sequence
def is_too_many_soft_clipped_bases_for_a_read_from(CIGAR):
soft_clipped_bases = 0
total_alignment_positions = 0
advance = 0
for c in str(CIGAR):
if c.isdigit():
advance = advance * 10 + int(c)
continue
if c == "S":
soft_clipped_bases += advance
total_alignment_positions += advance
advance = 0
# skip a read less than 55% aligned
return 1.0 - float(soft_clipped_bases) / (total_alignment_positions + 1) < 0.55
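# Illustrative note (assumed numbers, added for clarity): assuming every CIGAR operation
# length is accumulated into total_alignment_positions as above, a read with CIGAR
# "60S40M" has 60 of 100 positions soft-clipped, an aligned fraction of roughly 0.40,
# below the 0.55 cutoff, so the read is skipped; "10S90M" (about 0.89) is kept.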
def make_candidates(args):
gen4Training = args.gen4Training
variant_file_path = args.var_fn
bed_file_path = args.bed_fn
fasta_file_path = args.ref_fn
ctg_name = args.ctgName
ctg_start = args.ctgStart
ctg_end = args.ctgEnd
output_probability = args.outputProb
samtools_execute_command = args.samtools
minimum_depth_for_candidate = args.minCoverage
minimum_af_for_candidate = args.threshold
minimum_mapping_quality = args.minMQ
bam_file_path = args.bam_fn
candidate_output_path = args.can_fn
is_using_stdout_for_output_candidate = candidate_output_path == "PIPE"
is_building_training_dataset = gen4Training == True
is_variant_file_given = variant_file_path is not None
is_bed_file_given = bed_file_path is not None
is_ctg_name_given = ctg_name is not None
is_ctg_range_given = is_ctg_name_given and ctg_start is not None and ctg_end is not None
if is_building_training_dataset:
# minimum_depth_for_candidate = 0
minimum_af_for_candidate = 0
# preparation for candidates near variants
need_consider_candidates_near_variant = is_building_training_dataset and is_variant_file_given
variants_map = variants_map_from(variant_file_path) if need_consider_candidates_near_variant else {}
non_variants_map = non_variants_map_near_variants_from(variants_map)
no_of_candidates_near_variant = 0
no_of_candidates_outside_variant = 0
# update output probabilities for candidates near variants
# original: (7000000.0 * 2.0 / 3000000000)
ratio_of_candidates_near_variant_to_candidates_outside_variant = 1.0
output_probability_near_variant = (
3500000.0 * ratio_of_candidates_near_variant_to_candidates_outside_variant * RATIO_OF_NON_VARIANT_TO_VARIANT / 14000000
)
output_probability_outside_variant = 3500000.0 * RATIO_OF_NON_VARIANT_TO_VARIANT / (3000000000 - 14000000)
if not isfile("{}.fai".format(fasta_file_path)):
print("Fasta index {}.fai doesn't exist.".format(fasta_file_path), file=sys.stderr)
sys.exit(1)
# 1-based regions [start, end] (start and end inclusive)
regions = []
reference_start, reference_end = None, None
if is_ctg_range_given:
reference_start, reference_end = ctg_start - param.expandReferenceRegion, ctg_end + param.expandReferenceRegion
reference_start = 1 if reference_start < 1 else reference_start
regions.append(region_from(ctg_name=ctg_name, ctg_start=reference_start, ctg_end=reference_end))
elif is_ctg_name_given:
regions.append(region_from(ctg_name=ctg_name))
reference_sequence = reference_sequence_from(
samtools_execute_command=samtools_execute_command,
fasta_file_path=fasta_file_path,
regions=regions
)
if reference_sequence is None or len(reference_sequence) == 0:
print("[ERROR] Failed to load reference seqeunce from file ({}).".format(fasta_file_path), file=sys.stderr)
sys.exit(1)
tree = bed_tree_from(bed_file_path=bed_file_path)
if is_bed_file_given and ctg_name not in tree:
print("[ERROR] ctg_name({}) not exists in bed file({}).".format(ctg_name, bed_file_path), file=sys.stderr)
sys.exit(1)
samtools_view_process = subprocess_popen(
shlex.split("{} view -F {} {} {}".format(samtools_execute_command, param.SAMTOOLS_VIEW_FILTER_FLAG, bam_file_path, " ".join(regions)))
)
if is_using_stdout_for_output_candidate:
can_fp = CandidateStdout(sys.stdout)
else:
can_fpo = open(candidate_output_path, "wb")
can_fp = subprocess_popen(shlex.split("gzip -c"), stdin=PIPE, stdout=can_fpo)
pileup = defaultdict(lambda: {"A": 0, "C": 0, "G": 0, "T": 0, "I": 0, "D": 0, "N": 0})
POS = 0
number_of_reads_processed = 0
while True:
row = samtools_view_process.stdout.readline()
is_finish_reading_output = row == '' and samtools_view_process.poll() is not None
if row:
columns = row.strip().split()
if columns[0][0] == "@":
continue
RNAME = columns[2]
if RNAME != ctg_name:
continue
POS = int(columns[3]) - 1 # switch from 1-base to 0-base to match sequence index
MAPQ = int(columns[4])
CIGAR = columns[5]
SEQ = columns[9].upper() # uppercase for SEQ (regexp is \*|[A-Za-z=.]+)
reference_position = POS
query_position = 0
if MAPQ < minimum_mapping_quality:
continue
if CIGAR == "*" or is_too_many_soft_clipped_bases_for_a_read_from(CIGAR):
continue
number_of_reads_processed += 1
advance = 0
for c in str(CIGAR):
if c.isdigit():
advance = advance * 10 + int(c)
continue
if c == "S":
query_position += advance
elif c == "M" or c == "=" or c == "X":
for _ in range(advance):
base = evc_base_from(SEQ[query_position])
pileup[reference_position][base] += 1
# those CIGAR operations consumes query and reference
reference_position += 1
query_position += 1
elif c == "I":
pileup[reference_position - 1]["I"] += 1
# insertion consumes query
query_position += advance
elif c == "D":
pileup[reference_position - 1]["D"] += 1
# deletion consumes reference
reference_position += advance
# reset advance
advance = 0
positions = [x for x in pileup.keys() if x < POS] if not is_finish_reading_output else list(pileup.keys())
positions.sort()
for zero_based_position in positions:
base_count = depth = reference_base = temp_key = None
# ctg and bed checking (region [ctg_start, ctg_end] is 1-based, inclusive start and end positions)
pass_ctg = not is_ctg_range_given or ctg_start <= zero_based_position+1 <= ctg_end
pass_bed = not is_bed_file_given or is_region_in(tree, ctg_name, zero_based_position)
if not pass_bed or not pass_ctg:
continue
# output probability checking
pass_output_probability = True
if is_building_training_dataset and is_variant_file_given:
temp_key = ctg_name + ":" + str(zero_based_position+1)
pass_output_probability = (
temp_key not in variants_map and (
(temp_key in non_variants_map and random.uniform(0, 1) <= output_probability_near_variant) or
(temp_key not in non_variants_map and random.uniform(0, 1) <= output_probability_outside_variant)
)
)
elif is_building_training_dataset:
pass_output_probability = random.uniform(0, 1) <= output_probability
if not pass_output_probability:
continue
# for depth checking and af checking
try:
reference_base = evc_base_from(reference_sequence[
zero_based_position - (0 if reference_start is None else (reference_start - 1))
])
position_dict = pileup[zero_based_position]
except:
continue
# depth checking
base_count = list(position_dict.items())
depth = sum(x[1] for x in base_count) - position_dict["I"] - position_dict["D"]
if depth < minimum_depth_for_candidate:
continue
# af checking
denominator = depth if depth > 0 else 1
base_count.sort(key=lambda x: -x[1]) # sort base_count descendingly
pass_af = (
base_count[0][0] != reference_base or
(float(base_count[1][1]) / denominator) >= minimum_af_for_candidate
)
if not pass_af:
continue
# output 1-based candidate
if temp_key is not None and temp_key in non_variants_map:
no_of_candidates_near_variant += 1
elif temp_key is not None and temp_key not in non_variants_map:
no_of_candidates_outside_variant += 1
output = [ctg_name, zero_based_position+1, reference_base, depth]
output.extend(["%s %d" % x for x in base_count])
output = " ".join([str(x) for x in output]) + "\n"
can_fp.stdin.write(output)
for zero_based_position in positions:
del pileup[zero_based_position]
if is_finish_reading_output:
break
if need_consider_candidates_near_variant:
print("# of candidates near variant: ", no_of_candidates_near_variant)
print("# of candidates outside variant: ", no_of_candidates_outside_variant)
samtools_view_process.stdout.close()
samtools_view_process.wait()
if not is_using_stdout_for_output_candidate:
can_fp.stdin.close()
can_fp.wait()
can_fpo.close()
if number_of_reads_processed == 0:
print("No read has been process, either the genome region you specified has no read cover, or please check the correctness of your BAM input (%s)." % (
bam_file_path), file=sys.stderr)
sys.exit(0)
def main():
parser = ArgumentParser(description="Generate 1-based variant candidates using alignments")
parser.add_argument('--bam_fn', type=str, default="input.bam",
help="Sorted bam file input, default: %(default)s")
parser.add_argument('--ref_fn', type=str, default="ref.fa",
help="Reference fasta file input, default: %(default)s")
parser.add_argument('--bed_fn', type=str, default=None,
help="Call variant only in these regions, works in intersection with ctgName, ctgStart and ctgEnd, optional, default: as defined by ctgName, ctgStart and ctgEnd")
parser.add_argument('--can_fn', type=str, default="PIPE",
help="Pile-up count output, use PIPE for standard output, default: %(default)s")
parser.add_argument('--var_fn', type=str, default=None,
help="Candidate sites VCF file input, if provided, will choose candidate +/- 1 or +/- 2. Use together with gen4Training. default: %(default)s")
parser.add_argument('--threshold', type=float, default=0.125,
help="Minimum allele frequence of the 1st non-reference allele for a site to be considered as a condidate site, default: %(default)f")
parser.add_argument('--minCoverage', type=float, default=4,
help="Minimum coverage required to call a variant, default: %(default)f")
parser.add_argument('--minMQ', type=int, default=0,
help="Minimum Mapping Quality. Mapping quality lower than the setting will be filtered, default: %(default)d")
parser.add_argument('--gen4Training', action='store_true',
help="Output all genome positions as candidate for model training (Set --threshold to 0), default: %(default)s")
# parser.add_argument('--candidates', type=int, default=7000000,
# help="Use with gen4Training, number of variant candidates to be generated, default: %(default)s")
# parser.add_argument('--genomeSize', type=int, default=3000000000,
# help="Use with gen4Training, default: %(default)s")
parser.add_argument('--outputProb', type=float, default=(7000000.0 * RATIO_OF_NON_VARIANT_TO_VARIANT / 3000000000),
help="output probability")
parser.add_argument('--ctgName', type=str, default="chr17",
help="The name of sequence to be processed, default: %(default)s")
parser.add_argument('--ctgStart', type=int, default=None,
help="The 1-based starting position of the sequence to be processed")
parser.add_argument('--ctgEnd', type=int, default=None,
help="The 1-based inclusive ending position of the sequence to be processed")
parser.add_argument('--samtools', type=str, default="samtools",
help="Path to the 'samtools', default: %(default)s")
args = parser.parse_args()
if len(sys.argv[1:]) == 0:
parser.print_help()
sys.exit(1)
make_candidates(args)
if __name__ == "__main__":
main()
|
py | b412979aa238f37df3b32465d3416a92538cf213 | #!/usr/bin/env python3
import sys
from testsupport import info, run_project_executable, warn, run, subprocess, find_project_executable
def main() -> None:
# Replace with the executable you want to test
with open("client_output_test_5.txt", "w+") as stdout:
try:
cmd = find_project_executable("server")
print(cmd)
info("Run multithreaded-server test (6 threads) ...")
with subprocess.Popen([cmd, "6", "1025"], stdout=subprocess.PIPE) as proc:
info("Run multithreaded-client test (6 threads) ...")
run_project_executable("client", args=["6", "localhost", "1025", "25000"], stdout=stdout)
                proc.kill()
outs, errs = proc.communicate()
Lista = [x for x in outs.decode('utf-8').replace('\\n', '\n').split('\n') if x!='']
output = open("client_output_test_5.txt").readlines()
Listb = [x.replace('\n', '') for x in output if x!='']
Lista = list(map(int, Lista))
Listb = list(map(int, Listb))
Lista.sort()
Listb.sort()
if Lista != Listb:
warn(f"output does not match")
print(Lista)
print(Listb)
sys.exit(2)
num = 12500 * 6 *3
if num not in Lista:
warn(f"output not correct")
print(num)
print(Lista)
sys.exit(2)
info("OK")
except OSError as e:
warn(f"Failed to run command: {e}")
sys.exit(1)
if __name__ == "__main__":
main()
|
py | b41298e6f36f8d3d1fff0d48d5fc765c98994308 | """
Author: O. Pannekoucke
Description:
Defined time schemes as resnet using keras.
"""
import tensorflow.keras as keras
def make_nn_rk4(dt, trend):
""" Implementation of an RK4 with Keras """
state = keras.layers.Input(shape = trend.input_shape[1:])
# k1
k1 = trend(state)
# k2
_tmp_1 = keras.layers.Lambda(lambda x : 0.5*dt*x)(k1)
input_k2 = keras.layers.add([state,_tmp_1])
k2 = trend(input_k2)
# k3
_tmp_2 = keras.layers.Lambda(lambda x : 0.5*dt*x)(k2)
input_k3 = keras.layers.add([state,_tmp_2])
k3 = trend(input_k3)
# k4
_tmp_3 = keras.layers.Lambda(lambda x : dt*x)(k3)
input_k4 = keras.layers.add([state,_tmp_3])
k4 = trend(input_k4)
# output
# k2+k3
add_k2_k3 = keras.layers.add([k2,k3])
add_k2_k3_mul2 = keras.layers.Lambda(lambda x:2.*x)(add_k2_k3)
# Add k1,k4
_sum = keras.layers.add([k1,add_k2_k3_mul2,k4])
# *dt
_sc_mul = keras.layers.Lambda(lambda x:dt/6.*x)(_sum)
output = keras.layers.add([state, _sc_mul])
time_scheme = keras.models.Model(inputs =[state],
outputs=[output])
return time_scheme |
py | b4129934f7aa47be45f8b7f15ea9d86f298eee81 | # Faça um programa que tenha uma função notas() que pode receber várias notas de alunos e vai retornar um dicionário com as seguintes informações:
# Quantidade de notas
# A maior nota
# A menor nota
# A media da turma
# A situação (opcional)
def notas(*notas:float, situação:bool = False):
"""
Uma função feita para mostrar:
A quantidade de notas
A maior nota
A menor nota
A media da turma
A situação da turma
:param notas: Uma ou mais notas de alunos
:param situação: A situação da turma (opcional)
:return: Dicionario com todas as informações mostradas acima
"""
totalnota = dict()
quantidenotas = len(notas)
maiornota = 0
menornota = 10
media = 0
for x in notas:
if(x > maiornota):
maiornota = x
if(x < menornota):
menornota = x
media += x
media /= quantidenotas
totalnota['quantidade'] = quantidenotas
totalnota['maiornota'] = maiornota
totalnota['menornota'] = menornota
totalnota['media'] = media
if(situação == True):
if(media < 4):
totalnota['situação'] = 'horrivel'
elif(media > 7):
totalnota['situação'] = 'otima'
else:
totalnota['situação'] = 'rasoavel'
return totalnota
print(notas(10, 10, 10, 10, 10, situação=True)) |
py | b41299ad8be023aed9efab336394f46fc97e0453 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""The setup script."""
from setuptools import setup, find_packages
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read()
requirements = [
# TODO: put package requirements here
]
setup_requirements = [
'pytest-runner',
# TODO(timb07): put setup requirements (distutils extensions, etc.) here
]
test_requirements = [
'pytest',
# TODO: put package test requirements here
]
setup(
name='irritable',
version='0.1.0',
description="Irritable implements broken iterators called irritables",
long_description=readme + '\n\n' + history,
author="Tim Bell",
author_email='[email protected]',
url='https://github.com/timb07/irritable',
packages=find_packages(include=['irritable']),
include_package_data=True,
install_requires=requirements,
license="BSD license",
zip_safe=False,
keywords='irritable',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
test_suite='tests',
tests_require=test_requirements,
setup_requires=setup_requirements,
)
|
py | b4129aa017d6998d7f69d844a95265427139687e | import gym
from stable_baselines3 import DQN
env = gym.make("CartPole-v1")
model = DQN("MlpPolicy", env, verbose=1)
model.learn(total_timesteps=100)
|
py | b4129b899764dc6c083fb67ab529e4f7442f85bf | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import division
import numpy as np
import cv2
class Visualizer(object):
def __init__(self, rect_size):
self.rect_size = rect_size
self.line_width = 2
def fill_pixel(self, i, j, fill_color):
self.img[j][i] = fill_color
def draw_rect(self, i, j, fill_color):
cv2.rectangle(self.img, (i*self.rect_size+self.line_width,j*self.rect_size+self.line_width), \
((i+1)*self.rect_size-self.line_width,(j+1)*self.rect_size-self.line_width), fill_color, -1)
def color_index(self,num):
if num< 256:
return [num,0,0]
elif num < 512:
return [255,num-256,0]
elif num < 768:
return [768-num,256,0]
elif num < 1024:
return [0,256,num-768]
elif num < 1280:
return [0,0,1280-num]
else:
print("Color Error!")
return [255,255,255]
def visual_pixel(self, feat_map):
input_ = feat_map.data
self.img_size = input_.shape[0]
size = self.img_size
self.img = np.ones([size,size,3]).astype(np.uint8) * 255
colors = {}
for i in range(self.img_size):
for j in range(self.img_size):
colors[input_[i][j]] = 1
color_num = len(colors.keys())
print('Different Color Num: ',color_num)
color_step = int(1280/(color_num+1))
colors = sorted(colors.items(), key=lambda item:item[0])
color_dict = {0:0}
num = 1
for color in colors:
if color[0] != 0:
color_dict[color[0]] = num * color_step
num += 1
print('Different Color Num: ', len(color_dict.keys()))
for i in range(self.img_size):
for j in range(self.img_size):
color = self.color_index(color_dict[input_[i][j]])
self.img.itemset((i,j,0),color[0])
self.img.itemset((i,j,1),color[1])
self.img.itemset((i,j,2),color[2])
#self.img[i][j][0] = color[0]
#self.img[i][j][1] = color[1]
#self.img[i][j][2] = color[2]
def visual(self, feat_map):
input_ = feat_map.data
self.img_size = input_.shape[0]
size = self.img_size * self.rect_size
self.img = np.zeros([size,size,3]).astype(np.uint8)
cv2.rectangle(self.img,(0,0),(size,size),(255,255,255),2)
colors = {}
for i in range(self.img_size):
for j in range(self.img_size):
colors[input_[i][j]] = 1
color_num = len(colors.keys())
print('Different Color Num: ', color_num)
color_step = int(1280/(color_num+1))
colors = sorted(colors.items(), key=lambda item:item[0])
color_dict = {0:0}
num = 1
for color in colors:
if color[0] != 0:
color_dict[color[0]] = num * color_step
num += 1
color_max=0
color_min=1280
for key in color_dict:
if color_dict[key] > color_max:
color_max = color_dict[key]
if color_dict[key] < color_min:
color_min = color_dict[key]
if len(color_dict)==2:
for key in color_dict.keys():
color_dict[key] = 640
else:
for key in color_dict.keys():
temp = int(float(color_dict[key]-color_min)/float(color_max-color_min)*1150)
color_dict[key] = temp
for i in range(self.img_size):
for j in range(self.img_size):
self.draw_rect(i,j,fill_color=self.color_index(color_dict[input_[i][j]]))
def show(self):
cv2.imshow('visual',self.img)
cv2.waitKey(0)
cv2.destroyAllWindows()
def save(self, path):
cv2.imwrite(path,self.img)
def size(self):
return self.img_size
|
py | b4129bab7f36a9d823f2633695679450cd97ef97 | ## \file Calculations.py
# \author Andrea Clemeno
# \brief Provides functions for calculating the outputs
import math
## \brief Calculates elimination constant (d^-1)
# \param inParams structure holding the input values
# \return elimination constant (d^-1)
def func_k(inParams):
outfile = open("log.txt", "a")
print("function func_k called with inputs: {", file=outfile)
print(" inParams = ", end="", file=outfile)
print("Instance of InputParameters object", file=outfile)
print(" }", file=outfile)
outfile.close()
return (math.log(inParams.N_o) - math.log(inParams.N_t)) / inParams.t_t
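# Derivation note (added for clarity): assuming the first-order decay model
# N(t) = N_o * exp(-k * t), substituting the measured load N_t at time t_t and solving
# for k gives k = (ln(N_o) - ln(N_t)) / t_t, the expression returned above; func_N_p
# below evaluates the same model at the prediction time t_p.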
## \brief Calculates predicted viral load at time t (mol/mL)
# \param inParams structure holding the input values
# \param k elimination constant (d^-1)
# \return predicted viral load at time t (mol/mL)
def func_N_p(inParams, k):
outfile = open("log.txt", "a")
print("function func_N_p called with inputs: {", file=outfile)
print(" inParams = ", end="", file=outfile)
print("Instance of InputParameters object", end="", file=outfile)
print(", ", file=outfile)
print(" k = ", end="", file=outfile)
print(k, file=outfile)
print(" }", file=outfile)
outfile.close()
return inParams.N_o * math.exp(-k * inParams.t_p)
|
py | b4129fdbbb00778563500fe460f060f7b2342cc0 | # Generated by Django 3.0.5 on 2020-04-25 17:53
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import med_result.models
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='MedResult',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', models.TextField(verbose_name='Description')),
('add_date', models.DateTimeField(auto_now_add=True)),
('date_of_exam', models.DateTimeField()),
('image', models.ImageField(blank=True, null=True, upload_to=med_result.models.med_image_file_path)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
py | b4129feed460cda4310982ae27e3311c812b62b8 | from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
import logging
from flexget import plugin
from flexget.config_schema import one_or_more
from flexget.entry import Entry
from flexget.event import event
from flexget.utils.cached_input import cached
from flexget.utils.requests import RequestException
log = logging.getLogger('my_anime_list')
STATUS = {
'watching': 1,
'completed': 2,
'on_hold': 3,
'dropped': 4,
'plan_to_watch': 6,
'all': 7
}
ANIME_TYPE = [
'all',
'tv',
'ova',
'movie',
'special',
'ona',
'music',
'unknown'
]
class MyAnimeList(object):
"""" Creates entries for series and movies from MyAnimeList list
Syntax:
my_anime_list:
username: <value>
status:
- <watching|completed|on_hold|dropped|plan_to_watch>
- <watching|completed|on_hold|dropped|plan_to_watch>
...
type:
- <series|ova...>
"""
schema = {
'type': 'object',
'properties': {
'username': {'type': 'string'},
'status': one_or_more({'type': 'string', 'enum': list(STATUS.keys()), 'default': 'all'}, unique_items=True),
'type': one_or_more({'type': 'string', 'enum': list(ANIME_TYPE), 'default': 'all'}, unique_items=True)
},
'required': ['username'],
'additionalProperties': False
}
@cached('my_anime_list', persist='2 hours')
def on_task_input(self, task, config):
entries = []
selected_status = config['status']
selected_types = config['type']
if not isinstance(selected_status, list):
selected_status = [selected_status]
if not isinstance(selected_types, list):
selected_types = [selected_types]
selected_status = [STATUS[s] for s in selected_status]
try:
list_response = task.requests.get('https://myanimelist.net/animelist/' + config['username'] + '/load.json')
except RequestException as e:
raise plugin.PluginError('Error finding list on url: {url}'.format(url=e.request.url))
try:
list_json = list_response.json()
except ValueError:
raise plugin.PluginError('Invalid JSON response')
for anime in list_json:
has_selected_status = anime["status"] in selected_status or config['status'] == 'all'
has_selected_type = anime["anime_media_type_string"].lower() in selected_types or config['type'] == 'all'
if has_selected_status and has_selected_type:
entries.append(
Entry(
title=anime["anime_title"],
url="https://myanimelist.net" + anime["anime_url"],
mal_name=anime["anime_title"],
mal_poster=anime["anime_image_path"],
mal_type=anime["anime_media_type_string"]
)
)
return entries
@event('plugin.register')
def register_plugin():
plugin.register(MyAnimeList, 'my_anime_list', api_ver=2)
|
py | b412a16a48fb8a15f625e3c1f70150e36f30d4c9 | import abc
class InteractionResource(abc.ABC):
def __init__(self, interaction_service, response_factory):
self.interaction_service = interaction_service
self.response_factory = response_factory
@abc.abstractmethod
def execute(self):
pass
|
py | b412a1fd63c2b545237b0ac62f175d9da6e4f1f7 | #
# PySNMP MIB module IFEXT-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/IFEXT-MIB
# Produced by pysmi-0.3.4 at Wed May 1 13:53:13 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ifExt, = mibBuilder.importSymbols("APENT-MIB", "ifExt")
Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, ValueSizeConstraint, ValueRangeConstraint, SingleValueConstraint, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "ValueSizeConstraint", "ValueRangeConstraint", "SingleValueConstraint", "ConstraintsIntersection")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
MibScalar, MibTable, MibTableRow, MibTableColumn, ObjectIdentity, NotificationType, Unsigned32, ModuleIdentity, iso, TimeTicks, Bits, Gauge32, IpAddress, Integer32, Counter32, Counter64, MibIdentifier = mibBuilder.importSymbols("SNMPv2-SMI", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "ObjectIdentity", "NotificationType", "Unsigned32", "ModuleIdentity", "iso", "TimeTicks", "Bits", "Gauge32", "IpAddress", "Integer32", "Counter32", "Counter64", "MibIdentifier")
RowStatus, DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "RowStatus", "DisplayString", "TextualConvention")
ifExtMib = ModuleIdentity((1, 3, 6, 1, 4, 1, 2467, 1, 24, 1))
if mibBuilder.loadTexts: ifExtMib.setLastUpdated('9801282000Z')
if mibBuilder.loadTexts: ifExtMib.setOrganization('ArrowPoint Communications Inc.')
if mibBuilder.loadTexts: ifExtMib.setContactInfo('Postal: ArrowPoint Communications Inc. 50 Nagog Park Acton, Massachusetts 01720 Tel: +1 978-206-3000 option 1 E-Mail: [email protected]')
if mibBuilder.loadTexts: ifExtMib.setDescription('This MIB is not external viewable. It provides a portion of the interface entry table for internal consumption.')
apIfTable = MibTable((1, 3, 6, 1, 4, 1, 2467, 1, 24, 2), )
if mibBuilder.loadTexts: apIfTable.setStatus('current')
if mibBuilder.loadTexts: apIfTable.setDescription('A table of interface entries')
apIfEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2467, 1, 24, 2, 1), ).setIndexNames((0, "IFEXT-MIB", "apIfIndex"))
if mibBuilder.loadTexts: apIfEntry.setStatus('current')
if mibBuilder.loadTexts: apIfEntry.setDescription('ArrowPoint interface record')
apIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2467, 1, 24, 2, 1, 1), Integer32()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: apIfIndex.setStatus('current')
if mibBuilder.loadTexts: apIfIndex.setDescription('The ifIndex of the interface entry')
apIfType = MibTableColumn((1, 3, 6, 1, 4, 1, 2467, 1, 24, 2, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(6, 18, 22, 23, 30, 32, 54, 81, 82, 108, 117, 1005, 1006))).clone(namedValues=NamedValues(("fe", 6), ("ds1", 18), ("console", 22), ("ppp", 23), ("ds3", 30), ("fr", 32), ("ct", 54), ("ds0", 81), ("ds0b", 82), ("pppmm", 108), ("ge", 117), ("madlan", 1005), ("sar", 1006)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: apIfType.setStatus('current')
if mibBuilder.loadTexts: apIfType.setDescription('The ifType of the interface entry')
apIfCategory = MibTableColumn((1, 3, 6, 1, 4, 1, 2467, 1, 24, 2, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("physical", 1), ("not-physical", 2)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: apIfCategory.setStatus('current')
if mibBuilder.loadTexts: apIfCategory.setDescription('The if category of the interface entry')
apIfSlot = MibTableColumn((1, 3, 6, 1, 4, 1, 2467, 1, 24, 2, 1, 4), Integer32()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: apIfSlot.setStatus('current')
if mibBuilder.loadTexts: apIfSlot.setDescription('The slot associated with this interface')
apIfPort = MibTableColumn((1, 3, 6, 1, 4, 1, 2467, 1, 24, 2, 1, 5), Integer32()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: apIfPort.setStatus('current')
if mibBuilder.loadTexts: apIfPort.setDescription('The physical port associated with this interface')
apIfStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 2467, 1, 24, 2, 1, 6), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: apIfStatus.setStatus('current')
if mibBuilder.loadTexts: apIfStatus.setDescription('This status object for this row')
apIfCctTable = MibTable((1, 3, 6, 1, 4, 1, 2467, 1, 24, 3), )
if mibBuilder.loadTexts: apIfCctTable.setStatus('current')
if mibBuilder.loadTexts: apIfCctTable.setDescription('A table of interface entries')
apIfCctEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2467, 1, 24, 3, 1), ).setIndexNames((0, "IFEXT-MIB", "apIfCctIfIndex"))
if mibBuilder.loadTexts: apIfCctEntry.setStatus('current')
if mibBuilder.loadTexts: apIfCctEntry.setDescription('ArrowPoint circuit record')
apIfCctIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2467, 1, 24, 3, 1, 1), Integer32()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: apIfCctIfIndex.setStatus('current')
if mibBuilder.loadTexts: apIfCctIfIndex.setDescription('The ifIndex of the circuit')
apIfCctStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 2467, 1, 24, 3, 1, 2), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: apIfCctStatus.setStatus('current')
if mibBuilder.loadTexts: apIfCctStatus.setDescription('This status object for this row')
apIfRedundantSCM = MibScalar((1, 3, 6, 1, 4, 1, 2467, 1, 24, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("present", 1), ("not-present", 2)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: apIfRedundantSCM.setStatus('current')
if mibBuilder.loadTexts: apIfRedundantSCM.setDescription('The presence of the redundant SCM')
mibBuilder.exportSymbols("IFEXT-MIB", ifExtMib=ifExtMib, apIfCctIfIndex=apIfCctIfIndex, apIfRedundantSCM=apIfRedundantSCM, PYSNMP_MODULE_ID=ifExtMib, apIfIndex=apIfIndex, apIfType=apIfType, apIfStatus=apIfStatus, apIfEntry=apIfEntry, apIfCategory=apIfCategory, apIfTable=apIfTable, apIfPort=apIfPort, apIfCctStatus=apIfCctStatus, apIfCctEntry=apIfCctEntry, apIfSlot=apIfSlot, apIfCctTable=apIfCctTable)
|
py | b412a2b9c209da1817e948ce99648435232d7bb3 | # coding: utf-8
"""
Pure Storage FlashBlade REST 1.11 Python SDK
Pure Storage FlashBlade REST 1.11 Python SDK. Compatible with REST API versions 1.0 - 1.11. Developed by [Pure Storage, Inc](http://www.purestorage.com/). Documentations can be found at [purity-fb.readthedocs.io](http://purity-fb.readthedocs.io/).
OpenAPI spec version: 1.11
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class ArrayConnectionKeyResponse(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
#BEGIN_CUSTOM
# IR-51527: Prevent Pytest from attempting to collect this class based on name.
__test__ = False
#END_CUSTOM
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'pagination_info': 'PaginationInfo',
'items': 'list[ArrayConnectionKey]'
}
attribute_map = {
'pagination_info': 'pagination_info',
'items': 'items'
}
def __init__(self, pagination_info=None, items=None): # noqa: E501
"""ArrayConnectionKeyResponse - a model defined in Swagger""" # noqa: E501
self._pagination_info = None
self._items = None
self.discriminator = None
if pagination_info is not None:
self.pagination_info = pagination_info
if items is not None:
self.items = items
@property
def pagination_info(self):
"""Gets the pagination_info of this ArrayConnectionKeyResponse. # noqa: E501
pagination information, only available in GET requests # noqa: E501
:return: The pagination_info of this ArrayConnectionKeyResponse. # noqa: E501
:rtype: PaginationInfo
"""
return self._pagination_info
@pagination_info.setter
def pagination_info(self, pagination_info):
"""Sets the pagination_info of this ArrayConnectionKeyResponse.
pagination information, only available in GET requests # noqa: E501
:param pagination_info: The pagination_info of this ArrayConnectionKeyResponse. # noqa: E501
:type: PaginationInfo
"""
self._pagination_info = pagination_info
@property
def items(self):
"""Gets the items of this ArrayConnectionKeyResponse. # noqa: E501
A list of array connection key objects. # noqa: E501
:return: The items of this ArrayConnectionKeyResponse. # noqa: E501
:rtype: list[ArrayConnectionKey]
"""
return self._items
@items.setter
def items(self, items):
"""Sets the items of this ArrayConnectionKeyResponse.
A list of array connection key objects. # noqa: E501
:param items: The items of this ArrayConnectionKeyResponse. # noqa: E501
:type: list[ArrayConnectionKey]
"""
self._items = items
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ArrayConnectionKeyResponse, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ArrayConnectionKeyResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
py | b412a3425ad69e109882b3b6ed30eaad8f02ec0f | import torch
import torch.nn as nn
from tqdm import tqdm # for beautiful model training updates
def trainer(model,device, trainloader, testloader, optimizer,epochs,criterion,scheduler):
train_losses = [] # to capture train losses over training epochs
train_accuracy = [] # to capture train accuracy over training epochs
test_losses = [] # to capture test losses
test_accuracy = [] # to capture test accuracy
for epoch in range(epochs):
print("EPOCH:", epoch+1)
train(model, device, trainloader, optimizer, epoch,criterion,train_accuracy,train_losses,scheduler) # Training Function
test(model, device, testloader,criterion,test_accuracy,test_losses) # Test Function
return train_accuracy, train_losses, test_accuracy, test_losses
# # Training Function
def train(model, device, train_loader, optimizer, epoch,criterion,train_accuracy,train_losses,scheduler = None):
model.train() # setting the model in training
pbar = tqdm(train_loader) # putting the iterator in pbar
correct = 0 # for accuracy numerator
processed =0 # for accuracy denominator
for batch_idx, (images,labels) in enumerate(pbar):
images, labels = images.to(device),labels.to(device)#sending data to CPU or GPU as per device
optimizer.zero_grad() # setting gradients to zero to avoid accumulation
y_preds = model(images) # forward pass, result captured in y_preds (plural as there are many images in a batch)
# the predictions are in one hot vector
loss = criterion(y_preds,labels) # capturing loss
        train_losses.append(loss.item()) # to capture loss over many epochs (detached from the graph)
loss.backward() # backpropagation
optimizer.step() # updating the params
if scheduler:
scheduler.step()
preds = y_preds.argmax(dim=1, keepdim=True) # get the index of the max log-probability
correct += preds.eq(labels.view_as(preds)).sum().item()
processed += len(images)
pbar.set_description(desc= f'Loss={loss.item()} Batch_id={batch_idx} Accuracy={100*correct/processed:0.2f}')
train_accuracy.append(100*correct/processed)
# # Test Function
def test(model, device, test_loader,criterion,test_accuracy,test_losses) :
model.eval() # setting the model in evaluation mode
test_loss = 0
correct = 0 # for accuracy numerator
with torch.no_grad():
for (images,labels) in test_loader:
images, labels = images.to(device),labels.to(device)#sending data to CPU or GPU as per device
outputs = model(images) # forward pass, result captured in outputs (plural as there are many images in a batch)
# the outputs are in batch size x one hot vector
            test_loss += criterion(outputs, labels).item() # sum up batch loss
preds = outputs.argmax(dim=1, keepdim=True) # get the index of the max log-probability
correct += preds.eq(labels.view_as(preds)).sum().item()
test_loss /= len(test_loader.dataset) # average test loss
test_losses.append(test_loss) # to capture loss over many batches
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\n'.format(
test_loss, correct, len(test_loader.dataset),
100. * correct / len(test_loader.dataset)))
test_accuracy.append(100*correct/len(test_loader.dataset))
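# A minimal usage sketch (illustrative only; the model, the data loaders and the
# hyper-parameters below are assumptions, not defined in this module):
#
#     import torch.optim as optim
#     device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
#     model = MyNet().to(device)                      # hypothetical nn.Module
#     optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
#     criterion = nn.CrossEntropyLoss()
#     train_acc, train_loss, test_acc, test_loss = trainer(
#         model, device, trainloader, testloader, optimizer,
#         epochs=20, criterion=criterion, scheduler=None)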
|
py | b412a4a4c56b0b2bc32fc30a0d846894037b0f08 | class Solution:
def XXX(self, digits: List[int]) -> List[int]:
n = len(digits)
for i in range(n - 1, -1, -1):
if digits[i] == 9:
digits[i] = 0
else:
digits[i] += 1
return digits
return [1] + digits
|
py | b412a6698b94649251b600d34b91292cd3df1010 | #
# Copyright (c) 2018 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from collections import OrderedDict
from os.path import exists
from os.path import isdir
import attr
from commoncode import saneyaml
from plugincode.post_scan import PostScanPlugin
from plugincode.post_scan import post_scan_impl
from scancode import CommandLineOption
from scancode import POST_SCAN_GROUP
@post_scan_impl
class LicensePolicy(PostScanPlugin):
"""
    Add the "license_policy" attribute to a resource if it contains a
detected license key that is found in the license_policy.yml file
"""
resource_attributes = dict(license_policy=attr.ib(default=attr.Factory(dict)))
sort_order = 9
options = [
CommandLineOption(('--license-policy',),
multiple=False,
metavar='FILE',
help='Load a License Policy file and apply it to the scan at the '
'Resource level.',
help_group=POST_SCAN_GROUP)
]
def is_enabled(self, license_policy, **kwargs):
return license_policy
def process_codebase(self, codebase, license_policy, **kwargs):
"""
Populate a license_policy mapping with four attributes: license_key, label,
icon, and color_code at the File Resource level.
"""
if not self.is_enabled(license_policy):
return
if has_policy_duplicates(license_policy):
codebase.errors.append('ERROR: License Policy file contains duplicate entries.\n')
return
# get a list of unique license policies from the license_policy file
policies = load_license_policy(license_policy).get('license_policies', [])
# apply policy to Resources if they contain an offending license
for resource in codebase.walk(topdown=True):
if not resource.is_file:
continue
try:
resource_license_keys = set([entry.get('key') for entry in resource.licenses])
except AttributeError:
# add license_policy regardless if there is license info or not
resource.license_policy = {}
codebase.save_resource(resource)
continue
for key in resource_license_keys:
for policy in policies:
if key == policy.get('license_key'):
# Apply the policy to the Resource
resource.license_policy = policy
codebase.save_resource(resource)
def has_policy_duplicates(license_policy_location):
"""
Returns True if the policy file contains duplicate entries for a specific license
key. Returns False otherwise.
"""
policies = load_license_policy(license_policy_location).get('license_policies', [])
unique_policies = OrderedDict()
if policies == []:
return False
for policy in policies:
license_key = policy.get('license_key')
if license_key in unique_policies.keys():
return True
else:
unique_policies[license_key] = policy
return False
def load_license_policy(license_policy_location):
"""
Return a license_policy dictionary loaded from a license policy file.
"""
if not license_policy_location or not exists(license_policy_location):
return {}
elif isdir(license_policy_location):
return {}
with open(license_policy_location, 'r') as conf:
conf_content = conf.read()
return saneyaml.load(conf_content)
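# For reference, a minimal license_policy.yml accepted by load_license_policy
# could look like the sketch below (illustrative only: the matching logic above
# only requires 'license_key'; 'label', 'icon' and 'color_code' mirror the
# attribute names mentioned in the plugin docstring and are assumptions here):
#
#     license_policies:
#         -   license_key: gpl-2.0
#             label: Restricted License
#             color_code: '#FFA500'
#             icon: icon-warning-sign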
|
py | b412a6b0154a7aad30857379ce307c89b3477b40 | import os
import hashlib
from lib.logger import get_logger
logger = get_logger()
class Md5Generator(object):
def compute_md5(self, file_path):
m = hashlib.md5()
fd = -1
if os.name == 'posix':
fd = os.open(file_path, (os.O_RDONLY | os.O_NONBLOCK | os.O_NOFOLLOW))
else:
fd = os.open(file_path, os.O_RDONLY)
        with os.fdopen(fd, 'rb') as f:  # binary mode so chunks are bytes, matching the b'' sentinel
for chunk in iter(lambda: f.read(hashlib.md5().block_size * 512), b''):
m.update(chunk)
return m.hexdigest()
if __name__ == "__main__":
g = Md5Generator()
logger.info('/dev/urandom md5 -> {}'.format(g.compute_md5('/dev/urandom')))
logger.info('/boot/vmlinuz-3.16.0-43-generic md5 -> {}'.format(g.compute_md5('/boot/vmlinuz-3.16.0-43-generic')))
|
py | b412a6c19a343bad170b92255b32fb6c4df7c402 | from socket import socket, AF_INET, SOCK_STREAM, SOL_SOCKET, SO_REUSEADDR
class Connection:
timeout = 5
address = '127.0.0.1'
port = 1234
players = 2
coding = 'utf-8'
class Client:
    """Network side of the client.
    Creates and connects the socket, and sends and receives messages through it."""
def __init__(self):
self.socket = socket(AF_INET, SOCK_STREAM)
self.socket.connect((Connection.address, Connection.port))
    def send(self, msg):
        """Send a message to the server"""
self.socket.send(msg.encode(Connection.coding))
    def recv(self, size = 1024):
        """Receive a message of the given size from the server"""
return self.socket.recv(size).decode(Connection.coding)
class Server:
    """Network side of the server.
    Creates the socket, accepts client connections, and receives and sends messages to them."""
def __init__(self):
self.socket = socket(AF_INET, SOCK_STREAM)
self.socket.settimeout(Connection.timeout)
self.socket.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
self.socket.bind((Connection.address, Connection.port))
self.socket.listen(Connection.players)
    def accept(self):
        """Accept a connection from a client"""
return self.socket.accept()
    def send(self, conn, msg):
        """Send a message to the client"""
conn.send(msg.encode(Connection.coding))
    def recv(self, conn, size=1024):
        """Receive a message of the given size from the client"""
return conn.recv(size).decode(Connection.coding)
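# A minimal usage sketch (illustrative only; run the two sides in separate
# processes):
#
#     # server process
#     server = Server()
#     conn, addr = server.accept()
#     server.send(conn, 'hello')
#
#     # client process
#     client = Client()
#     print(client.recv())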
|
py | b412a89bd349ae5b64c5bdee1ef4686192fd3308 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import operator
class SegmentTree(object):
def __init__(self, capacity, operation, neutral_element):
"""Build a Segment Tree data structure.
https://en.wikipedia.org/wiki/Segment_tree
Can be used as regular array, but with two
important differences:
a) setting item's value is slightly slower.
It is O(lg capacity) instead of O(1).
b) user has access to an efficient `reduce`
operation which reduces `operation` over
a contiguous subsequence of items in the
array.
        Parameters
        ----------
capacity: int
Total size of the array - must be a power of two.
operation: lambda obj, obj -> obj
            an operation for combining elements (e.g. sum, max);
            must form a mathematical group together with the set of
possible values for array elements.
neutral_element: obj
neutral element for the operation above. eg. float('-inf')
for max and 0 for sum.
"""
assert capacity > 0 and capacity & (capacity - 1) == 0, \
"capacity must be positive and a power of 2."
self._capacity = capacity
self._value = [neutral_element for _ in range(2 * capacity)]
self._operation = operation
def _reduce_helper(self, start, end, node, node_start, node_end):
if start == node_start and end == node_end:
return self._value[node]
mid = (node_start + node_end) // 2
if end <= mid:
return self._reduce_helper(start, end, 2 * node, node_start, mid)
else:
if mid + 1 <= start:
return self._reduce_helper(start, end, 2 * node + 1, mid + 1,
node_end)
else:
return self._operation(
self._reduce_helper(start, mid, 2 * node, node_start, mid),
self._reduce_helper(mid + 1, end, 2 * node + 1, mid + 1,
node_end)
)
def reduce(self, start=0, end=None):
"""Returns result of applying `self.operation`
to a contiguous subsequence of the array.
self.operation(
arr[start], operation(arr[start+1], operation(... arr[end])))
Parameters
----------
start: int
beginning of the subsequence
end: int
            end of the subsequence
Returns
-------
reduced: obj
result of reducing self.operation over the specified range of array
elements.
"""
if end is None:
end = self._capacity - 1
if end < 0:
end += self._capacity
return self._reduce_helper(start, end, 1, 0, self._capacity - 1)
def __setitem__(self, idx, val):
# index of the leaf
idx += self._capacity
self._value[idx] = val
idx //= 2
while idx >= 1:
self._value[idx] = self._operation(
self._value[2 * idx],
self._value[2 * idx + 1])
idx //= 2
def __getitem__(self, idx):
assert 0 <= idx < self._capacity
return self._value[self._capacity + idx]
class SumSegmentTree(SegmentTree):
def __init__(self, capacity):
super(SumSegmentTree, self).__init__(
capacity=capacity,
operation=operator.add,
neutral_element=0.0)
def sum(self, start=0, end=None):
"""Returns arr[start] + ... + arr[end]"""
return super(SumSegmentTree, self).reduce(start, end)
def find_prefixsum_idx(self, prefixsum):
"""Find the highest index `i` in the array such that
        sum(arr[0] + arr[1] + ... + arr[i - 1]) <= prefixsum
if array values are probabilities, this function
allows to sample indexes according to the discrete
probability efficiently.
Parameters
----------
        prefixsum: float
upperbound on the sum of array prefix
Returns
-------
idx: int
highest index satisfying the prefixsum constraint
"""
assert 0 <= prefixsum <= self.sum() + 1e-5
idx = 1
while idx < self._capacity: # while non-leaf
if self._value[2 * idx] > prefixsum:
idx = 2 * idx
else:
prefixsum -= self._value[2 * idx]
idx = 2 * idx + 1
return idx - self._capacity
class MinSegmentTree(SegmentTree):
def __init__(self, capacity):
super(MinSegmentTree, self).__init__(
capacity=capacity,
operation=min,
neutral_element=float('inf'))
def min(self, start=0, end=None):
"""Returns min(arr[start], ..., arr[end])"""
return super(MinSegmentTree, self).reduce(start, end)
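# A short self-contained usage sketch (illustrative only): fill a
# SumSegmentTree with probabilities and sample an index by prefix sum, as
# described in find_prefixsum_idx above.
if __name__ == "__main__":
    tree = SumSegmentTree(capacity=4)
    for i, p in enumerate([0.1, 0.2, 0.3, 0.4]):
        tree[i] = p
    assert abs(tree.sum() - 1.0) < 1e-9
    # cumulative sums are [0.1, 0.3, 0.6, 1.0], so 0.55 falls in slot 2
    assert tree.find_prefixsum_idx(0.55) == 2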
|
py | b412a9804f401cfd2fb31b6008df11a28bb175e9 | import logging
LOG_FORMAT = ('%(levelname) -10s %(asctime)s %(name) -30s %(funcName) '
'-35s %(lineno) -5d: %(message)s')
LOGGER = logging.getLogger(__name__)
logging.basicConfig(filename='module.log',level=logging.INFO, format=LOG_FORMAT)
from playlist import Playlist
class Player:
def __init__(self, name, modules, config, serverHttpRequest, serverMq):
self.name = name
self.serverMq = serverMq;
self.playerModules = dict();
for moduleClass in modules:
            print("loading player module " + moduleClass["moduleName"])
self.playerModules[moduleClass["moduleName"]] = moduleClass["class"](config, self.next)
self.playlist = Playlist(config.get("Server", "name"), serverHttpRequest);
self.currentPlayer = None;
self.job = None
def getPlayer(self, name):
LOGGER.info("getting player " + name)
return self.playerModules[name]
def switchPlayer(self, name):
next = self.getPlayer(name)
if self.currentPlayer is None or next != self.currentPlayer:
if self.currentPlayer is not None:
self.currentPlayer.pause()
self.currentPlayer = next
def play(self, track=None):
track = track or self.playlist.get()
if track is not None:
LOGGER.info("playing (name: " + str(track.name) + ", uri: " + str(track.uri) + ") on: " + track.source)
self.switchPlayer(track.source)
if self.currentPlayer.play(track) is False:
self.next();
else:
self.serverMq.emit("player:status", {'status':'PLAYING', "playingId": track._id, "track": track.jsonFull})
#self.setSendProgressJob()
else:
if self.job is not None:
self.job.remove()
self.serverMq.emit("player:status", {'status':"PAUSED"})
LOGGER.info("playlist empty")
def resume(self):
        LOGGER.info("current track = " + str(self.currentPlayer.currentTrack))
if self.currentPlayer.currentTrack is None:
self.play()
else:
self.currentPlayer.resume()
LOGGER.info("emit 'player:status' => 'PLAYING'")
self.serverMq.emit("player:status", {'status':'PLAYING'})
self.setSendProgressJob()
def pause(self):
self.currentPlayer.pause()
if self.job is not None:
self.job.remove()
self.serverMq.emit("player:status", {'status':"PAUSED"})
def next(self):
next = self.playlist.next();
if next is None:
LOGGER.info("Nothing after")
self.pause()
return None
else:
LOGGER.info("playing next")
self.play(next)
return next;
def previous(self):
previous = self.playlist.previous();
if previous is not None:
self.play(previous)
else:
self.pause()
|
py | b412aa1c2f01827c6205c74220482410591e90a4 | # coding: utf-8
"""
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
import re
import json
from ..utils import sanitize_for_serialization
class DialerCampaignRuleConfigChangeCampaignRuleEntities(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
DialerCampaignRuleConfigChangeCampaignRuleEntities - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'campaigns': 'list[DialerCampaignRuleConfigChangeUriReference]',
'sequences': 'list[DialerCampaignRuleConfigChangeUriReference]'
}
self.attribute_map = {
'campaigns': 'campaigns',
'sequences': 'sequences'
}
self._campaigns = None
self._sequences = None
@property
def campaigns(self):
"""
Gets the campaigns of this DialerCampaignRuleConfigChangeCampaignRuleEntities.
:return: The campaigns of this DialerCampaignRuleConfigChangeCampaignRuleEntities.
:rtype: list[DialerCampaignRuleConfigChangeUriReference]
"""
return self._campaigns
@campaigns.setter
def campaigns(self, campaigns):
"""
Sets the campaigns of this DialerCampaignRuleConfigChangeCampaignRuleEntities.
:param campaigns: The campaigns of this DialerCampaignRuleConfigChangeCampaignRuleEntities.
:type: list[DialerCampaignRuleConfigChangeUriReference]
"""
self._campaigns = campaigns
@property
def sequences(self):
"""
Gets the sequences of this DialerCampaignRuleConfigChangeCampaignRuleEntities.
:return: The sequences of this DialerCampaignRuleConfigChangeCampaignRuleEntities.
:rtype: list[DialerCampaignRuleConfigChangeUriReference]
"""
return self._sequences
@sequences.setter
def sequences(self, sequences):
"""
Sets the sequences of this DialerCampaignRuleConfigChangeCampaignRuleEntities.
:param sequences: The sequences of this DialerCampaignRuleConfigChangeCampaignRuleEntities.
:type: list[DialerCampaignRuleConfigChangeUriReference]
"""
self._sequences = sequences
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_json(self):
"""
Returns the model as raw JSON
"""
return json.dumps(sanitize_for_serialization(self.to_dict()))
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
py | b412aa7d815fbc723365b853ed7456120848b230 | from txENet.enet_server_endpoint import ENetServerEndpoint
class Binding(ENetServerEndpoint):
def __init__(self, reactor, metrics_service, interface, port, maxclients, channels, maxdown=0, maxup=0, max_duplicate_peers=None):
metric_prefix = "{}.{}".format(interface.replace('.', '_'), port)
received_metric_name = "{}.rx".format(metric_prefix)
sent_metric_name = "{}.tx".format(metric_prefix)
peer_count_metric_name = "{}.peer_count".format(metric_prefix)
metrics_service.register_repeating_metric(received_metric_name, 1.0, self._get_and_reset_bytes_received)
metrics_service.register_repeating_metric(sent_metric_name, 1.0, self._get_and_reset_bytes_sent)
metrics_service.register_repeating_metric(peer_count_metric_name, 1.0, self._get_peer_count)
ENetServerEndpoint.__init__(self, reactor, interface, port, maxclients, channels, maxdown=maxdown, maxup=maxup, max_duplicate_peers=max_duplicate_peers)
def _get_and_reset_bytes_received(self):
if self._enet_host is None: return 0
try:
return self._enet_host.total_received_data
finally:
self._enet_host.reset_total_received_data()
def _get_and_reset_bytes_sent(self):
if self._enet_host is None: return 0
try:
return self._enet_host.total_sent_data
finally:
self._enet_host.reset_total_sent_data()
def _get_peer_count(self):
if self._enet_host is None: return 0
return self._enet_host.peer_count
|
py | b412aab9ce0079ff6d82c95cd661d1b6c467a846 | # Generated by Django 3.0.2 on 2020-02-23 16:37
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("aws_environments", "0011_service_last_status"),
]
operations = [
migrations.AddField(
model_name="service",
name="is_deleted",
field=models.BooleanField(default=False),
),
migrations.AlterUniqueTogether(name="service", unique_together=set(),),
]
|
py | b412aafebafab6475d5d1e19cc1debddf4fba51e | import json
from collections import defaultdict
def _parse(raw):
if raw is None or isinstance(raw, (dict, list, int, float)):
return raw
assert isinstance(raw, str)
try:
return json.loads(raw)
except json.JSONDecodeError:
return raw
def _diff_vals(old, new, with_unchanged):
if (
isinstance(new, list)
and isinstance(old, list)
and len(old) == len(new) == 1
):
return _diff_vals(old[0], new[0], with_unchanged)
if not with_unchanged and old == new:
return {}
res = {"old": old, "new": new}
if isinstance(new, (int, float)) and isinstance(old, (int, float)):
res["diff"] = new - old
return res
def _flatten(d):
if not d:
return defaultdict(lambda: None)
if isinstance(d, dict):
from flatten_json import flatten as fltn
return defaultdict(lambda: None, fltn(d, "."))
return defaultdict(lambda: "unable to parse")
def _diff_dicts(old_dict, new_dict, with_unchanged):
new = _flatten(new_dict)
old = _flatten(old_dict)
res = defaultdict(dict)
xpaths = set(old.keys())
xpaths.update(set(new.keys()))
for xpath in xpaths:
old_val = old[xpath]
new_val = new[xpath]
val_diff = _diff_vals(old_val, new_val, with_unchanged)
if val_diff:
res[xpath] = val_diff
return dict(res)
def _diff(old_raw, new_raw, with_unchanged):
old = _parse(old_raw)
new = _parse(new_raw)
if isinstance(new, dict) or isinstance(old, dict):
return _diff_dicts(old, new, with_unchanged)
val_diff = _diff_vals(old, new, with_unchanged)
if val_diff:
return {"": val_diff}
return {}
def diff(old, new, with_unchanged=False):
paths = set(old.keys())
paths.update(set(new.keys()))
res = defaultdict(dict)
for path in paths:
path_diff = _diff(old.get(path), new.get(path), with_unchanged)
if path_diff:
res[path] = path_diff
return dict(res)
def table(header, rows, markdown=False):
from tabulate import tabulate
if not rows and not markdown:
return ""
return tabulate(
rows,
header,
tablefmt="github" if markdown else "plain",
disable_numparse=True,
# None will be shown as "" by default, overriding
missingval="None",
)
def format_dict(d):
ret = {}
for key, val in d.items():
if isinstance(val, dict):
new_val = format_dict(val)
elif isinstance(val, list):
new_val = str(val)
else:
new_val = val
ret[key] = new_val
return ret
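# A small usage sketch (illustrative only; the file name and metric values are
# made up). `diff` takes mappings of path -> raw metric content and returns the
# per-path changes; note that nested dict comparison relies on flatten_json.
if __name__ == "__main__":
    old = {"metrics.json": '{"acc": 0.90}'}
    new = {"metrics.json": '{"acc": 0.92}'}
    # expected shape: {'metrics.json': {'acc': {'old': 0.9, 'new': 0.92, 'diff': ...}}}
    print(diff(old, new))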
|
py | b412ab05a836453b4468a6e43e82be4d24d6a468 | from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from taggit.managers import TaggableManager
from nomadgram.users import models as user_models
# Create your models here.
@python_2_unicode_compatible
class TimeStampedModel(models.Model):
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class Meta:
abstract = True
@python_2_unicode_compatible
class Image(TimeStampedModel) :
"""Image Model"""
file= models.ImageField()
location= models.CharField(max_length=140)
caption = models.TextField()
creator = models.ForeignKey(user_models.User, on_delete=models.CASCADE, null=True, related_name='images')
tags = TaggableManager()
@property
def like_count(self):
return self.likes.all().count()
@property
def comment_count(self):
return self.comments.all().count()
def __str__(self):
return '{}-{}'.format(self.location, self.caption)
class Meta:
ordering = ['-created_at']
@python_2_unicode_compatible
class Comment(TimeStampedModel) :
"""Comment Model"""
message= models.TextField()
creator = models.ForeignKey(user_models.User, on_delete=models.CASCADE, null=True)
image = models.ForeignKey(Image, on_delete=models.CASCADE, null=True, related_name='comments')
def __str__(self):
return self.message
@python_2_unicode_compatible
class Like(TimeStampedModel) :
"""Like Model"""
creator = models.ForeignKey(user_models.User, on_delete=models.CASCADE, null=True)
image = models.ForeignKey(Image, on_delete=models.CASCADE, null=True , related_name='likes')
def __str__(self):
        return 'User: {} - Image Caption: {}'.format(self.creator.username, self.image.caption)
|
py | b412ab6860c74965073aaa002d797c5d3ae38a1d | """
.. _model-gcn:
Graph Convolutional Network
====================================
**Author:** `Qi Huang <https://github.com/HQ01>`_, `Minjie Wang <https://jermainewang.github.io/>`_,
Yu Gai, Quan Gan, Zheng Zhang
This is a gentle introduction of using DGL to implement Graph Convolutional
Networks (Kipf & Welling et al., `Semi-Supervised Classification with Graph
Convolutional Networks <https://arxiv.org/pdf/1609.02907.pdf>`_). We build upon
the :doc:`earlier tutorial <../../basics/3_pagerank>` on DGLGraph and demonstrate
how DGL combines graph with deep neural network and learn structural representations.
"""
###############################################################################
# Model Overview
# ------------------------------------------
# GCN from the perspective of message passing
# ```````````````````````````````````````````````
# We describe a layer of graph convolutional neural network from a message
# passing perspective; the math can be found `here <math_>`_.
# It boils down to the following step, for each node :math:`u`:
#
# 1) Aggregate neighbors' representations :math:`h_{v}` to produce an
# intermediate representation :math:`\hat{h}_u`. 2) Transform the aggregated
# representation :math:`\hat{h}_{u}` with a linear projection followed by a
# non-linearity: :math:`h_{u} = f(W_{u} \hat{h}_u)`.
#
# We will implement step 1 with DGL message passing, and step 2 with the
# ``apply_nodes`` method, whose node UDF will be a PyTorch ``nn.Module``.
#
# GCN implementation with DGL
# ``````````````````````````````````````````
# We first define the message and reduce function as usual. Since the
# aggregation on a node :math:`u` only involves summing over the neighbors'
# representations :math:`h_v`, we can simply use builtin functions:
import dgl
import dgl.function as fn
import torch as th
import torch.nn as nn
import torch.nn.functional as F
from dgl import DGLGraph
gcn_msg = fn.copy_src(src='h', out='m')
gcn_reduce = fn.sum(msg='m', out='h')
###############################################################################
# We then define the node UDF for ``apply_nodes``, which is a fully-connected layer:
class NodeApplyModule(nn.Module):
def __init__(self, in_feats, out_feats, activation):
super(NodeApplyModule, self).__init__()
self.linear = nn.Linear(in_feats, out_feats)
self.activation = activation
def forward(self, node):
h = self.linear(node.data['h'])
h = self.activation(h)
return {'h' : h}
###############################################################################
# We then proceed to define the GCN module. A GCN layer essentially performs
# message passing on all the nodes then applies the `NodeApplyModule`. Note
# that we omitted the dropout in the paper for simplicity.
class GCN(nn.Module):
def __init__(self, in_feats, out_feats, activation):
super(GCN, self).__init__()
self.apply_mod = NodeApplyModule(in_feats, out_feats, activation)
def forward(self, g, feature):
g.ndata['h'] = feature
g.update_all(gcn_msg, gcn_reduce)
g.apply_nodes(func=self.apply_mod)
return g.ndata.pop('h')
###############################################################################
# The forward function is essentially the same as any other commonly seen NNs
# model in PyTorch. We can initialize GCN like any ``nn.Module``. For example,
# let's define a simple neural network consisting of two GCN layers. Suppose we
# are training the classifier for the cora dataset (the input feature size is
# 1433 and the number of classes is 7).
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.gcn1 = GCN(1433, 16, F.relu)
self.gcn2 = GCN(16, 7, F.relu)
def forward(self, g, features):
x = self.gcn1(g, features)
x = self.gcn2(g, x)
return x
net = Net()
print(net)
###############################################################################
# We load the cora dataset using DGL's built-in data module.
from dgl.data import citation_graph as citegrh
def load_cora_data():
data = citegrh.load_cora()
features = th.FloatTensor(data.features)
labels = th.LongTensor(data.labels)
mask = th.ByteTensor(data.train_mask)
g = data.graph
# add self loop
g.remove_edges_from(g.selfloop_edges())
g = DGLGraph(g)
g.add_edges(g.nodes(), g.nodes())
return g, features, labels, mask
###############################################################################
# We then train the network as follows:
import time
import numpy as np
g, features, labels, mask = load_cora_data()
optimizer = th.optim.Adam(net.parameters(), lr=1e-3)
dur = []
for epoch in range(30):
if epoch >=3:
t0 = time.time()
logits = net(g, features)
logp = F.log_softmax(logits, 1)
loss = F.nll_loss(logp[mask], labels[mask])
optimizer.zero_grad()
loss.backward()
optimizer.step()
if epoch >=3:
dur.append(time.time() - t0)
print("Epoch {:05d} | Loss {:.4f} | Time(s) {:.4f}".format(
epoch, loss.item(), np.mean(dur)))
###############################################################################
# .. _math:
#
# GCN in one formula
# ------------------
# Mathematically, the GCN model follows this formula:
#
# :math:`H^{(l+1)} = \sigma(\tilde{D}^{-\frac{1}{2}}\tilde{A}\tilde{D}^{-\frac{1}{2}}H^{(l)}W^{(l)})`
#
# Here, :math:`H^{(l)}` denotes the :math:`l^{th}` layer in the network,
# :math:`\sigma` is the non-linearity, and :math:`W` is the weight matrix for
# this layer. :math:`D` and :math:`A`, as commonly seen, represent degree
# matrix and adjacency matrix, respectively. The ~ is a renormalization trick
# in which we add a self-connection to each node of the graph, and build the
# corresponding degree and adjacency matrix. The shape of the input
# :math:`H^{(0)}` is :math:`N \times D`, where :math:`N` is the number of nodes
# and :math:`D` is the number of input features. We can chain up multiple
# layers as such to produce a node-level representation output with shape
# :math:`N \times F`, where :math:`F` is the dimension of the output node
# feature vector.
#
# The equation can be efficiently implemented using sparse matrix
# multiplication kernels (such as Kipf's
# `pygcn <https://github.com/tkipf/pygcn>`_ code). The above DGL implementation
# in fact has already used this trick due to the use of builtin functions. To
# understand what is under the hood, please read our tutorial on :doc:`PageRank <../../basics/3_pagerank>`.
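###############################################################################
# As a closing aside (not part of the original derivation above), the formula
# can be sanity-checked with dense tensors on a tiny two-node graph. This is
# purely illustrative; practical implementations use sparse kernels as noted.
A = th.tensor([[0., 1.], [1., 0.]])   # adjacency matrix of a two-node graph
A_tilde = A + th.eye(2)               # renormalization trick: add self-loops
D_inv_sqrt = th.diag(A_tilde.sum(1) ** -0.5)
H0 = th.rand(2, 1433)                 # N x D input features
W0 = th.rand(1433, 16)                # D x F layer weights
H1 = th.relu(D_inv_sqrt @ A_tilde @ D_inv_sqrt @ H0 @ W0)
print(H1.shape)                       # torch.Size([2, 16])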
|
py | b412ac22b465965e290948d6491771a7e2723774 | '''
Created by auto_sdk on 2020.11.12
'''
from dingtalk.api.base import RestApi
class OapiMicroappCustomDeleteRequest(RestApi):
def __init__(self,url=None):
RestApi.__init__(self,url)
self.agent_id = None
self.app_corp_id = None
def getHttpMethod(self):
return 'POST'
def getapiname(self):
return 'dingtalk.oapi.microapp.custom.delete'
|
py | b412ac2dc145c89c0eeae0ed851e85eca2c3ca7c | import os
import pandas as pd
import SimpleITK as sitk
import numpy as np
import torch
from blast_ct.localisation.localise_lesions import LesionVolumeLocalisationMNI
from blast_ct.localisation.register_to_template import RegistrationToCTTemplate
from blast_ct.nifti.datasets import FullImageToOverlappingPatchesNiftiDataset
from blast_ct.nifti.patch_samplers import get_patch_and_padding
from blast_ct.nifti.rescale import create_reference_reoriented_image
CLASS_NAMES = ['background', 'iph', 'eah', 'oedema', 'ivh']
def add_predicted_volumes_to_dataframe(dataframe, id_, array, resolution):
voxel_volume_ml = np.prod(resolution) / 1000.
for i, class_name in enumerate(CLASS_NAMES):
if i == 0:
continue
volume = np.sum(array == i) * voxel_volume_ml
dataframe.loc[dataframe['id'] == id_, f'{class_name:s}_predicted_volume_ml'] = volume
return dataframe
def save_image(output_array, input_image, path, resolution=None):
if not os.path.exists(os.path.dirname(path)):
os.makedirs(os.path.dirname(path))
image = sitk.GetImageFromArray(output_array)
reference = create_reference_reoriented_image(input_image)
image.SetOrigin(reference.GetOrigin())
image.SetDirection(reference.GetDirection())
image.SetSpacing(resolution) if resolution is not None else image.SetSpacing(reference.GetSpacing())
image = sitk.Resample(image, input_image, sitk.Transform(), sitk.sitkNearestNeighbor, 0)
sitk.WriteImage(image, path)
return image
def get_num_maps(patches):
shape = patches[0].shape
if len(shape) == 3:
return 1
elif len(shape) == 4:
return shape[0]
else:
raise ValueError('Trying to save a tensor with dimensionality which is not 3 or 4.')
def reconstruct_image(patches, image_shape, center_points, patch_shape):
num_maps = get_num_maps(patches)
assert len(patches) == len(center_points)
padded_shape = tuple(s - s % ps + ps for s, ps in zip(image_shape, patch_shape))
reconstruction = np.zeros(shape=(num_maps,) + padded_shape)
for center, patch in zip(center_points, patches):
slices, _ = get_patch_and_padding(padded_shape, patch_shape, center)
reconstruction[(slice(0, num_maps, 1),) + tuple(slices)] = patch
reconstruction = reconstruction[(slice(0, num_maps, 1),) + tuple(slice(0, s, 1) for s in image_shape)]
reconstruction = reconstruction.transpose(tuple(range(1, reconstruction.ndim)) + (0,))
return reconstruction
class Localisation(object):
def __init__(self, localisation_dir, num_runs, native_space):
if not os.path.exists(localisation_dir):
os.makedirs(localisation_dir)
self.localisation_dir = localisation_dir
asset_dir = os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))),
'data/localisation_files')
target_template_path = os.path.join(asset_dir, 'ct_template.nii.gz')
atlas_label_map_path = os.path.join(asset_dir, 'atlas_template_space.nii.gz')
brain_mask_path = os.path.join(asset_dir, 'ct_template_mask.nii.gz')
roi_dictionary_csv = os.path.join(asset_dir, 'atlas_labels.csv')
self.register = RegistrationToCTTemplate(localisation_dir, target_template_path, num_runs=num_runs)
self.localise = LesionVolumeLocalisationMNI(localisation_dir, native_space, atlas_label_map_path,
brain_mask_path, roi_dictionary_csv, 'prediction')
def __call__(self, data_index, image_id, input_image, prediction):
transform, data_index = self.register(data_index, input_image, image_id)
if transform is not None:
return self.localise(transform, data_index, image_id, prediction)
return data_index
class NiftiPatchSaver(object):
def __init__(self, job_dir, dataloader, write_prob_maps=True, extra_output_names=None, do_localisation=False,
num_reg_runs=1, native_space=True):
assert isinstance(dataloader.dataset, FullImageToOverlappingPatchesNiftiDataset)
self.prediction_dir = os.path.join(job_dir, 'predictions')
self.dataloader = dataloader
self.dataset = dataloader.dataset
self.write_prob_maps = write_prob_maps
self.patches = []
self.extra_output_patches = {key: [] for key in extra_output_names} if extra_output_names is not None else {}
self.image_index = 0
self.data_index = self.dataset.data_index.copy()
localisation_dir = os.path.join(job_dir, 'localisation')
self.localisation = Localisation(localisation_dir, num_reg_runs, native_space) if do_localisation else None
self.prediction_csv_path = os.path.join(self.prediction_dir, 'prediction.csv')
def reset(self):
self.image_index = 0
self.patches = []
if self.extra_output_patches is not None:
self.extra_output_patches = {key: [] for key in self.extra_output_patches}
def append(self, state):
if self.write_prob_maps:
self.patches += list(state['prob'].cpu().detach())
else:
self.patches += list(state['pred'].cpu().detach())
for name in self.extra_output_patches:
self.extra_output_patches[name] += list(state[name].cpu().detach())
def __call__(self, state):
self.append(state)
target_shape, center_points = self.dataset.image_mapping[self.image_index]
target_patch_shape = self.dataset.patch_sampler.target_patch_size
patches_in_image = len(center_points)
if len(self.patches) >= patches_in_image:
to_write = {}
image_id = self.dataset.data_index.loc[self.image_index]['id']
input_image = sitk.ReadImage(self.dataset.data_index.loc[self.image_index][self.dataset.channels[0]])
patches = list(torch.stack(self.patches[0:patches_in_image]).numpy())
self.patches = self.patches[patches_in_image:]
reconstruction = reconstruct_image(patches, target_shape, center_points, target_patch_shape)
if self.write_prob_maps:
to_write['prob_maps'] = reconstruction
to_write['prediction'] = np.argmax(reconstruction, axis=-1).astype(np.float64)
else:
to_write['prediction'] = reconstruction
for name in self.extra_output_patches:
patches = list(torch.stack(self.extra_output_patches[name][0:patches_in_image]).numpy())
self.extra_output_patches[name] = self.extra_output_patches[name][patches_in_image:]
images = reconstruct_image(patches, target_shape, center_points, target_patch_shape)
to_write[name] = images
resolution = self.dataset.resolution
for name, array in to_write.items():
path = os.path.join(self.prediction_dir, f'{str(image_id):s}_{name:s}.nii.gz')
self.data_index.loc[self.data_index['id'] == image_id, name] = path
try:
output_image = save_image(array, input_image, path, resolution)
if name == 'prediction':
resolution_ = resolution if resolution is not None else input_image.GetSpacing()
self.data_index = add_predicted_volumes_to_dataframe(self.data_index, image_id, array,
resolution_)
if self.localisation is not None:
self.data_index = self.localisation(self.data_index, image_id, input_image, output_image)
message = f"{self.image_index:d}/{len(self.dataset.data_index):d}: Saved prediction for {str(image_id)}."
                except Exception:
message = f"{self.image_index:d}/{len(self.dataset.data_index):d}: Error saving prediction for {str(image_id)}."
continue
if os.path.exists(self.prediction_csv_path):
prediction_csv = pd.read_csv(self.prediction_csv_path)
prediction_csv.set_index('id', inplace=True)
prediction_csv.loc[image_id] = self.data_index.loc[self.image_index]
prediction_csv.to_csv(self.prediction_csv_path, index=True, index_label='id')
else:
self.data_index.to_csv(self.prediction_csv_path, index=False)
self.image_index += 1
if self.image_index >= len(self.dataset.image_mapping):
self.reset()
return message
return None
|
py | b412ac4c1350c9f4d059bffb7dcf4a253f89c7b3 | """Test breaking inside functions defined within a BSD archive file libfoo.a."""
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class BSDArchivesTestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
# Find the line number in a(int) to break at.
self.line = line_number(
'a.c', '// Set file and line breakpoint inside a().')
@expectedFailureAll(
oslist=["windows"],
bugnumber="llvm.org/pr24527. Makefile.rules doesn't know how to build static libs on Windows")
def test(self):
"""Break inside a() and b() defined within libfoo.a."""
self.build()
exe = self.getBuildArtifact("a.out")
self.runCmd("file " + exe, CURRENT_EXECUTABLE_SET)
# Break inside a() by file and line first.
lldbutil.run_break_set_by_file_and_line(
self, "a.c", self.line, num_expected_locations=1, loc_exact=True)
self.runCmd("run", RUN_SUCCEEDED)
# The stop reason of the thread should be breakpoint.
self.expect("thread list", STOPPED_DUE_TO_BREAKPOINT,
substrs=['stopped',
'stop reason = breakpoint'])
# Break at a(int) first.
self.expect("frame variable", VARIABLES_DISPLAYED_CORRECTLY,
substrs=['(int) arg = 1'])
self.expect("frame variable __a_global", VARIABLES_DISPLAYED_CORRECTLY,
substrs=['(int) __a_global = 1'])
# Set breakpoint for b() next.
lldbutil.run_break_set_by_symbol(
self, "b", num_expected_locations=1, sym_exact=True)
# Continue the program, we should break at b(int) next.
self.runCmd("continue")
self.expect("thread list", STOPPED_DUE_TO_BREAKPOINT,
substrs=['stopped',
'stop reason = breakpoint'])
self.expect("frame variable", VARIABLES_DISPLAYED_CORRECTLY,
substrs=['(int) arg = 2'])
self.expect("frame variable __b_global", VARIABLES_DISPLAYED_CORRECTLY,
substrs=['(int) __b_global = 2'])
|
py | b412ad4880884f4130451e23c61fe901628db7f1 | # -*- coding: utf-8 -*-
"""
sphinx.websupport.storage.sqlalchemy_db
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
SQLAlchemy table and mapper definitions used by the
:class:`sphinx.websupport.storage.sqlalchemystorage.SQLAlchemyStorage`.
:copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from datetime import datetime
from sqlalchemy import Column, Integer, Text, String, Boolean, \
ForeignKey, DateTime
from sqlalchemy.orm import relation, sessionmaker, aliased
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
Session = sessionmaker()
db_prefix = 'sphinx_'
class Node(Base):
"""Data about a Node in a doctree."""
__tablename__ = db_prefix + 'nodes'
id = Column(String(32), primary_key=True)
document = Column(String(256), nullable=False)
source = Column(Text, nullable=False)
def nested_comments(self, username, moderator):
"""Create a tree of comments. First get all comments that are
descendants of this node, then convert them to a tree form.
:param username: the name of the user to get comments for.
:param moderator: whether the user is moderator.
"""
session = Session()
if username:
# If a username is provided, create a subquery to retrieve all
# votes by this user. We will outerjoin with the comment query
# with this subquery so we have a user's voting information.
sq = session.query(CommentVote).\
filter(CommentVote.username == username).subquery()
cvalias = aliased(CommentVote, sq)
q = session.query(Comment, cvalias.value).outerjoin(cvalias)
else:
# If a username is not provided, we don't need to join with
# CommentVote.
q = session.query(Comment)
# Filter out all comments not descending from this node.
q = q.filter(Comment.path.like(str(self.id) + '.%'))
# Filter out all comments that are not moderated yet.
if not moderator:
q = q.filter(Comment.displayed == True) # noqa
# Retrieve all results. Results must be ordered by Comment.path
# so that we can easily transform them from a flat list to a tree.
results = q.order_by(Comment.path).all()
session.close()
return self._nest_comments(results, username)
def _nest_comments(self, results, username):
"""Given the flat list of results, convert the list into a
tree.
:param results: the flat list of comments
:param username: the name of the user requesting the comments.
"""
comments = []
list_stack = [comments]
for r in results:
if username:
comment, vote = r
else:
comment, vote = (r, 0)
inheritance_chain = comment.path.split('.')[1:]
if len(inheritance_chain) == len(list_stack) + 1:
parent = list_stack[-1][-1]
list_stack.append(parent['children'])
elif len(inheritance_chain) < len(list_stack):
while len(inheritance_chain) < len(list_stack):
list_stack.pop()
list_stack[-1].append(comment.serializable(vote=vote))
return comments
def __init__(self, id, document, source):
self.id = id
self.document = document
self.source = source
class CommentVote(Base):
"""A vote a user has made on a Comment."""
__tablename__ = db_prefix + 'commentvote'
username = Column(String(64), primary_key=True)
comment_id = Column(Integer, ForeignKey(db_prefix + 'comments.id'),
primary_key=True)
# -1 if downvoted, +1 if upvoted, 0 if voted then unvoted.
value = Column(Integer, nullable=False)
def __init__(self, comment_id, username, value):
self.comment_id = comment_id
self.username = username
self.value = value
class Comment(Base):
"""An individual Comment being stored."""
__tablename__ = db_prefix + 'comments'
id = Column(Integer, primary_key=True)
rating = Column(Integer, nullable=False)
time = Column(DateTime, nullable=False)
text = Column(Text, nullable=False)
displayed = Column(Boolean, index=True, default=False)
username = Column(String(64))
proposal = Column(Text)
proposal_diff = Column(Text)
path = Column(String(256), index=True)
node_id = Column(String(32), ForeignKey(db_prefix + 'nodes.id'))
node = relation(Node, backref="comments")
votes = relation(CommentVote, backref="comment",
cascade="all")
def __init__(self, text, displayed, username, rating, time,
proposal, proposal_diff):
self.text = text
self.displayed = displayed
self.username = username
self.rating = rating
self.time = time
self.proposal = proposal
self.proposal_diff = proposal_diff
def set_path(self, node_id, parent_id):
"""Set the materialized path for this comment."""
# This exists because the path can't be set until the session has
# been flushed and this Comment has an id.
if node_id:
self.node_id = node_id
self.path = '%s.%s' % (node_id, self.id)
else:
session = Session()
parent_path = session.query(Comment.path).\
filter(Comment.id == parent_id).one().path
session.close()
self.node_id = parent_path.split('.')[0]
self.path = '%s.%s' % (parent_path, self.id)
def serializable(self, vote=0):
"""Creates a serializable representation of the comment. This is
converted to JSON, and used on the client side.
"""
delta = datetime.now() - self.time
time = {'year': self.time.year,
'month': self.time.month,
'day': self.time.day,
'hour': self.time.hour,
'minute': self.time.minute,
'second': self.time.second,
'iso': self.time.isoformat(),
'delta': self.pretty_delta(delta)}
path = self.path.split('.')
node = path[0]
if len(path) > 2:
parent = path[-2]
else:
parent = None
return {'text': self.text,
'username': self.username or 'Anonymous',
'id': self.id,
'node': node,
'parent': parent,
'rating': self.rating,
'displayed': self.displayed,
'age': delta.seconds,
'time': time,
'vote': vote or 0,
'proposal_diff': self.proposal_diff,
'children': []}
def pretty_delta(self, delta):
"""Create a pretty representation of the Comment's age.
(e.g. 2 minutes).
"""
days = delta.days
seconds = delta.seconds
hours = seconds / 3600
minutes = seconds / 60
if days == 0:
if hours == 0:
dt = (minutes, 'minute')
else:
dt = (hours, 'hour')
else:
dt = (days, 'day')
if dt[0] == 1:
ret = '%s %s ago' % dt
else:
ret = '%s %ss ago' % dt
return ret
|
py | b412ae9aff13a99031d0a163118ba6a2828b6f43 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from collections import OrderedDict
from collections.abc import Iterable
import pickle
import sys
import weakref
import numpy as np
import pytest
import pyarrow as pa
import pyarrow.compute as pc
def test_chunked_array_basics():
data = pa.chunked_array([], type=pa.string())
assert data.type == pa.string()
assert data.to_pylist() == []
data.validate()
data2 = pa.chunked_array([], type='binary')
assert data2.type == pa.binary()
with pytest.raises(ValueError):
pa.chunked_array([])
data = pa.chunked_array([
[1, 2, 3],
[4, 5, 6],
[7, 8, 9]
])
assert isinstance(data.chunks, list)
assert all(isinstance(c, pa.lib.Int64Array) for c in data.chunks)
assert all(isinstance(c, pa.lib.Int64Array) for c in data.iterchunks())
assert len(data.chunks) == 3
assert data.get_total_buffer_size() == sum(c.get_total_buffer_size()
for c in data.iterchunks())
assert sys.getsizeof(data) >= object.__sizeof__(
data) + data.get_total_buffer_size()
assert data.nbytes == 3 * 3 * 8 # 3 items per 3 lists with int64 size(8)
data.validate()
wr = weakref.ref(data)
assert wr() is not None
del data
assert wr() is None
def test_chunked_array_construction():
arr = pa.chunked_array([
[1, 2, 3],
[4, 5, 6],
[7, 8, 9],
])
assert arr.type == pa.int64()
assert len(arr) == 9
assert len(arr.chunks) == 3
arr = pa.chunked_array([
[1, 2, 3],
[4., 5., 6.],
[7, 8, 9],
])
assert arr.type == pa.int64()
assert len(arr) == 9
assert len(arr.chunks) == 3
arr = pa.chunked_array([
[1, 2, 3],
[4., 5., 6.],
[7, 8, 9],
], type=pa.int8())
assert arr.type == pa.int8()
assert len(arr) == 9
assert len(arr.chunks) == 3
arr = pa.chunked_array([
[1, 2, 3],
[]
])
assert arr.type == pa.int64()
assert len(arr) == 3
assert len(arr.chunks) == 2
msg = (
"When passing an empty collection of arrays you must also pass the "
"data type"
)
with pytest.raises(ValueError, match=msg):
assert pa.chunked_array([])
assert pa.chunked_array([], type=pa.string()).type == pa.string()
assert pa.chunked_array([[]]).type == pa.null()
assert pa.chunked_array([[]], type=pa.string()).type == pa.string()
def test_combine_chunks():
# ARROW-77363
arr = pa.array([1, 2])
chunked_arr = pa.chunked_array([arr, arr])
res = chunked_arr.combine_chunks()
expected = pa.array([1, 2, 1, 2])
assert res.equals(expected)
def test_chunked_array_to_numpy():
data = pa.chunked_array([
[1, 2, 3],
[4, 5, 6],
[]
])
arr1 = np.asarray(data)
arr2 = data.to_numpy()
assert isinstance(arr2, np.ndarray)
assert arr2.shape == (6,)
assert np.array_equal(arr1, arr2)
def test_chunked_array_mismatch_types():
with pytest.raises(TypeError):
# Given array types are different
pa.chunked_array([
pa.array([1, 2, 3]),
pa.array([1., 2., 3.])
])
with pytest.raises(TypeError):
# Given array type is different from explicit type argument
pa.chunked_array([pa.array([1, 2, 3])], type=pa.float64())
def test_chunked_array_str():
data = [
pa.array([1, 2, 3]),
pa.array([4, 5, 6])
]
data = pa.chunked_array(data)
assert str(data) == """[
[
1,
2,
3
],
[
4,
5,
6
]
]"""
def test_chunked_array_getitem():
data = [
pa.array([1, 2, 3]),
pa.array([4, 5, 6])
]
data = pa.chunked_array(data)
assert data[1].as_py() == 2
assert data[-1].as_py() == 6
assert data[-6].as_py() == 1
with pytest.raises(IndexError):
data[6]
with pytest.raises(IndexError):
data[-7]
# Ensure this works with numpy scalars
assert data[np.int32(1)].as_py() == 2
data_slice = data[2:4]
assert data_slice.to_pylist() == [3, 4]
data_slice = data[4:-1]
assert data_slice.to_pylist() == [5]
data_slice = data[99:99]
assert data_slice.type == data.type
assert data_slice.to_pylist() == []
def test_chunked_array_slice():
data = [
pa.array([1, 2, 3]),
pa.array([4, 5, 6])
]
data = pa.chunked_array(data)
data_slice = data.slice(len(data))
assert data_slice.type == data.type
assert data_slice.to_pylist() == []
data_slice = data.slice(len(data) + 10)
assert data_slice.type == data.type
assert data_slice.to_pylist() == []
table = pa.Table.from_arrays([data], names=["a"])
table_slice = table.slice(len(table))
assert len(table_slice) == 0
table = pa.Table.from_arrays([data], names=["a"])
table_slice = table.slice(len(table) + 10)
assert len(table_slice) == 0
def test_chunked_array_iter():
data = [
pa.array([0]),
pa.array([1, 2, 3]),
pa.array([4, 5, 6]),
pa.array([7, 8, 9])
]
arr = pa.chunked_array(data)
for i, j in zip(range(10), arr):
assert i == j.as_py()
assert isinstance(arr, Iterable)
def test_chunked_array_equals():
def eq(xarrs, yarrs):
if isinstance(xarrs, pa.ChunkedArray):
x = xarrs
else:
x = pa.chunked_array(xarrs)
if isinstance(yarrs, pa.ChunkedArray):
y = yarrs
else:
y = pa.chunked_array(yarrs)
assert x.equals(y)
assert y.equals(x)
assert x == y
assert x != str(y)
def ne(xarrs, yarrs):
if isinstance(xarrs, pa.ChunkedArray):
x = xarrs
else:
x = pa.chunked_array(xarrs)
if isinstance(yarrs, pa.ChunkedArray):
y = yarrs
else:
y = pa.chunked_array(yarrs)
assert not x.equals(y)
assert not y.equals(x)
assert x != y
eq(pa.chunked_array([], type=pa.int32()),
pa.chunked_array([], type=pa.int32()))
ne(pa.chunked_array([], type=pa.int32()),
pa.chunked_array([], type=pa.int64()))
a = pa.array([0, 2], type=pa.int32())
b = pa.array([0, 2], type=pa.int64())
c = pa.array([0, 3], type=pa.int32())
d = pa.array([0, 2, 0, 3], type=pa.int32())
eq([a], [a])
ne([a], [b])
eq([a, c], [a, c])
eq([a, c], [d])
ne([c, a], [a, c])
# ARROW-4822
assert not pa.chunked_array([], type=pa.int32()).equals(None)
@pytest.mark.parametrize(
('data', 'typ'),
[
([True, False, True, True], pa.bool_()),
([1, 2, 4, 6], pa.int64()),
([1.0, 2.5, None], pa.float64()),
(['a', None, 'b'], pa.string()),
([], pa.list_(pa.uint8())),
([[1, 2], [3]], pa.list_(pa.int64())),
([['a'], None, ['b', 'c']], pa.list_(pa.string())),
([(1, 'a'), (2, 'c'), None],
pa.struct([pa.field('a', pa.int64()), pa.field('b', pa.string())]))
]
)
def test_chunked_array_pickle(data, typ):
arrays = []
while data:
arrays.append(pa.array(data[:2], type=typ))
data = data[2:]
array = pa.chunked_array(arrays, type=typ)
array.validate()
result = pickle.loads(pickle.dumps(array))
result.validate()
assert result.equals(array)
@pytest.mark.pandas
def test_chunked_array_to_pandas():
import pandas as pd
data = [
pa.array([-10, -5, 0, 5, 10])
]
table = pa.table(data, names=['a'])
col = table.column(0)
assert isinstance(col, pa.ChunkedArray)
series = col.to_pandas()
assert isinstance(series, pd.Series)
assert series.shape == (5,)
assert series[0] == -10
assert series.name == 'a'
@pytest.mark.pandas
def test_chunked_array_to_pandas_preserve_name():
# https://issues.apache.org/jira/browse/ARROW-7709
import pandas as pd
import pandas.testing as tm
for data in [
pa.array([1, 2, 3]),
pa.array(pd.Categorical(["a", "b", "a"])),
pa.array(pd.date_range("2012", periods=3)),
pa.array(pd.date_range("2012", periods=3, tz="Europe/Brussels")),
pa.array([1, 2, 3], pa.timestamp("ms")),
pa.array([1, 2, 3], pa.timestamp("ms", "Europe/Brussels"))]:
table = pa.table({"name": data})
result = table.column("name").to_pandas()
assert result.name == "name"
expected = pd.Series(data.to_pandas(), name="name")
tm.assert_series_equal(result, expected)
@pytest.mark.xfail
@pytest.mark.pandas
def test_table_roundtrip_to_pandas_empty_dataframe():
# https://issues.apache.org/jira/browse/ARROW-10643
import pandas as pd
data = pd.DataFrame(index=pd.RangeIndex(0, 10, 1))
table = pa.table(data)
result = table.to_pandas()
# TODO the conversion results in a table with 0 rows if the original
# DataFrame has a RangeIndex (i.e. no index column in the converted
# Arrow table)
assert table.num_rows == 10
assert data.shape == (10, 0)
assert result.shape == (10, 0)
@pytest.mark.pandas
def test_to_pandas_empty_table():
# https://issues.apache.org/jira/browse/ARROW-15370
import pandas as pd
import pandas.testing as tm
df = pd.DataFrame({'a': [1, 2], 'b': [0.1, 0.2]})
table = pa.table(df)
result = table.schema.empty_table().to_pandas()
assert result.shape == (0, 2)
tm.assert_frame_equal(result, df.iloc[:0])
@pytest.mark.pandas
@pytest.mark.nopandas
def test_chunked_array_asarray():
    # ensure this is tested both when pandas is present and when it is not (ARROW-6564)
data = [
pa.array([0]),
pa.array([1, 2, 3])
]
chunked_arr = pa.chunked_array(data)
np_arr = np.asarray(chunked_arr)
assert np_arr.tolist() == [0, 1, 2, 3]
assert np_arr.dtype == np.dtype('int64')
# An optional type can be specified when calling np.asarray
np_arr = np.asarray(chunked_arr, dtype='str')
assert np_arr.tolist() == ['0', '1', '2', '3']
# Types are modified when there are nulls
data = [
pa.array([1, None]),
pa.array([1, 2, 3])
]
chunked_arr = pa.chunked_array(data)
np_arr = np.asarray(chunked_arr)
elements = np_arr.tolist()
assert elements[0] == 1.
assert np.isnan(elements[1])
assert elements[2:] == [1., 2., 3.]
assert np_arr.dtype == np.dtype('float64')
# DictionaryType data will be converted to dense numpy array
arr = pa.DictionaryArray.from_arrays(
pa.array([0, 1, 2, 0, 1]), pa.array(['a', 'b', 'c']))
chunked_arr = pa.chunked_array([arr, arr])
np_arr = np.asarray(chunked_arr)
assert np_arr.dtype == np.dtype('object')
assert np_arr.tolist() == ['a', 'b', 'c', 'a', 'b'] * 2
def test_chunked_array_flatten():
ty = pa.struct([pa.field('x', pa.int16()),
pa.field('y', pa.float32())])
a = pa.array([(1, 2.5), (3, 4.5), (5, 6.5)], type=ty)
carr = pa.chunked_array(a)
x, y = carr.flatten()
assert x.equals(pa.chunked_array(pa.array([1, 3, 5], type=pa.int16())))
assert y.equals(pa.chunked_array(pa.array([2.5, 4.5, 6.5],
type=pa.float32())))
# Empty column
a = pa.array([], type=ty)
carr = pa.chunked_array(a)
x, y = carr.flatten()
assert x.equals(pa.chunked_array(pa.array([], type=pa.int16())))
assert y.equals(pa.chunked_array(pa.array([], type=pa.float32())))
def test_chunked_array_unify_dictionaries():
arr = pa.chunked_array([
pa.array(["foo", "bar", None, "foo"]).dictionary_encode(),
pa.array(["quux", None, "foo"]).dictionary_encode(),
])
assert arr.chunk(0).dictionary.equals(pa.array(["foo", "bar"]))
assert arr.chunk(1).dictionary.equals(pa.array(["quux", "foo"]))
arr = arr.unify_dictionaries()
expected_dict = pa.array(["foo", "bar", "quux"])
assert arr.chunk(0).dictionary.equals(expected_dict)
assert arr.chunk(1).dictionary.equals(expected_dict)
assert arr.to_pylist() == ["foo", "bar", None, "foo", "quux", None, "foo"]
def test_recordbatch_basics():
data = [
pa.array(range(5), type='int16'),
pa.array([-10, -5, 0, None, 10], type='int32')
]
batch = pa.record_batch(data, ['c0', 'c1'])
assert not batch.schema.metadata
assert len(batch) == 5
assert batch.num_rows == 5
assert batch.num_columns == len(data)
# (only the second array has a null bitmap)
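    # Worked out: 5 int16 values = 10 bytes; 5 int32 values = 20 bytes plus a
    # 1-byte validity bitmap for the null entry, i.e. 10 + 21 = 31 bytes total.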
assert batch.get_total_buffer_size() == (5 * 2) + (5 * 4 + 1)
    assert batch.nbytes == (5 * 2) + (5 * 4 + 1)
assert sys.getsizeof(batch) >= object.__sizeof__(
batch) + batch.get_total_buffer_size()
pydict = batch.to_pydict()
assert pydict == OrderedDict([
('c0', [0, 1, 2, 3, 4]),
('c1', [-10, -5, 0, None, 10])
])
assert type(pydict) == dict
with pytest.raises(IndexError):
# bounds checking
batch[2]
# Schema passed explicitly
schema = pa.schema([pa.field('c0', pa.int16(),
metadata={'key': 'value'}),
pa.field('c1', pa.int32())],
metadata={b'foo': b'bar'})
batch = pa.record_batch(data, schema=schema)
assert batch.schema == schema
# schema as first positional argument
batch = pa.record_batch(data, schema)
assert batch.schema == schema
assert str(batch) == """pyarrow.RecordBatch
c0: int16
c1: int32"""
assert batch.to_string(show_metadata=True) == """\
pyarrow.RecordBatch
c0: int16
-- field metadata --
key: 'value'
c1: int32
-- schema metadata --
foo: 'bar'"""
wr = weakref.ref(batch)
assert wr() is not None
del batch
assert wr() is None
def test_recordbatch_equals():
data1 = [
pa.array(range(5), type='int16'),
pa.array([-10, -5, 0, None, 10], type='int32')
]
data2 = [
pa.array(['a', 'b', 'c']),
pa.array([['d'], ['e'], ['f']]),
]
column_names = ['c0', 'c1']
batch = pa.record_batch(data1, column_names)
assert batch == pa.record_batch(data1, column_names)
assert batch.equals(pa.record_batch(data1, column_names))
assert batch != pa.record_batch(data2, column_names)
assert not batch.equals(pa.record_batch(data2, column_names))
batch_meta = pa.record_batch(data1, names=column_names,
metadata={'key': 'value'})
assert batch_meta.equals(batch)
assert not batch_meta.equals(batch, check_metadata=True)
# ARROW-8889
assert not batch.equals(None)
assert batch != "foo"
def test_recordbatch_take():
batch = pa.record_batch(
[pa.array([1, 2, 3, None, 5]),
pa.array(['a', 'b', 'c', 'd', 'e'])],
['f1', 'f2'])
assert batch.take(pa.array([2, 3])).equals(batch.slice(2, 2))
assert batch.take(pa.array([2, None])).equals(
pa.record_batch([pa.array([3, None]), pa.array(['c', None])],
['f1', 'f2']))
def test_recordbatch_column_sets_private_name():
# ARROW-6429
rb = pa.record_batch([pa.array([1, 2, 3, 4])], names=['a0'])
assert rb[0]._name == 'a0'
def test_recordbatch_from_arrays_validate_schema():
# ARROW-6263
arr = pa.array([1, 2])
schema = pa.schema([pa.field('f0', pa.list_(pa.utf8()))])
with pytest.raises(NotImplementedError):
pa.record_batch([arr], schema=schema)
def test_recordbatch_from_arrays_validate_lengths():
# ARROW-2820
data = [pa.array([1]), pa.array(["tokyo", "like", "happy"]),
pa.array(["derek"])]
with pytest.raises(ValueError):
pa.record_batch(data, ['id', 'tags', 'name'])
def test_recordbatch_no_fields():
batch = pa.record_batch([], [])
assert len(batch) == 0
assert batch.num_rows == 0
assert batch.num_columns == 0
def test_recordbatch_from_arrays_invalid_names():
data = [
pa.array(range(5)),
pa.array([-10, -5, 0, 5, 10])
]
with pytest.raises(ValueError):
pa.record_batch(data, names=['a', 'b', 'c'])
with pytest.raises(ValueError):
pa.record_batch(data, names=['a'])
def test_recordbatch_empty_metadata():
data = [
pa.array(range(5)),
pa.array([-10, -5, 0, 5, 10])
]
batch = pa.record_batch(data, ['c0', 'c1'])
assert batch.schema.metadata is None
def test_recordbatch_pickle():
data = [
pa.array(range(5), type='int8'),
pa.array([-10, -5, 0, 5, 10], type='float32')
]
fields = [
pa.field('ints', pa.int8()),
pa.field('floats', pa.float32()),
]
schema = pa.schema(fields, metadata={b'foo': b'bar'})
batch = pa.record_batch(data, schema=schema)
result = pickle.loads(pickle.dumps(batch))
assert result.equals(batch)
assert result.schema == schema
def test_recordbatch_get_field():
data = [
pa.array(range(5)),
pa.array([-10, -5, 0, 5, 10]),
pa.array(range(5, 10))
]
batch = pa.RecordBatch.from_arrays(data, names=('a', 'b', 'c'))
assert batch.field('a').equals(batch.schema.field('a'))
assert batch.field(0).equals(batch.schema.field('a'))
with pytest.raises(KeyError):
batch.field('d')
with pytest.raises(TypeError):
batch.field(None)
with pytest.raises(IndexError):
batch.field(4)
def test_recordbatch_select_column():
data = [
pa.array(range(5)),
pa.array([-10, -5, 0, 5, 10]),
pa.array(range(5, 10))
]
batch = pa.RecordBatch.from_arrays(data, names=('a', 'b', 'c'))
assert batch.column('a').equals(batch.column(0))
with pytest.raises(
KeyError, match='Field "d" does not exist in record batch schema'):
batch.column('d')
with pytest.raises(TypeError):
batch.column(None)
with pytest.raises(IndexError):
batch.column(4)
def test_recordbatch_from_struct_array_invalid():
with pytest.raises(TypeError):
pa.RecordBatch.from_struct_array(pa.array(range(5)))
def test_recordbatch_from_struct_array():
struct_array = pa.array(
[{"ints": 1}, {"floats": 1.0}],
type=pa.struct([("ints", pa.int32()), ("floats", pa.float32())]),
)
result = pa.RecordBatch.from_struct_array(struct_array)
assert result.equals(pa.RecordBatch.from_arrays(
[
pa.array([1, None], type=pa.int32()),
pa.array([None, 1.0], type=pa.float32()),
], ["ints", "floats"]
))
def _table_like_slice_tests(factory):
data = [
pa.array(range(5)),
pa.array([-10, -5, 0, 5, 10])
]
names = ['c0', 'c1']
obj = factory(data, names=names)
sliced = obj.slice(2)
assert sliced.num_rows == 3
expected = factory([x.slice(2) for x in data], names=names)
assert sliced.equals(expected)
sliced2 = obj.slice(2, 2)
expected2 = factory([x.slice(2, 2) for x in data], names=names)
assert sliced2.equals(expected2)
# 0 offset
assert obj.slice(0).equals(obj)
# Slice past end of array
assert len(obj.slice(len(obj))) == 0
with pytest.raises(IndexError):
obj.slice(-1)
# Check __getitem__-based slicing
assert obj.slice(0, 0).equals(obj[:0])
assert obj.slice(0, 2).equals(obj[:2])
assert obj.slice(2, 2).equals(obj[2:4])
assert obj.slice(2, len(obj) - 2).equals(obj[2:])
assert obj.slice(len(obj) - 2, 2).equals(obj[-2:])
assert obj.slice(len(obj) - 4, 2).equals(obj[-4:-2])
def test_recordbatch_slice_getitem():
return _table_like_slice_tests(pa.RecordBatch.from_arrays)
def test_table_slice_getitem():
return _table_like_slice_tests(pa.table)
@pytest.mark.pandas
def test_slice_zero_length_table():
# ARROW-7907: a segfault on this code was fixed after 0.16.0
table = pa.table({'a': pa.array([], type=pa.timestamp('us'))})
table_slice = table.slice(0, 0)
table_slice.to_pandas()
table = pa.table({'a': pa.chunked_array([], type=pa.string())})
table.to_pandas()
def test_recordbatchlist_schema_equals():
a1 = np.array([1], dtype='uint32')
a2 = np.array([4.0, 5.0], dtype='float64')
batch1 = pa.record_batch([pa.array(a1)], ['c1'])
batch2 = pa.record_batch([pa.array(a2)], ['c1'])
with pytest.raises(pa.ArrowInvalid):
pa.Table.from_batches([batch1, batch2])
def test_table_column_sets_private_name():
# ARROW-6429
t = pa.table([pa.array([1, 2, 3, 4])], names=['a0'])
assert t[0]._name == 'a0'
def test_table_equals():
table = pa.Table.from_arrays([], names=[])
assert table.equals(table)
# ARROW-4822
assert not table.equals(None)
other = pa.Table.from_arrays([], names=[], metadata={'key': 'value'})
assert not table.equals(other, check_metadata=True)
assert table.equals(other)
def test_table_from_batches_and_schema():
schema = pa.schema([
pa.field('a', pa.int64()),
pa.field('b', pa.float64()),
])
batch = pa.record_batch([pa.array([1]), pa.array([3.14])],
names=['a', 'b'])
table = pa.Table.from_batches([batch], schema)
assert table.schema.equals(schema)
assert table.column(0) == pa.chunked_array([[1]])
assert table.column(1) == pa.chunked_array([[3.14]])
incompatible_schema = pa.schema([pa.field('a', pa.int64())])
with pytest.raises(pa.ArrowInvalid):
pa.Table.from_batches([batch], incompatible_schema)
incompatible_batch = pa.record_batch([pa.array([1])], ['a'])
with pytest.raises(pa.ArrowInvalid):
pa.Table.from_batches([incompatible_batch], schema)
@pytest.mark.pandas
def test_table_to_batches():
from pandas.testing import assert_frame_equal
import pandas as pd
df1 = pd.DataFrame({'a': list(range(10))})
df2 = pd.DataFrame({'a': list(range(10, 30))})
batch1 = pa.RecordBatch.from_pandas(df1, preserve_index=False)
batch2 = pa.RecordBatch.from_pandas(df2, preserve_index=False)
table = pa.Table.from_batches([batch1, batch2, batch1])
expected_df = pd.concat([df1, df2, df1], ignore_index=True)
batches = table.to_batches()
assert len(batches) == 3
assert_frame_equal(pa.Table.from_batches(batches).to_pandas(),
expected_df)
batches = table.to_batches(max_chunksize=15)
assert list(map(len, batches)) == [10, 15, 5, 10]
assert_frame_equal(table.to_pandas(), expected_df)
assert_frame_equal(pa.Table.from_batches(batches).to_pandas(),
expected_df)
table_from_iter = pa.Table.from_batches(iter([batch1, batch2, batch1]))
assert table.equals(table_from_iter)
def test_table_basics():
data = [
pa.array(range(5), type='int64'),
pa.array([-10, -5, 0, 5, 10], type='int64')
]
table = pa.table(data, names=('a', 'b'))
table.validate()
assert len(table) == 5
assert table.num_rows == 5
assert table.num_columns == 2
assert table.shape == (5, 2)
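    # Two int64 columns of 5 values each with no nulls: 2 * (5 * 8) = 80 bytes.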
assert table.get_total_buffer_size() == 2 * (5 * 8)
assert table.nbytes == 2 * (5 * 8)
assert sys.getsizeof(table) >= object.__sizeof__(
table) + table.get_total_buffer_size()
pydict = table.to_pydict()
assert pydict == OrderedDict([
('a', [0, 1, 2, 3, 4]),
('b', [-10, -5, 0, 5, 10])
])
assert type(pydict) == dict
columns = []
for col in table.itercolumns():
columns.append(col)
for chunk in col.iterchunks():
assert chunk is not None
with pytest.raises(IndexError):
col.chunk(-1)
with pytest.raises(IndexError):
col.chunk(col.num_chunks)
assert table.columns == columns
assert table == pa.table(columns, names=table.column_names)
assert table != pa.table(columns[1:], names=table.column_names[1:])
assert table != columns
wr = weakref.ref(table)
assert wr() is not None
del table
assert wr() is None
def test_table_from_arrays_preserves_column_metadata():
# Added to test https://issues.apache.org/jira/browse/ARROW-3866
arr0 = pa.array([1, 2])
arr1 = pa.array([3, 4])
field0 = pa.field('field1', pa.int64(), metadata=dict(a="A", b="B"))
field1 = pa.field('field2', pa.int64(), nullable=False)
table = pa.Table.from_arrays([arr0, arr1],
schema=pa.schema([field0, field1]))
assert b"a" in table.field(0).metadata
assert table.field(1).nullable is False
def test_table_from_arrays_invalid_names():
data = [
pa.array(range(5)),
pa.array([-10, -5, 0, 5, 10])
]
with pytest.raises(ValueError):
pa.Table.from_arrays(data, names=['a', 'b', 'c'])
with pytest.raises(ValueError):
pa.Table.from_arrays(data, names=['a'])
def test_table_from_lists():
data = [
list(range(5)),
[-10, -5, 0, 5, 10]
]
result = pa.table(data, names=['a', 'b'])
expected = pa.Table.from_arrays(data, names=['a', 'b'])
assert result.equals(expected)
schema = pa.schema([
pa.field('a', pa.uint16()),
pa.field('b', pa.int64())
])
result = pa.table(data, schema=schema)
expected = pa.Table.from_arrays(data, schema=schema)
assert result.equals(expected)
def test_table_pickle():
data = [
pa.chunked_array([[1, 2], [3, 4]], type=pa.uint32()),
pa.chunked_array([["some", "strings", None, ""]], type=pa.string()),
]
schema = pa.schema([pa.field('ints', pa.uint32()),
pa.field('strs', pa.string())],
metadata={b'foo': b'bar'})
table = pa.Table.from_arrays(data, schema=schema)
result = pickle.loads(pickle.dumps(table))
result.validate()
assert result.equals(table)
def test_table_get_field():
data = [
pa.array(range(5)),
pa.array([-10, -5, 0, 5, 10]),
pa.array(range(5, 10))
]
table = pa.Table.from_arrays(data, names=('a', 'b', 'c'))
assert table.field('a').equals(table.schema.field('a'))
assert table.field(0).equals(table.schema.field('a'))
with pytest.raises(KeyError):
table.field('d')
with pytest.raises(TypeError):
table.field(None)
with pytest.raises(IndexError):
table.field(4)
def test_table_select_column():
data = [
pa.array(range(5)),
pa.array([-10, -5, 0, 5, 10]),
pa.array(range(5, 10))
]
table = pa.Table.from_arrays(data, names=('a', 'b', 'c'))
assert table.column('a').equals(table.column(0))
with pytest.raises(KeyError,
match='Field "d" does not exist in table schema'):
table.column('d')
with pytest.raises(TypeError):
table.column(None)
with pytest.raises(IndexError):
table.column(4)
def test_table_column_with_duplicates():
# ARROW-8209
table = pa.table([pa.array([1, 2, 3]),
pa.array([4, 5, 6]),
pa.array([7, 8, 9])], names=['a', 'b', 'a'])
with pytest.raises(KeyError,
match='Field "a" exists 2 times in table schema'):
table.column('a')
def test_table_add_column():
data = [
pa.array(range(5)),
pa.array([-10, -5, 0, 5, 10]),
pa.array(range(5, 10))
]
table = pa.Table.from_arrays(data, names=('a', 'b', 'c'))
new_field = pa.field('d', data[1].type)
t2 = table.add_column(3, new_field, data[1])
t3 = table.append_column(new_field, data[1])
expected = pa.Table.from_arrays(data + [data[1]],
names=('a', 'b', 'c', 'd'))
assert t2.equals(expected)
assert t3.equals(expected)
t4 = table.add_column(0, new_field, data[1])
expected = pa.Table.from_arrays([data[1]] + data,
names=('d', 'a', 'b', 'c'))
assert t4.equals(expected)
def test_table_set_column():
data = [
pa.array(range(5)),
pa.array([-10, -5, 0, 5, 10]),
pa.array(range(5, 10))
]
table = pa.Table.from_arrays(data, names=('a', 'b', 'c'))
new_field = pa.field('d', data[1].type)
t2 = table.set_column(0, new_field, data[1])
expected_data = list(data)
expected_data[0] = data[1]
expected = pa.Table.from_arrays(expected_data,
names=('d', 'b', 'c'))
assert t2.equals(expected)
def test_table_drop():
""" drop one or more columns given labels"""
a = pa.array(range(5))
b = pa.array([-10, -5, 0, 5, 10])
c = pa.array(range(5, 10))
table = pa.Table.from_arrays([a, b, c], names=('a', 'b', 'c'))
t2 = table.drop(['a', 'b'])
exp = pa.Table.from_arrays([c], names=('c',))
assert exp.equals(t2)
# -- raise KeyError if column not in Table
with pytest.raises(KeyError, match="Column 'd' not found"):
table.drop(['d'])
def test_table_remove_column():
data = [
pa.array(range(5)),
pa.array([-10, -5, 0, 5, 10]),
pa.array(range(5, 10))
]
table = pa.Table.from_arrays(data, names=('a', 'b', 'c'))
t2 = table.remove_column(0)
t2.validate()
expected = pa.Table.from_arrays(data[1:], names=('b', 'c'))
assert t2.equals(expected)
def test_table_remove_column_empty():
# ARROW-1865
data = [
pa.array(range(5)),
]
table = pa.Table.from_arrays(data, names=['a'])
t2 = table.remove_column(0)
t2.validate()
assert len(t2) == len(table)
t3 = t2.add_column(0, table.field(0), table[0])
t3.validate()
assert t3.equals(table)
def test_empty_table_with_names():
# ARROW-13784
data = []
names = ["a", "b"]
message = (
'Length of names [(]2[)] does not match length of arrays [(]0[)]')
with pytest.raises(ValueError, match=message):
pa.Table.from_arrays(data, names=names)
def test_empty_table():
table = pa.table([])
assert table.column_names == []
assert table.equals(pa.Table.from_arrays([], []))
def test_table_rename_columns():
data = [
pa.array(range(5)),
pa.array([-10, -5, 0, 5, 10]),
pa.array(range(5, 10))
]
table = pa.Table.from_arrays(data, names=['a', 'b', 'c'])
assert table.column_names == ['a', 'b', 'c']
t2 = table.rename_columns(['eh', 'bee', 'sea'])
t2.validate()
assert t2.column_names == ['eh', 'bee', 'sea']
expected = pa.Table.from_arrays(data, names=['eh', 'bee', 'sea'])
assert t2.equals(expected)
def test_table_flatten():
ty1 = pa.struct([pa.field('x', pa.int16()),
pa.field('y', pa.float32())])
ty2 = pa.struct([pa.field('nest', ty1)])
a = pa.array([(1, 2.5), (3, 4.5)], type=ty1)
b = pa.array([((11, 12.5),), ((13, 14.5),)], type=ty2)
c = pa.array([False, True], type=pa.bool_())
table = pa.Table.from_arrays([a, b, c], names=['a', 'b', 'c'])
t2 = table.flatten()
t2.validate()
expected = pa.Table.from_arrays([
pa.array([1, 3], type=pa.int16()),
pa.array([2.5, 4.5], type=pa.float32()),
pa.array([(11, 12.5), (13, 14.5)], type=ty1),
c],
names=['a.x', 'a.y', 'b.nest', 'c'])
assert t2.equals(expected)
def test_table_combine_chunks():
batch1 = pa.record_batch([pa.array([1]), pa.array(["a"])],
names=['f1', 'f2'])
batch2 = pa.record_batch([pa.array([2]), pa.array(["b"])],
names=['f1', 'f2'])
table = pa.Table.from_batches([batch1, batch2])
combined = table.combine_chunks()
combined.validate()
assert combined.equals(table)
for c in combined.columns:
assert c.num_chunks == 1
def test_table_unify_dictionaries():
batch1 = pa.record_batch([
pa.array(["foo", "bar", None, "foo"]).dictionary_encode(),
pa.array([123, 456, 456, 789]).dictionary_encode(),
pa.array([True, False, None, None])], names=['a', 'b', 'c'])
batch2 = pa.record_batch([
pa.array(["quux", "foo", None, "quux"]).dictionary_encode(),
pa.array([456, 789, 789, None]).dictionary_encode(),
pa.array([False, None, None, True])], names=['a', 'b', 'c'])
table = pa.Table.from_batches([batch1, batch2])
table = table.replace_schema_metadata({b"key1": b"value1"})
assert table.column(0).chunk(0).dictionary.equals(
pa.array(["foo", "bar"]))
assert table.column(0).chunk(1).dictionary.equals(
pa.array(["quux", "foo"]))
assert table.column(1).chunk(0).dictionary.equals(
pa.array([123, 456, 789]))
assert table.column(1).chunk(1).dictionary.equals(
pa.array([456, 789]))
table = table.unify_dictionaries(pa.default_memory_pool())
expected_dict_0 = pa.array(["foo", "bar", "quux"])
expected_dict_1 = pa.array([123, 456, 789])
assert table.column(0).chunk(0).dictionary.equals(expected_dict_0)
assert table.column(0).chunk(1).dictionary.equals(expected_dict_0)
assert table.column(1).chunk(0).dictionary.equals(expected_dict_1)
assert table.column(1).chunk(1).dictionary.equals(expected_dict_1)
assert table.to_pydict() == {
'a': ["foo", "bar", None, "foo", "quux", "foo", None, "quux"],
'b': [123, 456, 456, 789, 456, 789, 789, None],
'c': [True, False, None, None, False, None, None, True],
}
assert table.schema.metadata == {b"key1": b"value1"}
def test_concat_tables():
data = [
list(range(5)),
[-10., -5., 0., 5., 10.]
]
data2 = [
list(range(5, 10)),
[1., 2., 3., 4., 5.]
]
t1 = pa.Table.from_arrays([pa.array(x) for x in data],
names=('a', 'b'))
t2 = pa.Table.from_arrays([pa.array(x) for x in data2],
names=('a', 'b'))
result = pa.concat_tables([t1, t2])
result.validate()
assert len(result) == 10
expected = pa.Table.from_arrays([pa.array(x + y)
for x, y in zip(data, data2)],
names=('a', 'b'))
assert result.equals(expected)
def test_concat_tables_none_table():
# ARROW-11997
with pytest.raises(AttributeError):
pa.concat_tables([None])
@pytest.mark.pandas
def test_concat_tables_with_different_schema_metadata():
import pandas as pd
schema = pa.schema([
pa.field('a', pa.string()),
pa.field('b', pa.string()),
])
values = list('abcdefgh')
df1 = pd.DataFrame({'a': values, 'b': values})
df2 = pd.DataFrame({'a': [np.nan] * 8, 'b': values})
table1 = pa.Table.from_pandas(df1, schema=schema, preserve_index=False)
table2 = pa.Table.from_pandas(df2, schema=schema, preserve_index=False)
assert table1.schema.equals(table2.schema)
assert not table1.schema.equals(table2.schema, check_metadata=True)
table3 = pa.concat_tables([table1, table2])
assert table1.schema.equals(table3.schema, check_metadata=True)
assert table2.schema.equals(table3.schema)
def test_concat_tables_with_promotion():
t1 = pa.Table.from_arrays(
[pa.array([1, 2], type=pa.int64())], ["int64_field"])
t2 = pa.Table.from_arrays(
[pa.array([1.0, 2.0], type=pa.float32())], ["float_field"])
result = pa.concat_tables([t1, t2], promote=True)
assert result.equals(pa.Table.from_arrays([
pa.array([1, 2, None, None], type=pa.int64()),
pa.array([None, None, 1.0, 2.0], type=pa.float32()),
], ["int64_field", "float_field"]))
def test_concat_tables_with_promotion_error():
t1 = pa.Table.from_arrays(
[pa.array([1, 2], type=pa.int64())], ["f"])
t2 = pa.Table.from_arrays(
[pa.array([1, 2], type=pa.float32())], ["f"])
with pytest.raises(pa.ArrowInvalid):
pa.concat_tables([t1, t2], promote=True)
def test_table_negative_indexing():
data = [
pa.array(range(5)),
pa.array([-10, -5, 0, 5, 10]),
pa.array([1.0, 2.0, 3.0, 4.0, 5.0]),
pa.array(['ab', 'bc', 'cd', 'de', 'ef']),
]
table = pa.Table.from_arrays(data, names=tuple('abcd'))
assert table[-1].equals(table[3])
assert table[-2].equals(table[2])
assert table[-3].equals(table[1])
assert table[-4].equals(table[0])
with pytest.raises(IndexError):
table[-5]
with pytest.raises(IndexError):
table[4]
def test_table_cast_to_incompatible_schema():
data = [
pa.array(range(5)),
pa.array([-10, -5, 0, 5, 10]),
]
table = pa.Table.from_arrays(data, names=tuple('ab'))
target_schema1 = pa.schema([
pa.field('A', pa.int32()),
pa.field('b', pa.int16()),
])
target_schema2 = pa.schema([
pa.field('a', pa.int32()),
])
message = ("Target schema's field names are not matching the table's "
"field names:.*")
with pytest.raises(ValueError, match=message):
table.cast(target_schema1)
with pytest.raises(ValueError, match=message):
table.cast(target_schema2)
def test_table_safe_casting():
data = [
pa.array(range(5), type=pa.int64()),
pa.array([-10, -5, 0, 5, 10], type=pa.int32()),
pa.array([1.0, 2.0, 3.0, 4.0, 5.0], type=pa.float64()),
pa.array(['ab', 'bc', 'cd', 'de', 'ef'], type=pa.string())
]
table = pa.Table.from_arrays(data, names=tuple('abcd'))
expected_data = [
pa.array(range(5), type=pa.int32()),
pa.array([-10, -5, 0, 5, 10], type=pa.int16()),
pa.array([1, 2, 3, 4, 5], type=pa.int64()),
pa.array(['ab', 'bc', 'cd', 'de', 'ef'], type=pa.string())
]
expected_table = pa.Table.from_arrays(expected_data, names=tuple('abcd'))
target_schema = pa.schema([
pa.field('a', pa.int32()),
pa.field('b', pa.int16()),
pa.field('c', pa.int64()),
pa.field('d', pa.string())
])
casted_table = table.cast(target_schema)
assert casted_table.equals(expected_table)
def test_table_unsafe_casting():
data = [
pa.array(range(5), type=pa.int64()),
pa.array([-10, -5, 0, 5, 10], type=pa.int32()),
pa.array([1.1, 2.2, 3.3, 4.4, 5.5], type=pa.float64()),
pa.array(['ab', 'bc', 'cd', 'de', 'ef'], type=pa.string())
]
table = pa.Table.from_arrays(data, names=tuple('abcd'))
expected_data = [
pa.array(range(5), type=pa.int32()),
pa.array([-10, -5, 0, 5, 10], type=pa.int16()),
pa.array([1, 2, 3, 4, 5], type=pa.int64()),
pa.array(['ab', 'bc', 'cd', 'de', 'ef'], type=pa.string())
]
expected_table = pa.Table.from_arrays(expected_data, names=tuple('abcd'))
target_schema = pa.schema([
pa.field('a', pa.int32()),
pa.field('b', pa.int16()),
pa.field('c', pa.int64()),
pa.field('d', pa.string())
])
with pytest.raises(pa.ArrowInvalid, match='truncated'):
table.cast(target_schema)
casted_table = table.cast(target_schema, safe=False)
assert casted_table.equals(expected_table)
def test_invalid_table_construct():
array = np.array([0, 1], dtype=np.uint8)
u8 = pa.uint8()
arrays = [pa.array(array, type=u8), pa.array(array[1:], type=u8)]
with pytest.raises(pa.lib.ArrowInvalid):
pa.Table.from_arrays(arrays, names=["a1", "a2"])
@pytest.mark.parametrize('data, klass', [
((['', 'foo', 'bar'], [4.5, 5, None]), list),
((['', 'foo', 'bar'], [4.5, 5, None]), pa.array),
(([[''], ['foo', 'bar']], [[4.5], [5., None]]), pa.chunked_array),
])
def test_from_arrays_schema(data, klass):
data = [klass(data[0]), klass(data[1])]
schema = pa.schema([('strs', pa.utf8()), ('floats', pa.float32())])
table = pa.Table.from_arrays(data, schema=schema)
assert table.num_columns == 2
assert table.num_rows == 3
assert table.schema == schema
# length of data and schema not matching
schema = pa.schema([('strs', pa.utf8())])
with pytest.raises(ValueError):
pa.Table.from_arrays(data, schema=schema)
# with different but compatible schema
schema = pa.schema([('strs', pa.utf8()), ('floats', pa.float32())])
table = pa.Table.from_arrays(data, schema=schema)
assert pa.types.is_float32(table.column('floats').type)
assert table.num_columns == 2
assert table.num_rows == 3
assert table.schema == schema
# with different and incompatible schema
schema = pa.schema([('strs', pa.utf8()), ('floats', pa.timestamp('s'))])
with pytest.raises((NotImplementedError, TypeError)):
        pa.Table.from_arrays(data, schema=schema)
# Cannot pass both schema and metadata / names
with pytest.raises(ValueError):
pa.Table.from_arrays(data, schema=schema, names=['strs', 'floats'])
with pytest.raises(ValueError):
pa.Table.from_arrays(data, schema=schema, metadata={b'foo': b'bar'})
@pytest.mark.parametrize(
('cls'),
[
(pa.Table),
(pa.RecordBatch)
]
)
def test_table_from_pydict(cls):
table = cls.from_pydict({})
assert table.num_columns == 0
assert table.num_rows == 0
assert table.schema == pa.schema([])
assert table.to_pydict() == {}
schema = pa.schema([('strs', pa.utf8()), ('floats', pa.float64())])
# With lists as values
data = OrderedDict([('strs', ['', 'foo', 'bar']),
('floats', [4.5, 5, None])])
table = cls.from_pydict(data)
assert table.num_columns == 2
assert table.num_rows == 3
assert table.schema == schema
assert table.to_pydict() == data
# With metadata and inferred schema
metadata = {b'foo': b'bar'}
schema = schema.with_metadata(metadata)
table = cls.from_pydict(data, metadata=metadata)
assert table.schema == schema
assert table.schema.metadata == metadata
assert table.to_pydict() == data
# With explicit schema
table = cls.from_pydict(data, schema=schema)
assert table.schema == schema
assert table.schema.metadata == metadata
assert table.to_pydict() == data
# Cannot pass both schema and metadata
with pytest.raises(ValueError):
cls.from_pydict(data, schema=schema, metadata=metadata)
# Non-convertible values given schema
with pytest.raises(TypeError):
cls.from_pydict({'c0': [0, 1, 2]},
schema=pa.schema([("c0", pa.string())]))
# Missing schema fields from the passed mapping
with pytest.raises(KeyError, match="doesn\'t contain.* c, d"):
cls.from_pydict(
{'a': [1, 2, 3], 'b': [3, 4, 5]},
schema=pa.schema([
('a', pa.int64()),
('c', pa.int32()),
('d', pa.int16())
])
)
# Passed wrong schema type
with pytest.raises(TypeError):
cls.from_pydict({'a': [1, 2, 3]}, schema={})
@pytest.mark.parametrize('data, klass', [
((['', 'foo', 'bar'], [4.5, 5, None]), pa.array),
(([[''], ['foo', 'bar']], [[4.5], [5., None]]), pa.chunked_array),
])
def test_table_from_pydict_arrow_arrays(data, klass):
data = OrderedDict([('strs', klass(data[0])), ('floats', klass(data[1]))])
schema = pa.schema([('strs', pa.utf8()), ('floats', pa.float64())])
# With arrays as values
table = pa.Table.from_pydict(data)
assert table.num_columns == 2
assert table.num_rows == 3
assert table.schema == schema
# With explicit (matching) schema
table = pa.Table.from_pydict(data, schema=schema)
assert table.num_columns == 2
assert table.num_rows == 3
assert table.schema == schema
# with different but compatible schema
schema = pa.schema([('strs', pa.utf8()), ('floats', pa.float32())])
table = pa.Table.from_pydict(data, schema=schema)
assert pa.types.is_float32(table.column('floats').type)
assert table.num_columns == 2
assert table.num_rows == 3
assert table.schema == schema
# with different and incompatible schema
schema = pa.schema([('strs', pa.utf8()), ('floats', pa.timestamp('s'))])
with pytest.raises((NotImplementedError, TypeError)):
pa.Table.from_pydict(data, schema=schema)
@pytest.mark.parametrize('data, klass', [
((['', 'foo', 'bar'], [4.5, 5, None]), list),
((['', 'foo', 'bar'], [4.5, 5, None]), pa.array),
(([[''], ['foo', 'bar']], [[4.5], [5., None]]), pa.chunked_array),
])
def test_table_from_pydict_schema(data, klass):
# passed schema is source of truth for the columns
data = OrderedDict([('strs', klass(data[0])), ('floats', klass(data[1]))])
# schema has columns not present in data -> error
schema = pa.schema([('strs', pa.utf8()), ('floats', pa.float64()),
('ints', pa.int64())])
with pytest.raises(KeyError, match='ints'):
pa.Table.from_pydict(data, schema=schema)
# data has columns not present in schema -> ignored
schema = pa.schema([('strs', pa.utf8())])
table = pa.Table.from_pydict(data, schema=schema)
assert table.num_columns == 1
assert table.schema == schema
assert table.column_names == ['strs']
@pytest.mark.parametrize(
('cls'),
[
(pa.Table),
(pa.RecordBatch)
]
)
def test_table_from_pylist(cls):
table = cls.from_pylist([])
assert table.num_columns == 0
assert table.num_rows == 0
assert table.schema == pa.schema([])
assert table.to_pylist() == []
schema = pa.schema([('strs', pa.utf8()), ('floats', pa.float64())])
# With lists as values
data = [{'strs': '', 'floats': 4.5},
{'strs': 'foo', 'floats': 5},
{'strs': 'bar', 'floats': None}]
table = cls.from_pylist(data)
assert table.num_columns == 2
assert table.num_rows == 3
assert table.schema == schema
assert table.to_pylist() == data
# With metadata and inferred schema
metadata = {b'foo': b'bar'}
schema = schema.with_metadata(metadata)
table = cls.from_pylist(data, metadata=metadata)
assert table.schema == schema
assert table.schema.metadata == metadata
assert table.to_pylist() == data
# With explicit schema
table = cls.from_pylist(data, schema=schema)
assert table.schema == schema
assert table.schema.metadata == metadata
assert table.to_pylist() == data
# Cannot pass both schema and metadata
with pytest.raises(ValueError):
cls.from_pylist(data, schema=schema, metadata=metadata)
# Non-convertible values given schema
with pytest.raises(TypeError):
cls.from_pylist([{'c0': 0}, {'c0': 1}, {'c0': 2}],
schema=pa.schema([("c0", pa.string())]))
# Missing schema fields in the passed mapping translate to None
schema = pa.schema([('a', pa.int64()),
('c', pa.int32()),
('d', pa.int16())
])
table = cls.from_pylist(
[{'a': 1, 'b': 3}, {'a': 2, 'b': 4}, {'a': 3, 'b': 5}],
schema=schema
)
data = [{'a': 1, 'c': None, 'd': None},
{'a': 2, 'c': None, 'd': None},
{'a': 3, 'c': None, 'd': None}]
assert table.schema == schema
assert table.to_pylist() == data
# Passed wrong schema type
with pytest.raises(TypeError):
cls.from_pylist([{'a': 1}, {'a': 2}, {'a': 3}], schema={})
    # If the dictionaries of rows are not the same length
data = [{'strs': '', 'floats': 4.5},
{'floats': 5},
{'strs': 'bar'}]
data2 = [{'strs': '', 'floats': 4.5},
{'strs': None, 'floats': 5},
{'strs': 'bar', 'floats': None}]
table = cls.from_pylist(data)
assert table.num_columns == 2
assert table.num_rows == 3
assert table.to_pylist() == data2
data = [{'strs': ''},
{'strs': 'foo', 'floats': 5},
{'floats': None}]
data2 = [{'strs': ''},
{'strs': 'foo'},
{'strs': None}]
table = cls.from_pylist(data)
assert table.num_columns == 1
assert table.num_rows == 3
assert table.to_pylist() == data2
@pytest.mark.pandas
def test_table_from_pandas_schema():
# passed schema is source of truth for the columns
import pandas as pd
df = pd.DataFrame(OrderedDict([('strs', ['', 'foo', 'bar']),
('floats', [4.5, 5, None])]))
# with different but compatible schema
schema = pa.schema([('strs', pa.utf8()), ('floats', pa.float32())])
table = pa.Table.from_pandas(df, schema=schema)
assert pa.types.is_float32(table.column('floats').type)
assert table.schema.remove_metadata() == schema
# with different and incompatible schema
schema = pa.schema([('strs', pa.utf8()), ('floats', pa.timestamp('s'))])
with pytest.raises((NotImplementedError, TypeError)):
pa.Table.from_pandas(df, schema=schema)
# schema has columns not present in data -> error
schema = pa.schema([('strs', pa.utf8()), ('floats', pa.float64()),
('ints', pa.int64())])
with pytest.raises(KeyError, match='ints'):
pa.Table.from_pandas(df, schema=schema)
# data has columns not present in schema -> ignored
schema = pa.schema([('strs', pa.utf8())])
table = pa.Table.from_pandas(df, schema=schema)
assert table.num_columns == 1
assert table.schema.remove_metadata() == schema
assert table.column_names == ['strs']
@pytest.mark.pandas
def test_table_factory_function():
import pandas as pd
    # Put in wrong order to make sure that it lines up with the schema
d = OrderedDict([('b', ['a', 'b', 'c']), ('a', [1, 2, 3])])
d_explicit = {'b': pa.array(['a', 'b', 'c'], type='string'),
'a': pa.array([1, 2, 3], type='int32')}
schema = pa.schema([('a', pa.int32()), ('b', pa.string())])
df = pd.DataFrame(d)
table1 = pa.table(df)
table2 = pa.Table.from_pandas(df)
assert table1.equals(table2)
table1 = pa.table(df, schema=schema)
table2 = pa.Table.from_pandas(df, schema=schema)
assert table1.equals(table2)
table1 = pa.table(d_explicit)
table2 = pa.Table.from_pydict(d_explicit)
assert table1.equals(table2)
# schema coerces type
table1 = pa.table(d, schema=schema)
table2 = pa.Table.from_pydict(d, schema=schema)
assert table1.equals(table2)
def test_table_factory_function_args():
# from_pydict not accepting names:
with pytest.raises(ValueError):
pa.table({'a': [1, 2, 3]}, names=['a'])
# backwards compatibility for schema as first positional argument
schema = pa.schema([('a', pa.int32())])
table = pa.table({'a': pa.array([1, 2, 3], type=pa.int64())}, schema)
assert table.column('a').type == pa.int32()
# from_arrays: accept both names and schema as positional first argument
data = [pa.array([1, 2, 3], type='int64')]
names = ['a']
table = pa.table(data, names)
assert table.column_names == names
schema = pa.schema([('a', pa.int64())])
table = pa.table(data, schema)
assert table.column_names == names
@pytest.mark.pandas
def test_table_factory_function_args_pandas():
import pandas as pd
# from_pandas not accepting names or metadata:
with pytest.raises(ValueError):
pa.table(pd.DataFrame({'a': [1, 2, 3]}), names=['a'])
with pytest.raises(ValueError):
pa.table(pd.DataFrame({'a': [1, 2, 3]}), metadata={b'foo': b'bar'})
# backwards compatibility for schema as first positional argument
schema = pa.schema([('a', pa.int32())])
table = pa.table(pd.DataFrame({'a': [1, 2, 3]}), schema)
assert table.column('a').type == pa.int32()
def test_factory_functions_invalid_input():
with pytest.raises(TypeError, match="Expected pandas DataFrame, python"):
pa.table("invalid input")
with pytest.raises(TypeError, match="Expected pandas DataFrame"):
pa.record_batch("invalid input")
def test_table_repr_to_string():
# Schema passed explicitly
schema = pa.schema([pa.field('c0', pa.int16(),
metadata={'key': 'value'}),
pa.field('c1', pa.int32())],
metadata={b'foo': b'bar'})
tab = pa.table([pa.array([1, 2, 3, 4], type='int16'),
pa.array([10, 20, 30, 40], type='int32')], schema=schema)
assert str(tab) == """pyarrow.Table
c0: int16
c1: int32
----
c0: [[1,2,3,4]]
c1: [[10,20,30,40]]"""
assert tab.to_string(show_metadata=True) == """\
pyarrow.Table
c0: int16
-- field metadata --
key: 'value'
c1: int32
-- schema metadata --
foo: 'bar'"""
assert tab.to_string(preview_cols=5) == """\
pyarrow.Table
c0: int16
c1: int32
----
c0: [[1,2,3,4]]
c1: [[10,20,30,40]]"""
assert tab.to_string(preview_cols=1) == """\
pyarrow.Table
c0: int16
c1: int32
----
c0: [[1,2,3,4]]
..."""
def test_table_repr_to_string_ellipsis():
# Schema passed explicitly
schema = pa.schema([pa.field('c0', pa.int16(),
metadata={'key': 'value'}),
pa.field('c1', pa.int32())],
metadata={b'foo': b'bar'})
tab = pa.table([pa.array([1, 2, 3, 4]*10, type='int16'),
pa.array([10, 20, 30, 40]*10, type='int32')],
schema=schema)
assert str(tab) == """pyarrow.Table
c0: int16
c1: int32
----
c0: [[1,2,3,4,1,2,3,4,1,2,...,3,4,1,2,3,4,1,2,3,4]]
c1: [[10,20,30,40,10,20,30,40,10,20,...,30,40,10,20,30,40,10,20,30,40]]"""
def test_table_function_unicode_schema():
col_a = "äääh"
col_b = "öööf"
    # Put in wrong order to make sure that it lines up with the schema
d = OrderedDict([(col_b, ['a', 'b', 'c']), (col_a, [1, 2, 3])])
schema = pa.schema([(col_a, pa.int32()), (col_b, pa.string())])
result = pa.table(d, schema=schema)
assert result[0].chunk(0).equals(pa.array([1, 2, 3], type='int32'))
assert result[1].chunk(0).equals(pa.array(['a', 'b', 'c'], type='string'))
def test_table_take_vanilla_functionality():
table = pa.table(
[pa.array([1, 2, 3, None, 5]),
pa.array(['a', 'b', 'c', 'd', 'e'])],
['f1', 'f2'])
assert table.take(pa.array([2, 3])).equals(table.slice(2, 2))
def test_table_take_null_index():
table = pa.table(
[pa.array([1, 2, 3, None, 5]),
pa.array(['a', 'b', 'c', 'd', 'e'])],
['f1', 'f2'])
result_with_null_index = pa.table(
[pa.array([1, None]),
pa.array(['a', None])],
['f1', 'f2'])
assert table.take(pa.array([0, None])).equals(result_with_null_index)
def test_table_take_non_consecutive():
table = pa.table(
[pa.array([1, 2, 3, None, 5]),
pa.array(['a', 'b', 'c', 'd', 'e'])],
['f1', 'f2'])
result_non_consecutive = pa.table(
[pa.array([2, None]),
pa.array(['b', 'd'])],
['f1', 'f2'])
assert table.take(pa.array([1, 3])).equals(result_non_consecutive)
def test_table_select():
a1 = pa.array([1, 2, 3, None, 5])
a2 = pa.array(['a', 'b', 'c', 'd', 'e'])
a3 = pa.array([[1, 2], [3, 4], [5, 6], None, [9, 10]])
table = pa.table([a1, a2, a3], ['f1', 'f2', 'f3'])
# selecting with string names
result = table.select(['f1'])
expected = pa.table([a1], ['f1'])
assert result.equals(expected)
result = table.select(['f3', 'f2'])
expected = pa.table([a3, a2], ['f3', 'f2'])
assert result.equals(expected)
# selecting with integer indices
result = table.select([0])
expected = pa.table([a1], ['f1'])
assert result.equals(expected)
result = table.select([2, 1])
expected = pa.table([a3, a2], ['f3', 'f2'])
assert result.equals(expected)
# preserve metadata
table2 = table.replace_schema_metadata({"a": "test"})
result = table2.select(["f1", "f2"])
assert b"a" in result.schema.metadata
# selecting non-existing column raises
with pytest.raises(KeyError, match='Field "f5" does not exist'):
table.select(['f5'])
with pytest.raises(IndexError, match="index out of bounds"):
table.select([5])
# duplicate selection gives duplicated names in resulting table
result = table.select(['f2', 'f2'])
expected = pa.table([a2, a2], ['f2', 'f2'])
assert result.equals(expected)
# selection duplicated column raises
table = pa.table([a1, a2, a3], ['f1', 'f2', 'f1'])
with pytest.raises(KeyError, match='Field "f1" exists 2 times'):
table.select(['f1'])
result = table.select(['f2'])
expected = pa.table([a2], ['f2'])
assert result.equals(expected)
def test_table_group_by():
def sorted_by_keys(d):
# Ensure a guaranteed order of keys for aggregation results.
if "keys2" in d:
keys = tuple(zip(d["keys"], d["keys2"]))
else:
keys = d["keys"]
sorted_keys = sorted(keys)
sorted_d = {"keys": sorted(d["keys"])}
for entry in d:
if entry == "keys":
continue
values = dict(zip(keys, d[entry]))
for k in sorted_keys:
sorted_d.setdefault(entry, []).append(values[k])
return sorted_d
table = pa.table([
pa.array(["a", "a", "b", "b", "c"]),
pa.array(["X", "X", "Y", "Z", "Z"]),
pa.array([1, 2, 3, 4, 5]),
pa.array([10, 20, 30, 40, 50])
], names=["keys", "keys2", "values", "bigvalues"])
r = table.group_by("keys").aggregate([
("values", "hash_sum")
])
assert sorted_by_keys(r.to_pydict()) == {
"keys": ["a", "b", "c"],
"values_sum": [3, 7, 5]
}
r = table.group_by("keys").aggregate([
("values", "hash_sum"),
("values", "hash_count")
])
assert sorted_by_keys(r.to_pydict()) == {
"keys": ["a", "b", "c"],
"values_sum": [3, 7, 5],
"values_count": [2, 2, 1]
}
# Test without hash_ prefix
r = table.group_by("keys").aggregate([
("values", "sum")
])
assert sorted_by_keys(r.to_pydict()) == {
"keys": ["a", "b", "c"],
"values_sum": [3, 7, 5]
}
r = table.group_by("keys").aggregate([
("values", "max"),
("bigvalues", "sum")
])
assert sorted_by_keys(r.to_pydict()) == {
"keys": ["a", "b", "c"],
"values_max": [2, 4, 5],
"bigvalues_sum": [30, 70, 50]
}
r = table.group_by("keys").aggregate([
("bigvalues", "max"),
("values", "sum")
])
assert sorted_by_keys(r.to_pydict()) == {
"keys": ["a", "b", "c"],
"values_sum": [3, 7, 5],
"bigvalues_max": [20, 40, 50]
}
r = table.group_by(["keys", "keys2"]).aggregate([
("values", "sum")
])
assert sorted_by_keys(r.to_pydict()) == {
"keys": ["a", "b", "b", "c"],
"keys2": ["X", "Y", "Z", "Z"],
"values_sum": [3, 3, 4, 5]
}
table_with_nulls = pa.table([
pa.array(["a", "a", "a"]),
pa.array([1, None, None])
], names=["keys", "values"])
r = table_with_nulls.group_by(["keys"]).aggregate([
("values", "count", pc.CountOptions(mode="all"))
])
assert r.to_pydict() == {
"keys": ["a"],
"values_count": [3]
}
r = table_with_nulls.group_by(["keys"]).aggregate([
("values", "count", pc.CountOptions(mode="only_null"))
])
assert r.to_pydict() == {
"keys": ["a"],
"values_count": [2]
}
r = table_with_nulls.group_by(["keys"]).aggregate([
("values", "count", pc.CountOptions(mode="only_valid"))
])
assert r.to_pydict() == {
"keys": ["a"],
"values_count": [1]
}
def test_table_sort_by():
table = pa.table([
pa.array([3, 1, 4, 2, 5]),
pa.array(["b", "a", "b", "a", "c"]),
], names=["values", "keys"])
assert table.sort_by("values").to_pydict() == {
"keys": ["a", "a", "b", "b", "c"],
"values": [1, 2, 3, 4, 5]
}
assert table.sort_by([("values", "descending")]).to_pydict() == {
"keys": ["c", "b", "b", "a", "a"],
"values": [5, 4, 3, 2, 1]
}
|
py | b412aedf3f76cb1d876da85cf702b353dc5911b2 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Optional, TYPE_CHECKING
from azure.mgmt.core import AsyncARMPipelineClient
from msrest import Deserializer, Serializer
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
from ._configuration_async import ResourceManagementClientConfiguration
from .operations_async import DeploymentsOperations
from .operations_async import ProvidersOperations
from .operations_async import ResourcesOperations
from .operations_async import ResourceGroupsOperations
from .operations_async import TagsOperations
from .operations_async import DeploymentOperationsOperations
from .. import models
class ResourceManagementClient(object):
"""Provides operations for working with resources and resource groups.
:ivar deployments: DeploymentsOperations operations
:vartype deployments: azure.mgmt.resource.resources.v2017_05_10.aio.operations_async.DeploymentsOperations
:ivar providers: ProvidersOperations operations
:vartype providers: azure.mgmt.resource.resources.v2017_05_10.aio.operations_async.ProvidersOperations
:ivar resources: ResourcesOperations operations
:vartype resources: azure.mgmt.resource.resources.v2017_05_10.aio.operations_async.ResourcesOperations
:ivar resource_groups: ResourceGroupsOperations operations
:vartype resource_groups: azure.mgmt.resource.resources.v2017_05_10.aio.operations_async.ResourceGroupsOperations
:ivar tags: TagsOperations operations
:vartype tags: azure.mgmt.resource.resources.v2017_05_10.aio.operations_async.TagsOperations
:ivar deployment_operations: DeploymentOperationsOperations operations
:vartype deployment_operations: azure.mgmt.resource.resources.v2017_05_10.aio.operations_async.DeploymentOperationsOperations
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param subscription_id: The ID of the target subscription.
:type subscription_id: str
:param str base_url: Service URL
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
"""
def __init__(
self,
credential: "AsyncTokenCredential",
subscription_id: str,
base_url: Optional[str] = None,
**kwargs: Any
) -> None:
if not base_url:
base_url = 'https://management.azure.com'
self._config = ResourceManagementClientConfiguration(credential, subscription_id, **kwargs)
self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self.deployments = DeploymentsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.providers = ProvidersOperations(
self._client, self._config, self._serialize, self._deserialize)
self.resources = ResourcesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.resource_groups = ResourceGroupsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.tags = TagsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.deployment_operations = DeploymentOperationsOperations(
self._client, self._config, self._serialize, self._deserialize)
async def close(self) -> None:
await self._client.close()
async def __aenter__(self) -> "ResourceManagementClient":
await self._client.__aenter__()
return self
async def __aexit__(self, *exc_details) -> None:
await self._client.__aexit__(*exc_details)
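# A minimal usage sketch (not part of the generated client; the credential and
# subscription id are placeholders -- any async TokenCredential such as
# azure.identity.aio.DefaultAzureCredential should work):
#
#     from azure.identity.aio import DefaultAzureCredential
#
#     async def main() -> None:
#         async with ResourceManagementClient(DefaultAzureCredential(),
#                                             "<subscription-id>") as client:
#             async for group in client.resource_groups.list():
#                 print(group.name)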
|
py | b412af7df3f03226810119bd433298423b78f9c0 | import base64
import json
from botocore.exceptions import ClientError as BotoClientError
from .util import *
# Config directory for Docker
__docker_config_directory = os.path.join(os.environ['HOME'], '.docker')
# Docker config file
__docker_config_file = os.path.join(__docker_config_directory, 'config.json')
def login_to_ecs(aws_session, docker_client, **kwargs):
"""
Logs in to AWS's Docker Registry.
:param aws_session: The AWS session.
:param docker_client: The Docker client
:return: None
"""
print('Getting authorization data from AWS...')
try:
authorization_data = get_authorization_data(aws_session)
except Exception as e:
        raise Exception('Unable to log in to ECR. Make sure AWS credentials are set and valid.')
# Get the authorization token. It contains the username and password for the ECR registry.
if 'authorizationToken' not in authorization_data:
raise Exception('Authorization data is missing an "authorizationToken" (docker registry password)')
authorization_token = authorization_data['authorizationToken']
# Get the proxy endpoint. It's the URL for the ECR registry.
if 'proxyEndpoint' not in authorization_data:
raise Exception('Authorization data is missing a "proxyEndpoint" (docker registry url)')
registry = authorization_data['proxyEndpoint']
# Get the username and password from the authorization token.
decoded = base64.b64decode(authorization_token).decode('utf-8')
username, password = decoded.split(':')
# Log in to the registry
print('Logging into ECR Registry "' + registry + '"...')
try:
result = docker_client.login(username=username, password=password, registry=registry, reauth=True)
except BaseException as e:
print(e)
raise Exception('Error logging into ECR')
if 'Status' not in result or not result['Status'] == 'Login Succeeded':
raise Exception('Error logging into ECR')
# The boto3 login function does not save the authorization token.
# So here we save it manually. to ${HOME}/.docker/config.json
print('Saving Docker login to "' + __docker_config_file + '"...')
__save_docker_login(registry, authorization_token)
if registry.startswith("https://"):
__save_docker_login(registry[len("https://"):], authorization_token)
    print('Login Succeeded. You can push to and pull from "' + registry + '".')
def get_authorization_data(aws_session):
"""
Retrieve authorization data for ECR from AWS.
See http://boto3.readthedocs.io/en/latest/reference/services/ecr.html#ECR.Client.get_authorization_token
:param aws_session: The AWS session.
:return: The first element in the authorizationData array.
"""
aws_client = aws_session.client('ecr')
try:
response = aws_client.get_authorization_token()
except BotoClientError:
        raise Exception('Unable to get a login via the AWS client. Have you run \'autocompose login\'?')
if 'authorizationData' not in response:
        raise Exception('Unable to get a login via the AWS client. Have you run \'autocompose login\'?')
authorization_data = response['authorizationData']
if len(authorization_data) == 0:
raise Exception('Authorization data was empty. ')
return authorization_data[0]
def __save_docker_login(registry, authorization_token):
"""
Persist authorization for a Docker registry to the Docker config file.
:param registry: The name of the Docker registry
:param authorization_token: The authorization token which contains the username and password.
:return: None
"""
if os.path.exists(__docker_config_file):
with open(__docker_config_file, 'r') as fd:
config = json.load(fd)
else:
config = {}
if 'auths' not in config:
config['auths'] = {}
if not os.path.exists(__docker_config_directory):
os.mkdir(__docker_config_directory)
config['auths'][registry] = {'auth': authorization_token}
with open(__docker_config_file, 'w+') as fd:
json.dump(config, fd)
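# Illustrative usage sketch (the session/client construction below is an
# assumption about the caller, not something this module enforces):
#
#     import boto3
#     import docker
#
#     session = boto3.session.Session(region_name="us-east-1")
#     client = docker.APIClient()
#     login_to_ecs(session, client)
#
# After a successful login, ~/.docker/config.json contains an entry roughly of
# the form {"auths": {"https://<account>.dkr.ecr.<region>.amazonaws.com":
# {"auth": "<token>"}}}.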
|
py | b412b06504564b66d70257550928783488cf16cf | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from ._hd_insight_management_client import HDInsightManagementClient
__all__ = ['HDInsightManagementClient']
|
gyp | b412b2838c4fd07057bcb4ca0d4d04b2f26c30cb | # Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'targets': [
{
'target_name': 'cr_radio_button',
'dependencies': [
'cr_radio_button_behavior'
],
'includes': ['../../../../../third_party/closure_compiler/compile_js2.gypi'],
},
{
'target_name': 'cr_radio_button_behavior',
'dependencies': [
'<(DEPTH)/third_party/polymer/v1_0/components-chromium/paper-behaviors/compiled_resources2.gyp:paper-ripple-behavior-extracted',
],
'includes': ['../../../../../third_party/closure_compiler/compile_js2.gypi'],
},
],
}
|
py | b412b30c21d1c60b17cf69fe337282db89bb4539 | #-----------------------------------------------------------------------------
# Copyright (C) 2013 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
import json
from tornado.web import StaticFileHandler
from tornado.log import access_log
def log_request(handler):
"""log a bit more information about each request than tornado's default
- move static file get success to debug-level (reduces noise)
- get proxied IP instead of proxy IP
- log referer for redirect and failed requests
- log user-agent for failed requests
"""
status = handler.get_status()
request = handler.request
if status == 304 or (status < 300 and isinstance(handler, StaticFileHandler)) \
or (status < 300 and request.uri == '/'):
        # static-file successes, successful requests for '/', and any 304 Not Modified are debug-level
log_method = access_log.debug
elif status < 400:
log_method = access_log.info
elif status < 500:
log_method = access_log.warning
else:
log_method = access_log.error
request_time = 1000.0 * handler.request.request_time()
ns = dict(
status=status,
method=request.method,
ip=request.remote_ip,
uri=request.uri,
request_time=request_time,
)
msg = "{status} {method} {uri} ({ip}) {request_time:.2f}ms"
if status >= 300:
# log referers on redirects
ns['referer'] = request.headers.get('Referer', 'None')
msg = msg + ' referer={referer}'
if status >= 400:
# log user agent for failed requests
ns['agent'] = request.headers.get('User-Agent', 'Unknown')
msg = msg + ' user-agent={agent}'
if status >= 500 and status != 502:
# log all headers if it caused an error
log_method(json.dumps(request.headers, indent=2))
log_method(msg.format(**ns))
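# Hedged usage sketch: Tornado applications can route their request logging
# through this helper via the ``log_function`` application setting (the
# ``handlers`` list below is a placeholder):
#
#     from tornado import web
#
#     app = web.Application(handlers, log_function=log_request)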
|
py | b412b368c8e9491eaf862a85be2f5404211b1005 | from __future__ import unicode_literals
from pre_commit.languages import helpers
ENVIRONMENT_DIR = None
get_default_version = helpers.basic_get_default_version
healthy = helpers.basic_healthy
install_environment = helpers.no_install
def run_hook(hook, file_args, color):
return helpers.run_xargs(hook, hook.cmd, file_args, color=color)
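# Hedged illustration: this module matches pre-commit's "no install" style of
# language (e.g. ``system``), where the hook entry is executed directly. A local
# config entry that would exercise it might look like this (all values are
# examples, not taken from this repository):
#
#   - repo: local
#     hooks:
#       - id: shellcheck-all
#         name: shellcheck
#         entry: shellcheck
#         language: system
#         types: [shell]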
|
py | b412b3f3a2fd7316cc49a5884b918b588bddfab0 | def countFood():
a = int(input())
b = int(input())
print("Всего", a+b, "шт.")
print("Сколько бананов и ананасов для обязьян?")
countFood()
print("Сколько жуков и червей для ежей?")
countFood()
print("Сколько рыб и моллюсков для выдр?")
countFood()
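# Example session (illustration): if the user enters 3 and then 5 at the first
# prompt, the program prints "Total 8 pcs." before moving on to the next prompt.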
|
py | b412b54ee8566a99e03280391af3f472a04ee3d8 | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GAX Wrapper for interacting with the Stackdriver Trace API."""
from google.cloud.gapic.trace.v1 import trace_service_client
from google.cloud.proto.devtools.cloudtrace.v1 import trace_pb2
from google.gax import CallOptions
from google.gax import INITIAL_PAGE
from google.cloud._helpers import make_secure_channel
from google.cloud._http import DEFAULT_USER_AGENT
from google.cloud.iterator import GAXIterator
from google.protobuf.json_format import MessageToDict
from google.protobuf.json_format import ParseDict
class _TraceAPI(object):
"""Wrapper to help mapping trace-related APIs.
See
https://cloud.google.com/trace/docs/reference/v1/rpc/google.devtools.
cloudtrace.v1
:type gax_api:
:class:`~google.cloud.gapic.trace.v1.trace_service_client.
TraceServiceClient`
:param gax_api: API object used to make GAX requests.
:type client: :class:`~google.cloud.trace.client.Client`
:param client: The client that owns this API object.
"""
def __init__(self, gax_api, client):
self._gax_api = gax_api
self.client = client
def patch_traces(self, project_id, traces, options=None):
"""Sends new traces to Stackdriver Trace or updates existing traces.
:type project_id: str
:param project_id: ID of the Cloud project where the trace data is
stored.
:type traces: dict
:param traces: The traces to be patched in the API call.
:type options: :class:`~google.gax.CallOptions`
:param options: (Optional) Overrides the default settings for this
call, e.g, timeout, retries etc.
"""
traces_pb = _traces_mapping_to_pb(traces)
self._gax_api.patch_traces(project_id, traces_pb, options)
def get_trace(self, project_id, trace_id, options=None):
"""Gets a single trace by its ID.
:type project_id: str
:param project_id: ID of the Cloud project where the trace data is
stored.
:type trace_id: str
:param trace_id: ID of the trace to return.
:type options: :class:`~google.gax.CallOptions`
:param options: (Optional) Overrides the default settings for this
call, e.g, timeout, retries etc.
        :rtype: dict
:returns: A Trace dict.
"""
trace_pb = self._gax_api.get_trace(project_id, trace_id, options)
trace_mapping = _parse_trace_pb(trace_pb)
return trace_mapping
def list_traces(
self,
project_id,
view=None,
page_size=None,
start_time=None,
end_time=None,
filter_=None,
order_by=None,
page_token=None):
"""Returns of a list of traces that match the specified filter
conditions.
:type project_id: str
:param project_id: ID of the Cloud project where the trace data is
stored.
:type view: :class:`google.cloud.gapic.trace.v1.enums.
ListTracesRequest.ViewType`
:param view: (Optional) Type of data returned for traces in the list.
Default is ``MINIMAL``.
:type page_size: int
:param page_size: (Optional) Maximum number of traces to return.
If not specified or <= 0, the implementation selects
a reasonable value. The implementation may return
fewer traces than the requested page size.
:type start_time: :class:`google.protobuf.timestamp_pb2.Timestamp`
:param start_time: (Optional) Start of the time interval (inclusive)
during which the trace data was collected from the
application.
:type end_time: :class:`google.protobuf.timestamp_pb2.Timestamp`
:param end_time: (Optional) End of the time interval (inclusive)
during which the trace data was collected from the
application.
:type filter_: str
:param filter_: (Optional) An optional filter for the request.
:type order_by: str
:param order_by: (Optional) Field used to sort the returned traces.
:type page_token: str
:param page_token: opaque marker for the next "page" of entries. If not
passed, the API will return the first page of
entries.
:rtype: :class:`~google.cloud.iterator.Iterator`
:returns: Traces that match the specified filter conditions.
"""
if page_token is None:
page_token = INITIAL_PAGE
options = CallOptions(page_token=page_token)
page_iter = self._gax_api.list_traces(
project_id=project_id,
view=view,
page_size=page_size,
start_time=start_time,
end_time=end_time,
filter_=filter_,
order_by=order_by,
options=options)
item_to_value = _item_to_mapping
return GAXIterator(self.client, page_iter, item_to_value)
def _parse_trace_pb(trace_pb):
"""Parse a ``Trace`` protobuf to a dictionary.
:type trace_pb: :class:`google.cloud.proto.devtools.cloudtrace.v1.
trace_pb2.Trace`
:param trace_pb: A trace protobuf instance.
:rtype: dict
:returns: The converted trace dict.
"""
try:
return MessageToDict(trace_pb)
except TypeError:
raise
def _item_to_mapping(iterator, trace_pb):
"""Helper callable function for the GAXIterator
:type iterator: :class:`~google.cloud.iterator.Iterator`
:param iterator: The iterator that is currently in use.
:type trace_pb: :class:`google.cloud.proto.devtools.cloudtrace.v1.
trace_pb2.Trace`
:param trace_pb: A trace protobuf instance.
"""
mapping = _parse_trace_pb(trace_pb)
return mapping
def make_gax_trace_api(client):
"""Create an instance of the GAX Trace API.
:type client: :class:`~google.cloud.trace.client.Client`
:param client: The client that holds configuration details.
:rtype: :class:`~google.cloud.trace._gax._TraceAPI`
:returns: A Trace API instance with the proper configurations.
"""
channel = make_secure_channel(
client._credentials,
DEFAULT_USER_AGENT,
trace_service_client.TraceServiceClient.SERVICE_ADDRESS)
generated = trace_service_client.TraceServiceClient(
channel=channel,
lib_name='gccl')
return _TraceAPI(generated, client)
def _traces_mapping_to_pb(traces_mapping):
"""Convert a trace dict to protobuf.
:type traces_mapping: dict
:param traces_mapping: A trace mapping.
:rtype: class:`google.cloud.proto.devtools.cloudtrace.v1.trace_pb2.Traces`
:returns: The converted protobuf type traces.
"""
traces_pb = trace_pb2.Traces()
ParseDict(traces_mapping, traces_pb)
return traces_pb
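# Hedged usage sketch (the client variable and trace ID are placeholders, not
# defined in this module): with an authenticated google.cloud.trace Client, the
# wrapper above could be exercised roughly as follows:
#
#   api = make_gax_trace_api(client)
#   trace = api.get_trace(client.project, '0123456789abcdef0123456789abcdef')
#   for trace_mapping in api.list_traces(client.project, page_size=10):
#       print(trace_mapping.get('traceId'))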
|
py | b412b5ea37cef9cb65d4c398aa0ddca131563b25 | VERSION_MAJOR = "16.10"
VERSION_MINOR = "dev"
VERSION = VERSION_MAJOR + ('.' + VERSION_MINOR if VERSION_MINOR else '')
|
py | b412b64387e3c512e4272adb49617808cbf5d0bb | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ExpressRouteCircuitConnectionsOperations:
"""ExpressRouteCircuitConnectionsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2021_03_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
circuit_name: str,
peering_name: str,
connection_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-03-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/connections/{connectionName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
circuit_name: str,
peering_name: str,
connection_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes the specified Express Route Circuit Connection from the specified express route
circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:param connection_name: The name of the express route circuit connection.
:type connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
circuit_name=circuit_name,
peering_name=peering_name,
connection_name=connection_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/connections/{connectionName}'} # type: ignore
async def get(
self,
resource_group_name: str,
circuit_name: str,
peering_name: str,
connection_name: str,
**kwargs: Any
) -> "_models.ExpressRouteCircuitConnection":
"""Gets the specified Express Route Circuit Connection from the specified express route circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:param connection_name: The name of the express route circuit connection.
:type connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ExpressRouteCircuitConnection, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2021_03_01.models.ExpressRouteCircuitConnection
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCircuitConnection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-03-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ExpressRouteCircuitConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/connections/{connectionName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
circuit_name: str,
peering_name: str,
connection_name: str,
express_route_circuit_connection_parameters: "_models.ExpressRouteCircuitConnection",
**kwargs: Any
) -> "_models.ExpressRouteCircuitConnection":
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCircuitConnection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-03-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(express_route_circuit_connection_parameters, 'ExpressRouteCircuitConnection')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ExpressRouteCircuitConnection', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ExpressRouteCircuitConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/connections/{connectionName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
circuit_name: str,
peering_name: str,
connection_name: str,
express_route_circuit_connection_parameters: "_models.ExpressRouteCircuitConnection",
**kwargs: Any
) -> AsyncLROPoller["_models.ExpressRouteCircuitConnection"]:
"""Creates or updates a Express Route Circuit Connection in the specified express route circuits.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:param connection_name: The name of the express route circuit connection.
:type connection_name: str
:param express_route_circuit_connection_parameters: Parameters supplied to the create or update
express route circuit connection operation.
:type express_route_circuit_connection_parameters: ~azure.mgmt.network.v2021_03_01.models.ExpressRouteCircuitConnection
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ExpressRouteCircuitConnection or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2021_03_01.models.ExpressRouteCircuitConnection]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCircuitConnection"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
circuit_name=circuit_name,
peering_name=peering_name,
connection_name=connection_name,
express_route_circuit_connection_parameters=express_route_circuit_connection_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ExpressRouteCircuitConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/connections/{connectionName}'} # type: ignore
def list(
self,
resource_group_name: str,
circuit_name: str,
peering_name: str,
**kwargs: Any
) -> AsyncIterable["_models.ExpressRouteCircuitConnectionListResult"]:
"""Gets all global reach connections associated with a private peering in an express route
circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the circuit.
:type circuit_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ExpressRouteCircuitConnectionListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2021_03_01.models.ExpressRouteCircuitConnectionListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCircuitConnectionListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-03-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ExpressRouteCircuitConnectionListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/connections'} # type: ignore
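# Hedged usage sketch (client construction follows the usual azure-mgmt-network
# async pattern and is an assumption; resource names are placeholders):
#
#   from azure.identity.aio import DefaultAzureCredential
#   from azure.mgmt.network.aio import NetworkManagementClient
#
#   async with NetworkManagementClient(DefaultAzureCredential(), "<subscription-id>") as client:
#       connection = await client.express_route_circuit_connections.get(
#           "my-rg", "my-circuit", "AzurePrivatePeering", "my-connection")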
|
py | b412b69951409e1dadd58e10639f04eff319d6fa | # coding: utf-8
"""
Lightly API
Lightly.ai enables you to do self-supervised learning in an easy and intuitive way. The lightly.ai OpenAPI spec defines how one can interact with our REST API to unleash the full potential of lightly.ai # noqa: E501
OpenAPI spec version: 1.0.0
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from lightly.openapi_generated.swagger_client.configuration import Configuration
class TagName(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
}
attribute_map = {
}
def __init__(self, _configuration=None): # noqa: E501
"""TagName - a model defined in Swagger""" # noqa: E501
if _configuration is None:
_configuration = Configuration()
self._configuration = _configuration
self.discriminator = None
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(TagName, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, TagName):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, TagName):
return True
return self.to_dict() != other.to_dict()
|
py | b412b7426db12bd4e308140edfb18d98bf7c33e3 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_pubsub_subscription
description:
- A named resource representing the stream of messages from a single, specific topic,
to be delivered to the subscribing application.
short_description: Creates a GCP Subscription
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
state:
description:
- Whether the given object should exist in GCP
choices:
- present
- absent
default: present
type: str
name:
description:
- Name of the subscription.
required: true
type: str
topic:
description:
- A reference to a Topic resource.
- 'This field represents a link to a Topic resource in GCP. It can be specified
in two ways. First, you can place a dictionary with key ''name'' and value of
your resource''s name Alternatively, you can add `register: name-of-resource`
to a gcp_pubsub_topic task and then set this topic field to "{{ name-of-resource
}}"'
required: true
type: dict
labels:
description:
- A set of key/value label pairs to assign to this Subscription.
required: false
type: dict
push_config:
description:
- If push delivery is used with this subscription, this field is used to configure
it. An empty pushConfig signifies that the subscriber will pull and ack messages
using API methods.
required: false
type: dict
suboptions:
oidc_token:
description:
- If specified, Pub/Sub will generate and attach an OIDC JWT token as an Authorization
header in the HTTP request for every pushed message.
required: false
type: dict
suboptions:
service_account_email:
description:
- Service account email to be used for generating the OIDC token.
- The caller (for subscriptions.create, subscriptions.patch, and subscriptions.modifyPushConfig
RPCs) must have the iam.serviceAccounts.actAs permission for the service
account.
required: true
type: str
audience:
description:
- 'Audience to be used when generating OIDC token. The audience claim
identifies the recipients that the JWT is intended for. The audience
value is a single case-sensitive string. Having multiple values (array)
for the audience field is not supported. More info about the OIDC JWT
token audience here: U(https://tools.ietf.org/html/rfc7519#section-4.1.3)
Note: if not specified, the Push endpoint URL will be used.'
required: false
type: str
push_endpoint:
description:
- A URL locating the endpoint to which messages should be pushed.
- For example, a Webhook endpoint might use "U(https://example.com/push").
required: true
type: str
attributes:
description:
- Endpoint configuration attributes.
- Every endpoint has a set of API supported attributes that can be used to
control different aspects of the message delivery.
- The currently supported attribute is x-goog-version, which you can use to
change the format of the pushed message. This attribute indicates the version
of the data expected by the endpoint. This controls the shape of the pushed
message (i.e., its fields and metadata). The endpoint version is based on
the version of the Pub/Sub API.
- If not present during the subscriptions.create call, it will default to
the version of the API used to make such call. If not present during a subscriptions.modifyPushConfig
call, its value will not be changed. subscriptions.get calls will always
return a valid version, even if the subscription was created without this
attribute.
- 'The possible values for this attribute are: - v1beta1: uses the push format
defined in the v1beta1 Pub/Sub API.'
- "- v1 or v1beta2: uses the push format defined in the v1 Pub/Sub API."
required: false
type: dict
ack_deadline_seconds:
description:
- This value is the maximum time after a subscriber receives a message before
the subscriber should acknowledge the message. After message delivery but before
the ack deadline expires and before the message is acknowledged, it is an outstanding
message and will not be delivered again during that time (on a best-effort basis).
- For pull subscriptions, this value is used as the initial value for the ack
deadline. To override this value for a given message, call subscriptions.modifyAckDeadline
with the corresponding ackId if using pull. The minimum custom deadline you
can specify is 10 seconds. The maximum custom deadline you can specify is 600
seconds (10 minutes).
- If this parameter is 0, a default value of 10 seconds is used.
- For push delivery, this value is also used to set the request timeout for the
call to the push endpoint.
- If the subscriber never acknowledges the message, the Pub/Sub system will eventually
redeliver the message.
required: false
type: int
message_retention_duration:
description:
- How long to retain unacknowledged messages in the subscription's backlog, from
the moment a message is published. If retainAckedMessages is true, then this
also configures the retention of acknowledged messages, and thus configures
how far back in time a subscriptions.seek can be done. Defaults to 7 days. Cannot
be more than 7 days (`"604800s"`) or less than 10 minutes (`"600s"`).
- 'A duration in seconds with up to nine fractional digits, terminated by ''s''.
Example: `"600.5s"`.'
required: false
default: 604800s
type: str
retain_acked_messages:
description:
- Indicates whether to retain acknowledged messages. If `true`, then messages
are not expunged from the subscription's backlog, even if they are acknowledged,
until they fall out of the messageRetentionDuration window.
required: false
type: bool
expiration_policy:
description:
- A policy that specifies the conditions for this subscription's expiration.
- A subscription is considered active as long as any connected subscriber is successfully
consuming messages from the subscription or is issuing operations on the subscription.
If expirationPolicy is not set, a default policy with ttl of 31 days will be
used. If it is set but ttl is "", the resource never expires. The minimum allowed
value for expirationPolicy.ttl is 1 day.
required: false
type: dict
suboptions:
ttl:
description:
- Specifies the "time-to-live" duration for an associated resource. The resource
expires if it is not active for a period of ttl.
- If ttl is not set, the associated resource never expires.
- A duration in seconds with up to nine fractional digits, terminated by 's'.
- Example - "3.5s".
required: true
type: str
filter:
description:
- The subscription only delivers the messages that match the filter. Pub/Sub automatically
acknowledges the messages that don't match the filter. You can filter messages
by their attributes. The maximum length of a filter is 256 bytes. After creating
the subscription, you can't modify the filter.
required: false
type: str
dead_letter_policy:
description:
- A policy that specifies the conditions for dead lettering messages in this subscription.
If dead_letter_policy is not set, dead lettering is disabled.
- The Cloud Pub/Sub service account associated with this subscription's parent
project (i.e., service-{project_number}@gcp-sa-pubsub.iam.gserviceaccount.com)
must have permission to Acknowledge() messages on this subscription.
required: false
type: dict
suboptions:
dead_letter_topic:
description:
- The name of the topic to which dead letter messages should be published.
- Format is `projects/{project}/topics/{topic}`.
- The Cloud Pub/Sub service account associated with the enclosing subscription's
parent project (i.e., service-{project_number}@gcp-sa-pubsub.iam.gserviceaccount.com)
must have permission to Publish() to this topic.
- The operation will fail if the topic does not exist.
- Users should ensure that there is a subscription attached to this topic
since messages published to a topic with no subscriptions are lost.
required: false
type: str
max_delivery_attempts:
description:
- The maximum number of delivery attempts for any message. The value must
be between 5 and 100.
- The number of delivery attempts is defined as 1 + (the sum of number of
NACKs and number of times the acknowledgement deadline has been exceeded
for the message).
- A NACK is any call to ModifyAckDeadline with a 0 deadline. Note that client
libraries may automatically extend ack_deadlines.
- This field will be honored on a best effort basis.
- If this parameter is 0, a default value of 5 is used.
required: false
type: int
retry_policy:
description:
- A policy that specifies how Pub/Sub retries message delivery for this subscription.
- If not set, the default retry policy is applied. This generally implies that
messages will be retried as soon as possible for healthy subscribers. RetryPolicy
will be triggered on NACKs or acknowledgement deadline exceeded events for a
given message .
required: false
type: dict
suboptions:
minimum_backoff:
description:
- The minimum delay between consecutive deliveries of a given message. Value
should be between 0 and 600 seconds. Defaults to 10 seconds.
- 'A duration in seconds with up to nine fractional digits, terminated by
''s''. Example: "3.5s".'
required: false
type: str
maximum_backoff:
description:
- 'The maximum delay between consecutive deliveries of a given message. Value
should be between 0 and 600 seconds. Defaults to 600 seconds. A duration
in seconds with up to nine fractional digits, terminated by ''s''. Example:
"3.5s".'
required: false
type: str
enable_message_ordering:
description:
- If `true`, messages published with the same orderingKey in PubsubMessage will
be delivered to the subscribers in the order in which they are received by the
Pub/Sub system. Otherwise, they may be delivered in any order.
required: false
type: bool
project:
description:
- The Google Cloud Platform project to use.
type: str
auth_kind:
description:
- The type of credential used.
type: str
required: true
choices:
- application
- machineaccount
- serviceaccount
service_account_contents:
description:
- The contents of a Service Account JSON file, either in a dictionary or as a
JSON string that represents it.
type: jsonarg
service_account_file:
description:
- The path of a Service Account JSON file if serviceaccount is selected as type.
type: path
service_account_email:
description:
- An optional service account email address if machineaccount is selected and
the user does not wish to use the default email.
type: str
scopes:
description:
- Array of scopes to be used
type: list
elements: str
env_type:
description:
- Specifies which Ansible environment you're running this module within.
- This should not be set unless you know what you're doing.
- This only alters the User Agent string for any API requests.
type: str
notes:
- 'API Reference: U(https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.subscriptions)'
- 'Managing Subscriptions: U(https://cloud.google.com/pubsub/docs/admin#managing_subscriptions)'
- for authentication, you can set service_account_file using the C(gcp_service_account_file)
env variable.
- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS)
env variable.
- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL)
env variable.
- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable.
- For authentication, you can set scopes using the C(GCP_SCOPES) env variable.
- Environment variables values will only be used if the playbook values are not set.
- The I(service_account_email) and I(service_account_file) options are mutually exclusive.
'''
EXAMPLES = '''
- name: create a topic
google.cloud.gcp_pubsub_topic:
name: topic-subscription
project: "{{ gcp_project }}"
auth_kind: "{{ gcp_cred_kind }}"
service_account_file: "{{ gcp_cred_file }}"
state: present
register: topic
- name: create a subscription
google.cloud.gcp_pubsub_subscription:
name: test_object
topic: "{{ topic }}"
ack_deadline_seconds: 300
project: test_project
auth_kind: serviceaccount
service_account_file: "/tmp/auth.pem"
state: present
'''
RETURN = '''
name:
description:
- Name of the subscription.
returned: success
type: str
topic:
description:
- A reference to a Topic resource.
returned: success
type: dict
labels:
description:
- A set of key/value label pairs to assign to this Subscription.
returned: success
type: dict
pushConfig:
description:
- If push delivery is used with this subscription, this field is used to configure
it. An empty pushConfig signifies that the subscriber will pull and ack messages
using API methods.
returned: success
type: complex
contains:
oidcToken:
description:
- If specified, Pub/Sub will generate and attach an OIDC JWT token as an Authorization
header in the HTTP request for every pushed message.
returned: success
type: complex
contains:
serviceAccountEmail:
description:
- Service account email to be used for generating the OIDC token.
- The caller (for subscriptions.create, subscriptions.patch, and subscriptions.modifyPushConfig
RPCs) must have the iam.serviceAccounts.actAs permission for the service
account.
returned: success
type: str
audience:
description:
- 'Audience to be used when generating OIDC token. The audience claim identifies
the recipients that the JWT is intended for. The audience value is a single
case-sensitive string. Having multiple values (array) for the audience
field is not supported. More info about the OIDC JWT token audience here:
U(https://tools.ietf.org/html/rfc7519#section-4.1.3) Note: if not specified,
the Push endpoint URL will be used.'
returned: success
type: str
pushEndpoint:
description:
- A URL locating the endpoint to which messages should be pushed.
- For example, a Webhook endpoint might use "U(https://example.com/push").
returned: success
type: str
attributes:
description:
- Endpoint configuration attributes.
- Every endpoint has a set of API supported attributes that can be used to control
different aspects of the message delivery.
- The currently supported attribute is x-goog-version, which you can use to
change the format of the pushed message. This attribute indicates the version
of the data expected by the endpoint. This controls the shape of the pushed
message (i.e., its fields and metadata). The endpoint version is based on
the version of the Pub/Sub API.
- If not present during the subscriptions.create call, it will default to the
version of the API used to make such call. If not present during a subscriptions.modifyPushConfig
call, its value will not be changed. subscriptions.get calls will always return
a valid version, even if the subscription was created without this attribute.
- 'The possible values for this attribute are: - v1beta1: uses the push format
defined in the v1beta1 Pub/Sub API.'
- "- v1 or v1beta2: uses the push format defined in the v1 Pub/Sub API."
returned: success
type: dict
ackDeadlineSeconds:
description:
- This value is the maximum time after a subscriber receives a message before the
subscriber should acknowledge the message. After message delivery but before the
ack deadline expires and before the message is acknowledged, it is an outstanding
message and will not be delivered again during that time (on a best-effort basis).
- For pull subscriptions, this value is used as the initial value for the ack deadline.
To override this value for a given message, call subscriptions.modifyAckDeadline
with the corresponding ackId if using pull. The minimum custom deadline you can
specify is 10 seconds. The maximum custom deadline you can specify is 600 seconds
(10 minutes).
- If this parameter is 0, a default value of 10 seconds is used.
- For push delivery, this value is also used to set the request timeout for the
call to the push endpoint.
- If the subscriber never acknowledges the message, the Pub/Sub system will eventually
redeliver the message.
returned: success
type: int
messageRetentionDuration:
description:
- How long to retain unacknowledged messages in the subscription's backlog, from
the moment a message is published. If retainAckedMessages is true, then this also
configures the retention of acknowledged messages, and thus configures how far
back in time a subscriptions.seek can be done. Defaults to 7 days. Cannot be more
than 7 days (`"604800s"`) or less than 10 minutes (`"600s"`).
- 'A duration in seconds with up to nine fractional digits, terminated by ''s''.
Example: `"600.5s"`.'
returned: success
type: str
retainAckedMessages:
description:
- Indicates whether to retain acknowledged messages. If `true`, then messages are
not expunged from the subscription's backlog, even if they are acknowledged, until
they fall out of the messageRetentionDuration window.
returned: success
type: bool
expirationPolicy:
description:
- A policy that specifies the conditions for this subscription's expiration.
- A subscription is considered active as long as any connected subscriber is successfully
consuming messages from the subscription or is issuing operations on the subscription.
If expirationPolicy is not set, a default policy with ttl of 31 days will be used.
If it is set but ttl is "", the resource never expires. The minimum allowed value
for expirationPolicy.ttl is 1 day.
returned: success
type: complex
contains:
ttl:
description:
- Specifies the "time-to-live" duration for an associated resource. The resource
expires if it is not active for a period of ttl.
- If ttl is not set, the associated resource never expires.
- A duration in seconds with up to nine fractional digits, terminated by 's'.
- Example - "3.5s".
returned: success
type: str
filter:
description:
- The subscription only delivers the messages that match the filter. Pub/Sub automatically
acknowledges the messages that don't match the filter. You can filter messages
by their attributes. The maximum length of a filter is 256 bytes. After creating
the subscription, you can't modify the filter.
returned: success
type: str
deadLetterPolicy:
description:
- A policy that specifies the conditions for dead lettering messages in this subscription.
If dead_letter_policy is not set, dead lettering is disabled.
- The Cloud Pub/Sub service account associated with this subscription's parent project
(i.e., service-{project_number}@gcp-sa-pubsub.iam.gserviceaccount.com) must have
permission to Acknowledge() messages on this subscription.
returned: success
type: complex
contains:
deadLetterTopic:
description:
- The name of the topic to which dead letter messages should be published.
- Format is `projects/{project}/topics/{topic}`.
- The Cloud Pub/Sub service account associated with the enclosing subscription's
parent project (i.e., service-{project_number}@gcp-sa-pubsub.iam.gserviceaccount.com)
must have permission to Publish() to this topic.
- The operation will fail if the topic does not exist.
- Users should ensure that there is a subscription attached to this topic since
messages published to a topic with no subscriptions are lost.
returned: success
type: str
maxDeliveryAttempts:
description:
- The maximum number of delivery attempts for any message. The value must be
between 5 and 100.
- The number of delivery attempts is defined as 1 + (the sum of number of NACKs
and number of times the acknowledgement deadline has been exceeded for the
message).
- A NACK is any call to ModifyAckDeadline with a 0 deadline. Note that client
libraries may automatically extend ack_deadlines.
- This field will be honored on a best effort basis.
- If this parameter is 0, a default value of 5 is used.
returned: success
type: int
retryPolicy:
description:
- A policy that specifies how Pub/Sub retries message delivery for this subscription.
- If not set, the default retry policy is applied. This generally implies that messages
will be retried as soon as possible for healthy subscribers. RetryPolicy will
be triggered on NACKs or acknowledgement deadline exceeded events for a given
message .
returned: success
type: complex
contains:
minimumBackoff:
description:
- The minimum delay between consecutive deliveries of a given message. Value
should be between 0 and 600 seconds. Defaults to 10 seconds.
- 'A duration in seconds with up to nine fractional digits, terminated by ''s''.
Example: "3.5s".'
returned: success
type: str
maximumBackoff:
description:
- 'The maximum delay between consecutive deliveries of a given message. Value
should be between 0 and 600 seconds. Defaults to 600 seconds. A duration in
seconds with up to nine fractional digits, terminated by ''s''. Example: "3.5s".'
returned: success
type: str
enableMessageOrdering:
description:
- If `true`, messages published with the same orderingKey in PubsubMessage will
be delivered to the subscribers in the order in which they are received by the
Pub/Sub system. Otherwise, they may be delivered in any order.
returned: success
type: bool
'''
################################################################################
# Imports
################################################################################
from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import (
navigate_hash,
GcpSession,
GcpModule,
GcpRequest,
remove_nones_from_dict,
replace_resource_dict,
)
import json
import re
################################################################################
# Main
################################################################################
def main():
"""Main function"""
module = GcpModule(
argument_spec=dict(
state=dict(default='present', choices=['present', 'absent'], type='str'),
name=dict(required=True, type='str'),
topic=dict(required=True, type='dict'),
labels=dict(type='dict'),
push_config=dict(
type='dict',
options=dict(
oidc_token=dict(type='dict', options=dict(service_account_email=dict(required=True, type='str'), audience=dict(type='str'))),
push_endpoint=dict(required=True, type='str'),
attributes=dict(type='dict'),
),
),
ack_deadline_seconds=dict(type='int'),
message_retention_duration=dict(default='604800s', type='str'),
retain_acked_messages=dict(type='bool'),
expiration_policy=dict(type='dict', options=dict(ttl=dict(required=True, type='str'))),
filter=dict(type='str'),
dead_letter_policy=dict(type='dict', options=dict(dead_letter_topic=dict(type='str'), max_delivery_attempts=dict(type='int'))),
retry_policy=dict(type='dict', options=dict(minimum_backoff=dict(type='str'), maximum_backoff=dict(type='str'))),
enable_message_ordering=dict(type='bool'),
)
)
if not module.params['scopes']:
module.params['scopes'] = ['https://www.googleapis.com/auth/pubsub']
state = module.params['state']
fetch = fetch_resource(module, self_link(module))
changed = False
if fetch:
if state == 'present':
if is_different(module, fetch):
update(module, self_link(module), fetch)
fetch = fetch_resource(module, self_link(module))
changed = True
else:
delete(module, self_link(module))
fetch = {}
changed = True
else:
if state == 'present':
fetch = create(module, self_link(module))
changed = True
else:
fetch = {}
fetch.update({'changed': changed})
module.exit_json(**fetch)
def create(module, link):
auth = GcpSession(module, 'pubsub')
return return_if_object(module, auth.put(link, resource_to_request(module)))
def update(module, link, fetch):
auth = GcpSession(module, 'pubsub')
params = {'updateMask': updateMask(resource_to_request(module), response_to_hash(module, fetch))}
request = resource_to_request(module)
del request['name']
return return_if_object(module, auth.patch(link, request, params=params))
def updateMask(request, response):
update_mask = []
if request.get('labels') != response.get('labels'):
update_mask.append('labels')
if request.get('pushConfig') != response.get('pushConfig'):
update_mask.append('pushConfig')
if request.get('ackDeadlineSeconds') != response.get('ackDeadlineSeconds'):
update_mask.append('ackDeadlineSeconds')
if request.get('messageRetentionDuration') != response.get('messageRetentionDuration'):
update_mask.append('messageRetentionDuration')
if request.get('retainAckedMessages') != response.get('retainAckedMessages'):
update_mask.append('retainAckedMessages')
if request.get('expirationPolicy') != response.get('expirationPolicy'):
update_mask.append('expirationPolicy')
if request.get('deadLetterPolicy') != response.get('deadLetterPolicy'):
update_mask.append('deadLetterPolicy')
if request.get('retryPolicy') != response.get('retryPolicy'):
update_mask.append('retryPolicy')
return ','.join(update_mask)
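# Illustration (not part of the module's logic): if only the labels and the ack
# deadline differ between the desired request and the live resource, the function
# above returns 'labels,ackDeadlineSeconds', which update() passes to the PATCH
# request as its updateMask query parameter.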
def delete(module, link):
auth = GcpSession(module, 'pubsub')
return return_if_object(module, auth.delete(link))
def resource_to_request(module):
request = {
u'name': name_pattern(module.params.get('name'), module),
u'topic': topic_pattern(replace_resource_dict(module.params.get(u'topic', {}), 'name'), module),
u'labels': module.params.get('labels'),
u'pushConfig': SubscriptionPushconfig(module.params.get('push_config', {}), module).to_request(),
u'ackDeadlineSeconds': module.params.get('ack_deadline_seconds'),
u'messageRetentionDuration': module.params.get('message_retention_duration'),
u'retainAckedMessages': module.params.get('retain_acked_messages'),
u'expirationPolicy': SubscriptionExpirationpolicy(module.params.get('expiration_policy', {}), module).to_request(),
u'filter': module.params.get('filter'),
u'deadLetterPolicy': SubscriptionDeadletterpolicy(module.params.get('dead_letter_policy', {}), module).to_request(),
u'retryPolicy': SubscriptionRetrypolicy(module.params.get('retry_policy', {}), module).to_request(),
u'enableMessageOrdering': module.params.get('enable_message_ordering'),
}
return_vals = {}
for k, v in request.items():
if v or v is False:
return_vals[k] = v
return return_vals
def fetch_resource(module, link, allow_not_found=True):
auth = GcpSession(module, 'pubsub')
return return_if_object(module, auth.get(link), allow_not_found)
def self_link(module):
return "https://pubsub.googleapis.com/v1/projects/{project}/subscriptions/{name}".format(**module.params)
def collection(module):
return "https://pubsub.googleapis.com/v1/projects/{project}/subscriptions".format(**module.params)
def return_if_object(module, response, allow_not_found=False):
# If not found, return nothing.
if allow_not_found and response.status_code == 404:
return None
# If no content, return nothing.
if response.status_code == 204:
return None
try:
module.raise_for_status(response)
result = response.json()
except getattr(json.decoder, 'JSONDecodeError', ValueError):
module.fail_json(msg="Invalid JSON response with error: %s" % response.text)
if navigate_hash(result, ['error', 'errors']):
module.fail_json(msg=navigate_hash(result, ['error', 'errors']))
return result
def is_different(module, response):
request = resource_to_request(module)
response = response_to_hash(module, response)
# Remove all output-only from response.
response_vals = {}
for k, v in response.items():
if k in request:
response_vals[k] = v
request_vals = {}
for k, v in request.items():
if k in response:
request_vals[k] = v
return GcpRequest(request_vals) != GcpRequest(response_vals)
# Remove unnecessary properties from the response.
# This is for doing comparisons with Ansible's current parameters.
def response_to_hash(module, response):
return {
u'name': name_pattern(module.params.get('name'), module),
u'topic': topic_pattern(replace_resource_dict(module.params.get(u'topic', {}), 'name'), module),
u'labels': response.get(u'labels'),
u'pushConfig': SubscriptionPushconfig(response.get(u'pushConfig', {}), module).from_response(),
u'ackDeadlineSeconds': response.get(u'ackDeadlineSeconds'),
u'messageRetentionDuration': response.get(u'messageRetentionDuration'),
u'retainAckedMessages': response.get(u'retainAckedMessages'),
u'expirationPolicy': SubscriptionExpirationpolicy(response.get(u'expirationPolicy', {}), module).from_response(),
u'filter': module.params.get('filter'),
u'deadLetterPolicy': SubscriptionDeadletterpolicy(response.get(u'deadLetterPolicy', {}), module).from_response(),
u'retryPolicy': SubscriptionRetrypolicy(response.get(u'retryPolicy', {}), module).from_response(),
u'enableMessageOrdering': module.params.get('enable_message_ordering'),
}
def name_pattern(name, module):
if name is None:
return
regex = r"projects/.*/subscriptions/.*"
if not re.match(regex, name):
name = "projects/{project}/subscriptions/{name}".format(**module.params)
return name
def topic_pattern(name, module):
if name is None:
return
regex = r"projects/.*/topics/.*"
if not re.match(regex, name):
formatted_params = {
'project': module.params['project'],
'topic': replace_resource_dict(module.params['topic'], 'name'),
}
name = "projects/{project}/topics/{topic}".format(**formatted_params)
return name
class SubscriptionPushconfig(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = {}
def to_request(self):
return remove_nones_from_dict(
{
u'oidcToken': SubscriptionOidctoken(self.request.get('oidc_token', {}), self.module).to_request(),
u'pushEndpoint': self.request.get('push_endpoint'),
u'attributes': self.request.get('attributes'),
}
)
def from_response(self):
return remove_nones_from_dict(
{
u'oidcToken': SubscriptionOidctoken(self.request.get(u'oidcToken', {}), self.module).from_response(),
u'pushEndpoint': self.request.get(u'pushEndpoint'),
u'attributes': self.request.get(u'attributes'),
}
)
class SubscriptionOidctoken(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = {}
def to_request(self):
return remove_nones_from_dict({u'serviceAccountEmail': self.request.get('service_account_email'), u'audience': self.request.get('audience')})
def from_response(self):
return remove_nones_from_dict({u'serviceAccountEmail': self.request.get(u'serviceAccountEmail'), u'audience': self.request.get(u'audience')})
class SubscriptionExpirationpolicy(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = {}
def to_request(self):
return remove_nones_from_dict({u'ttl': self.request.get('ttl')})
def from_response(self):
return remove_nones_from_dict({u'ttl': self.request.get(u'ttl')})
class SubscriptionDeadletterpolicy(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = {}
def to_request(self):
return remove_nones_from_dict(
{u'deadLetterTopic': self.request.get('dead_letter_topic'), u'maxDeliveryAttempts': self.request.get('max_delivery_attempts')}
)
def from_response(self):
return remove_nones_from_dict(
{u'deadLetterTopic': self.request.get(u'deadLetterTopic'), u'maxDeliveryAttempts': self.request.get(u'maxDeliveryAttempts')}
)
class SubscriptionRetrypolicy(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = {}
def to_request(self):
return remove_nones_from_dict({u'minimumBackoff': self.request.get('minimum_backoff'), u'maximumBackoff': self.request.get('maximum_backoff')})
def from_response(self):
return remove_nones_from_dict({u'minimumBackoff': self.request.get(u'minimumBackoff'), u'maximumBackoff': self.request.get(u'maximumBackoff')})
if __name__ == '__main__':
main()
|
py | b412b7f41327363c21c14b77deaa0684422c7ae1 | from itertools import repeat
def main():
for i in range(1, int(input())):
# Can't use another for...
# print(sum((i * 10**j) for j in range(i)))
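        # e.g. for i = 3 this prints 3*1 + 3*10 + 3*100 = 333 (the digit i repeated i times)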
print(sum(map(lambda t: t[1] * 10**t[0], (enumerate(repeat(i, i))))))
if __name__ == '__main__':
main()
|
py | b412b91978dc2a0abc9dd374316ee6dc1c932fba | from flask import Flask, render_template
from flask_bootstrap import Bootstrap
from flask_mail import Mail
from flask_moment import Moment
from flask_sqlalchemy import SQLAlchemy
from config import config
bootstrap = Bootstrap()
mail = Mail()
moment = Moment()
db = SQLAlchemy()
def create_app(config_name):
app = Flask(__name__)
app.config.from_object(config[config_name])
config[config_name].init_app(app)
bootstrap.init_app(app)
mail.init_app(app)
moment.init_app(app)
db.init_app(app)
from .main import main as main_blueprint
app.register_blueprint(main_blueprint)
return app
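# Minimal usage sketch (assumes the imported `config` dict defines a 'development' entry):
#   app = create_app('development')
#   app.run()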
|
py | b412b9f6acf1ae53d5f27c4c69af7983fb8d00d1 | """
Django settings for project project.
Generated by 'django-admin startproject' using Django 1.9.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'tx-%t!!6l!$+f&6*l33j1e_i985oop8=8eg7x24r0-e#)-9l@x'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'conf.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'conf.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
|
py | b412ba8cd1cd53b58c65aa89c12c8770a145ff04 | import copy
import logging
logger = logging.getLogger(__name__)
def verify_boot_code(boot_code):
"""
Verifies the input boot code, computing the accumulator value and checking if the program runs infinitely. If an
infinite loop is detected, the execution is halted. The function returns a tuple where the first element is the
value of the accumulator when the program is finished, and the second element is a boolean indicating whether the
program runs infinitely or not.
"""
instructions = [x.split(" ") for x in boot_code.split("\n")]
instructions_count = len(instructions)
executed_instructions = set()
accumulator = 0
current_instruction_index = 0
is_infinite = False
# Instruction loop
while current_instruction_index < instructions_count:
# If the current instruction was executed before, an infinite loop has been found, so stop processing
if current_instruction_index in executed_instructions:
is_infinite = True
break
executed_instructions.add(current_instruction_index)
instruction_type, instruction_argument = instructions[current_instruction_index]
if instruction_type == "nop":
current_instruction_index += 1
elif instruction_type == "acc":
accumulator += int(instruction_argument)
current_instruction_index += 1
elif instruction_type == "jmp":
current_instruction_index += int(instruction_argument)
return accumulator, is_infinite
def fix_boot_code(boot_code):
"""
Fixes the given boot code, that runs infinitely, by morphing one 'nop' or 'jmp' at a time and running the program.
It returns the value of the accumulator when the modified program is no longer infinite.
"""
instructions = [x.split(" ") for x in boot_code.split("\n")]
# Go over each instruction in the program
for i, instruction in enumerate(instructions):
# Don't modify 'acc' instructions
if instruction[0] == "acc":
continue
# Make a copy of the program, morphing the i-th instruction
modified_instructions = copy.deepcopy(instructions)
modified_instructions[i][0] = (
"jmp" if modified_instructions[i][0] == "nop" else "nop"
)
# Check if the modified program runs infinitely
acc, is_infinite = verify_boot_code(
"\n".join([" ".join(x) for x in modified_instructions])
)
if not is_infinite:
return acc
return None
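# Minimal usage sketch (not part of the original module); the boot code below is a
# hypothetical example program:
if __name__ == "__main__":
    sample_boot_code = "nop +0\nacc +1\njmp +4\nacc +3\njmp -3\nacc -99\nacc +1\njmp -4\nacc +6"
    print(verify_boot_code(sample_boot_code))  # (5, True): accumulator is 5 when the loop is detected
    print(fix_boot_code(sample_boot_code))     # 8: accumulator after repairing the one faulty instruction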
|
py | b412bab27186855c5632ce60183f9e29a651d9eb | class Cooking:
def __init__(self, time, temp, pressure, desired):
self.well_done = 3000
self.medium = 2500
self.cooked_constant = 0.05
self.time = time
self.temp = temp
self.pressure = pressure
self.desired = desired
    def is_cooking_criteria_satisfied(self):
        return self.is_well_done() or self.is_medium()
    def is_well_done(self):
        return self.desired == 'well-done' and \
               self.get_cooking_progress() >= self.well_done
    def is_medium(self):
        return self.desired == 'medium' and \
               self.get_cooking_progress() >= self.medium
def get_cooking_progress(self):
return self.time * self.temp * self.pressure * self.cooked_constant
TIME = 30 # [min]
TEMP = 103 # [celsius]
PRESSURE = 20 # [psi]
DESIRED_STATE = 'well-done'
COOKING = Cooking(TIME, TEMP, PRESSURE, DESIRED_STATE)
if COOKING.is_cooking_criteria_satisfied():
print('cooking is done.')
else:
print('ongoing cooking.')
|
py | b412bac8bfe2471c3787edf63d5112a2eccbbe21 | """
# Copyright 2022 Red Hat
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
import logging
from overrides import overrides
from cibyl.cli.query import QueryType
from cibyl.outputs.cli.ci.system.printer import CISystemPrinter
from cibyl.outputs.cli.ci.system.utils.sorting.builds import SortBuildsByUUID
from cibyl.outputs.cli.ci.system.utils.sorting.jobs import SortJobsByName
from cibyl.outputs.cli.printer import ColoredPrinter
from cibyl.utils.colors import DefaultPalette
from cibyl.utils.sorting import BubbleSortAlgorithm
from cibyl.utils.strings import IndentedTextBuilder
LOG = logging.getLogger(__name__)
class ColoredBaseSystemPrinter(ColoredPrinter, CISystemPrinter):
"""Default printer for all system models. This one is decorated with
colors for easier read.
"""
def __init__(self,
query=QueryType.NONE,
verbosity=0,
palette=DefaultPalette(),
job_sorter=BubbleSortAlgorithm(SortJobsByName()),
build_sorter=BubbleSortAlgorithm(SortBuildsByUUID())):
"""Constructor. See parent for more information.
:param job_sorter: Determines the order on which jobs are printed.
:type job_sorter: :class:`cibyl.utils.sorting.SortingAlgorithm`
:param build_sorter: Determines the order on which builds are printed.
:type build_sorter: :class:`cibyl.utils.sorting.SortingAlgorithm`
"""
super().__init__(query, verbosity, palette)
self._job_sorter = job_sorter
self._build_sorter = build_sorter
@overrides
def print_system(self, system):
printer = IndentedTextBuilder()
printer.add(self._palette.blue('System: '), 0)
printer[-1].append(system.name.value)
if self.verbosity > 0:
printer[-1].append(f' (type: {system.system_type.value})')
if self.query in (QueryType.FEATURES_JOBS, QueryType.FEATURES):
for feature in system.features.values():
printer.add(self.print_feature(feature), 1)
return printer.build()
def print_feature(self, feature):
"""Print a feature present in a system.
:param feature: The feature.
:type feature: :class:`cibyl.models.ci.base.feature.Feature`
:return: Textual representation of the provided model.
:rtype: str
"""
printer = IndentedTextBuilder()
name = feature.name.value
present = feature.present.value
printer.add(self.palette.blue(f'{name} feature: '), 0)
printer[-1].append(present)
return printer.build()
|
wsgi | b412bba5f240395e9328c194070f79b6efd60a23 | #!/usr/bin/python
import os
import sys
import logging
activate_this = os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))), 'venv/bin/activate_this.py')
execfile(activate_this, dict(__file__=activate_this))
logging.basicConfig(stream=sys.stderr)
sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))))
sys.path.insert(0,"/var/www/SuperPhy/")
#sys.path.insert(0,"/var/www/html/SuperPhy/")
from SuperPhy import app as application
application.secret_key = 'Add your secret key'
|
py | b412bc06c178e1233aa1ae2a84ee7a7c0eb24cfa | #! /usr/bin/env python
############################################################################
# Copyright (c) 2015 Saint Petersburg State University
# All Rights Reserved
# See file LICENSE for details.
############################################################################
import SeqIO
from SeqIO import SeqRecord
import sys
import os
import shutil
import sam_parser
import itertools
def ConstructCoverage(sam, contigs, k):
cov = dict()
for contig in range(len(contigs)):
cov[contig] = [0] * (len(contigs[contig]) + 1)
reads = []
for rec in sam:
reads.append(rec)
if len(reads) == 2:
if reads[0].proper_alignment:
if reads[0].pos + k - 1 < reads[1].pos + reads[1].alen - k:
cov[reads[0].tid][reads[0].pos + k - 1] += 1
cov[reads[0].tid][reads[1].pos + reads[1].alen - k] -= 1
else:
if reads[1].pos + k - 1 < reads[0].pos + reads[0].alen - k:
cov[reads[1].tid][reads[1].pos + k - 1] += 1
cov[reads[0].tid][reads[0].pos + reads[0].alen - k] -= 1
reads = []
return cov
def ConstructCoverageSingle(sam, contigs, k):
cov = dict()
for contig in range(len(contigs)):
cov[contig] = [0] * (len(contigs[contig]) + 1)
for rec in sam:
if rec.proper_alignment:
if rec.pos + k - 1 < rec.pos + rec.alen - k:
cov[rec.tid][rec.pos + k - 1] += 1
cov[rec.tid][rec.pos + rec.alen - k] -= 1
return cov
def OutputHist(cov, contigs, folder):
if os.path.exists(folder):
shutil.rmtree(folder)
os.makedirs(folder)
for contig in range(len(contigs)):
f = open(folder + "/" + contigs[contig].id, "w")
cur = 0
for i in range(len(cov[contig])):
cur += cov[contig][i]
f.write(str(i) + " " + str(cur) + "\n")
f.close()
def ConstructSimpleCoverage(sam, contigs, k):
simple_cov = dict()
for contig in range(len(contigs)):
simple_cov[contig] = [0] * (len(contigs[contig]) + 1)
for rec in sam:
if not rec.is_unmapped:
simple_cov[rec.tid][rec.pos] += 1
simple_cov[rec.tid][rec.pos + rec.alen] -= 1
return simple_cov
def BreakContig(cov, k, min0):
l = len(cov) - 1
if l < 2 * k:
return []
result = []
cur = 0
cur_len0 = 0
prev_break = 0
for i in range(l):
cur += cov[i]
if cur == 0:
cur_len0 += 1
else:
if cur_len0 == i:
prev_break = max(0, i - k)
elif cur_len0 > min0:
result.append([prev_break, i - cur_len0])
prev_break = i
cur_len0 = 0
result.append([prev_break, min(l, l - cur_len0 + k)])
return result
class ContigBreaker:
def __init__(self, contigs, sam, k, min0):
self.part_list_ = []
self.contigs = contigs
self.sam = sam
cov = ConstructCoverage(self.sam, contigs, k)
# OutputHist(cov, contigs, "tmp")
# simple_cov = ConstructSimpleCoverage(sam, k)
for contig in range(len(contigs)):
parts = BreakContig(cov[contig], k, min0)
self.part_list_.append(parts)
def Break(self, contig):
result = []
#print contig.id
#print self.sam.gettid(contig.id)
for part in self.part_list_[self.sam.gettid(contig.id)]:
result.append(contig.subseq(part[0], part[1]))
return result
def OutputBroken(self, output_file):
output = open(output_file, "w")
for contig in self.contigs:
for subcontig in self.Break(contig):
SeqIO.write(subcontig, output, "fasta")
output.close()
class PatternBreaker:
def __init__(self, pattern, rc_pattern, max_cut):
self.pattern = pattern
self.rc_pattern = rc_pattern
self.max_cut = max_cut
def FindLeftPos(self, seq):
l1 = seq.find(self.pattern)
l2 = seq.find(self.rc_pattern)
if l1 == -1:
l1 = len(seq)
if l2 == -1:
l2 = len(seq)
l = min(l1, l2) + len(self.pattern)
if l < self.max_cut:
return l
else:
return 0
def FindRightPos(self, seq):
l1 = seq.rfind(self.pattern)
l2 = seq.rfind(self.rc_pattern)
if l1 == -1:
l1 = 0
if l2 == -1:
l2 = 0
l = max(l1, l2)
if l > len(seq) - self.max_cut:
return l
else:
return len(seq)
def Break(self, contig):
if len(contig) < 2 * self.max_cut:
return []
l,r = self.FindLeftPos(contig.seq), self.FindRightPos(contig.seq)
return [contig.subseq(l, r)]
class NBreaker:
def __init__(self, min_N):
self.min_N = min_N
def Break(self, contig):
result = []
last_break = 0;
pos = 0
while(pos < len(contig) and contig[pos] == 'N'):
pos += 1
while pos <len(contig):
rpos = pos
while(rpos < len(contig) and contig[rpos] == 'N'):
rpos += 1
if rpos - pos >= self.min_N:
result.append(contig.subseq(last_break, pos))
last_break = rpos
pos = max(rpos, pos + 1)
if last_break != len(contig):
result.append(contig.subseq(last_break, len(contig)))
return result
#if __name__ == '__main__':
# ContigBreaker(sys.argv[1], sys.argv[3], int(sys.argv[4]), int(sys.argv[5])).OutputBroken(sys.argv[2])
|
py | b412bd974377fa7e5525839679ce0b522db74a01 | # Make this code DRY
def same_length(a, b):
"""Return whether positive integers a and b have the same number of digits."""
return find_length(a) == find_length(b)
def find_length(x):
x_digits = 0
while x > 0:
x = x // 10
x_digits = x_digits + 1
return x_digits
print(same_length(50, 70))
print(same_length(50, 100))
print(same_length(10000, 12345))
|
py | b412be501300946901b5efe1a9c151275291edda |
import random
from nltk.tokenize import TweetTokenizer
from nltk.stem.wordnet import WordNetLemmatizer
import re,string
from nltk.tag import pos_tag
from nltk.corpus import stopwords
from nltk.corpus import twitter_samples
def parseTweets(isNewData,tweets,posOrNeg):
print("inside parse")
allCleanedTokens = []
if (isNewData == True):
for tweet in tweets:
tokenizedTweet = tokenizeTweet(tweet)
cleanedTokens = removeNoise(tokenizedTweet, stopwords.words('english'))
allCleanedTokens.append(cleanedTokens)
#wordsAllTweets = get_all_words(allCleanedTokens)
#print(FreqDist(wordsAllTweets).most_common(25))
tokensForModel = get_tweets_for_model(allCleanedTokens)
else:
tweets = 'positive_tweets.json' if (posOrNeg == "positive") else 'negative_tweets.json'
tweet_tokens = twitter_samples.tokenized(tweets)
for tokens in tweet_tokens:
allCleanedTokens.append(removeNoise(tokens, stopwords.words('english')))
tokensForModel = get_tweets_for_model(allCleanedTokens)
#wordsAllTweets = get_all_words(positive_cleaned_tokens_list)
#print(FreqDist(wordsAllTweets).most_common(25))
return tokensForModel
def prepareDataForTraining(isNewData,positiveTweets,negativeTweets):
dataset = []
if(isNewData == True):
data = []
for tweet in negativeTweets:
data.append(tweet)
for tweet in positiveTweets:
data.append(tweet)
random.shuffle(data)
for tweet in data:
element = (tweet['tokenTweet'],tweet['label'])
dataset.append(element)
else:
positive_dataset = [(tweet_dict, "Positive") for tweet_dict in positiveTweets]
negative_dataset = [(tweet_dict, "Negative") for tweet_dict in negativeTweets]
dataset = positive_dataset + negative_dataset
random.shuffle(dataset)
return dataset
def tokenizeTweet(tweets):
return TweetTokenizer().tokenize(tweets)
def removeNoise(tokens,stopWords=()):
cleaned_tokens = []
for token,tag in pos_tag(tokens):
token = re.sub('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+#]|[!*\(\),]|'\
'(?:%[0-9a-fA-F][0-9a-fA-F]))+','', token)
token = re.sub("(@[A-Za-z0-9_]+)","", token)
pos = lemmatize_sentence(tag)
token = WordNetLemmatizer().lemmatize(token,pos)
if len(token) > 0 and token not in string.punctuation and token.lower() not in stopWords:
cleaned_tokens.append(token.lower())
return cleaned_tokens
def lemmatize_sentence(tag):
#lemmatized_sentence = []
#for word,tag in posTaggedTweet:
if tag.startswith('NN'):
pos = 'n'
elif tag.startswith('VB'):
pos = 'v'
else:
pos = 'a'
return pos
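# e.g. a Penn Treebank tag 'NNS' maps to 'n' (noun), 'VBD' to 'v' (verb), and any
# other tag falls back to 'a' (adjective) for WordNetLemmatizer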
def get_tweets_for_model(allCleanedTokens):
for tweet_tokens in allCleanedTokens:
yield dict([token, True] for token in tweet_tokens)
def get_all_words(cleaned_tokens_list):
for tokens in cleaned_tokens_list:
for token in tokens:
yield token
|
py | b412be66a004777aafa08490b0f01621f2a48ed1 | # AUTOR: DANIEL SOUZA LIMA 15 de setembro de 2021
#tradução do código em .c feito pelo Prof. Dr. Lucas Nicolao
#/* **************************************************************************************
#Algortimo básico simulação dinâmica molecular no ensemble NVE.
#Potencial de pares de Lennard-Jones em D=3, levando em conta todos pares.
# Parâmetros da simulação: temperatura, densidade (rho), passo de tempo
#(dt), número de partículas (N), dimensão do sistema (D)
# Descrições das funções:
#1) force(double r[][D], double a[][D])
#- calcula a força resultante em cada partícula/direção cartesiana, e armazena em 'a'
#2) vverlet(double r[][D], double v[][D], double a[][D])
#- atualização das posições 'r' e velocidades 'v' de acordo com velocity Verlet
#- devolve o valor da energia potencial por partícula
#3) measures(double r[][D], double v[][D], double *energia, double *temp, double *pressao)
#- mede energia total por partícula, temperatura cinética e pressão virial
#- lembrando que 3*temp/2 = energia cinética e energia - 3*temp/2 = energia potencial
#4) double energiacin(double v[][D])
#- devolve energia cinética por partícula
#5) overrelax(double r[][D], double a[][D])
#- suaviza forças de uma condição inicial aleatórias segundo dr/dt = -grad U
#6) initial3D(double r[][D], double v[][D], int qual)
#- condições iniciais aleatórias (qual=0) e cristal cúbico (qual=1) para posições
#- velocidades aleatórias de acordo com parâmetro temperatura
#7) reescalavT(double v[][D], double Talvo)
#- rescala velocidades para atingir temperatura alvo Talvo
#8) reescalarRho(double r[][D], double rho_alvo)
#- reescala posições, tamanho da caixa e densidade para mudar densidade para rho_alvo
#9) printXYZ(double r[][D])
#- imprime (na tela) configurações p/ compor arquivo xyz. 1a partícula cor diferente.
#************************************************************************************** */
import numpy as np
import math
import random
global temperatura, rho, dt, N, D, L, V
temperatura = (1.0)
rho = (0.80)
dt = (0.001)
N = int(100)
D = int(3)
L = pow((N)/rho, 1.0/(D))
V = pow(L, (D))
print("Hello, world!")
def force(r, a):
a = np.zeros((N, D))
dr = np.zeros(D)
en = 0
for i in range(0, N-1):
for j in range(i + 1, N):
d2 = 0
for n in range(0, D):
dr[n] = r[i, n] - r[j, n]
dr[n] = dr[n] - L * math.floor(dr[n] / L + 0.5)
d2 += pow(dr[n], 2)
r2 = 1.0 / d2
r6 = pow(r2, 3)
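            # Lennard-Jones force in reduced units: ff = F(r)/r = 48*(1/r^14 - 0.5/r^8)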
ff = 48.0 * r2 * r6 * (r6 - 0.5)
for n in range(0, D):
a[i, n] += ff*dr[n]
a[j, n] -= ff*dr[n]
en += 4.0 * r6 * (r6 - 1.0)
return a, en / N
def vverlet(r, v, a):
energia = 0
a, energia = force(r, a)
for i in range(0, N):
for n in range(0, D):
v[i, n] += 0.5*dt*a[i, n]
r[i, n] += dt*v[i, n]
a, energia = force(r, a)
for i in range(0, N):
for n in range(0, D):
v[i, n] += 0.5*dt*a[i, n]
return r, v, a
def measures(r, v, energia, temp, pressao):
dr = np.zeros(D)
d2 = 0
r2 = 0
r6 = 0
ff = 0
virial = 0.
sumv2 = 0.
en = 0
for i in range(0, N - 1):
for n in range(0, D):
sumv2 += v[i, n] * v[i, n]
for j in range(i + 1, N):
d2 = 0
for n in range(0, D):
dr[n] = r[i, n] - r[j, n]
dr[n] = dr[n] - L * math.floor((dr[n] / L) + 0.5)
d2 += pow(dr[n], 2)
r2 = 1.0 / d2
r6 = pow(r2, 3)
ff = 48.0 * r2 * r6 * (r6 - 0.5)
for n in range(0, D):
virial += ff * dr[n] * dr[n]
en += 4.0 * r6 * (r6 - 1.0)
for n in range(0, D):
sumv2 += v[N - 1, n] * v[N-1, n]
energia = sumv2 / (2.0 * (N)) + (en / (N))
temp = sumv2 / ((D) * (N))
pressao = temp * rho + virial / ((V) * (D))
return r, v, energia, temp, pressao
def energiacin(v):
K = 0.0
for i in range(0, N):
for n in range(0, D):
K += v[i, n] * v[i, n]
return K / (2.0 * N)
def overrelax(r, a):
Dt = 0.1
energia = 0.0
a, energia = force(r, a)
for i in range(0, N):
norma = 0.0
for n in range(0, D):
norma += pow(a[i, n], 2)
norma = math.sqrt(norma)
for n in range(0, D):
r[i, n] += Dt * a[i, n] / norma
return
def initial3D(r, v, qual):
somav2 = 0
somav = np.zeros(D)
if qual == 1:
Nsites = int(round(pow(N, 1.0 / D)))
dx = L / (Nsites)
for i in range(0, Nsites):
for j in range(0, Nsites):
for k in range(0, Nsites):
ii = k + Nsites * (j + i * Nsites)
if ii < N:
r[ii, 0] = (i + 0.5) * dx
r[ii, 1] = (j + 0.5) * dx
r[ii, 2] = (k + 0.5) * dx
else:
for i in range(0, N):
for n in range(0, D):
r[i, n] = random.random() * L
for i in range(0, N):
for n in range(0, D):
v[i, n] = random.random() - 0.5
somav[n] += v[i, n]
somav2 += math.pow(v[i, n], 2)
for n in range(0, D):
somav[n] /= N
somav2 /= N
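    # Equipartition: rescale so that <v^2> per particle equals D * temperatura,
    # after removing the centre-of-mass drift stored in somav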
fac = math.sqrt(D * temperatura / somav2)
for i in range(0, N):
for n in range(0, D):
v[i, n] = (v[i, n] - somav[n]) * fac
return v
def reescalavT(v, Talvo):
    temp = 0.0
    somav2 = 0.0
for i in range(0, N):
for n in range(0, D):
somav2 += pow(v[i, n], 2)
somav2 /= N
temp = somav2 / D
fac = math.sqrt(Talvo / temp)
for i in range(0, N):
for n in range(0, D):
v[i, n] *= fac
return v
def reescalarRho(r, rho_alvo):
    global rho  # rho is reassigned below, so it must be declared global here
    fac = pow(rho / rho_alvo, 1.0 / D)
for i in range(0, N):
for n in range(0, D):
r[i, n] *= fac
rho = rho_alvo
return rho
def printXYZ(r):
print(str(N) + "\n\n")
for i in range(0, N):
if i == 0:
aux = 'B'
else:
aux = 'A'
print(str(aux))
for n in range(0, D):
print(str(r[i][n] - L * math.floor(r[i][n]/L + 0.5) + L/2.) + "\n")
# Dump writer for OVITO visualization
def impressora_video(r, N, tempo):
buffer = "ITEM: TIMESTEP\n" + str(tempo) + "\n" + "ITEM: NUMBER OF ATOMS\n" + str(N) + "\n" + "ITEM: BOX BOUNDS ss ss pp\n"
buffer += "-1" + " " + "1\n" + "-1" + " " + "1\n"
buffer += "-1" + " " + "1\n" + "ITEM: ATOMS id x y z" + "\n"
with open("particles.dump", 'a') as file_object:
file_object.write(buffer)
for i in range(N):
buffer2 = str(int(i)) + "\t" + str(round(r[i,0],3)) + "\t" + str(round(r[i,1],3)) + "\t" + str(round(r[i,2],3)) + "\n"
file_object.write(buffer2)
# Reads the initial conditions --------------- COMMENT OUT THE CALL TO "initial3D" IF YOU USE THIS FUNCTION
def ler_CondicoesIniciais(nome_do_arquivo):
with open(nome_do_arquivo, "r") as file_object:
dados = np.loadtxt(file_object)
N = dados.shape[0]
r = np.zeros((dados.shape[0],int(dados.shape[1]/2)))
v = np.zeros((dados.shape[0],int(dados.shape[1]/2)))
r = dados[:,0:3]
v = dados[:,3:6]
return r, v, N
# Start of the program
r = np.zeros((N, D))
v = np.zeros((N, D))
a = np.zeros((N, D))
K = 0.0
U = 0.0
E = 0.0
T = 0.0
P = 0.0
# Random initial condition
initial3D(r, v, 0)  ### COMMENT THIS OUT IF USING "ler_CondicoesIniciais("condicoes_iniciais.dat")"
K = energiacin(v)
# Initial condition read from the file "condicoes_iniciais.dat",
# in the format x y z vx vy vz, where each line corresponds to one particle
#r, v, N = ler_CondicoesIniciais("condicoes_iniciais.dat")
for t in range(0, 5 * N):
overrelax(r, a)
a, U = force(r, a)
if U <= 0:
break
for t in range(0, 1000):
r, v, a = vverlet(r, v, a)
r, v, E, T, P = measures(r, v, E, T, P)
K = 3.0 * T / 2.0
U = E - K
print(str(t) + " " + str(round(K, 6)) + " " + str(round(U, 6)) + " " + str(round(E, 6)) + " " + str(round(T, 6)) + " " + str(round(P, 6)))
impressora_video(r, N, t)
|
py | b412bf28a35c9e7861cfb81e386fb1283e23f115 | import os
"""This method gets the league log from the teams that played matches"""
def get_league_log(sample_data):
league_log = {}
with open(sample_data, encoding='utf-8') as matches:
for match in matches:
match_teams = str(match).strip("\n").split(",")
            home_team = str(match_teams[0]).strip().rsplit(" ", 1)[0]
            away_team = str(match_teams[1]).strip().rsplit(" ", 1)[0]
initials_points = 0
if home_team not in league_log:
league_log[home_team] = initials_points
if away_team not in league_log:
league_log[away_team] = initials_points
return league_log
"""This method initializes the league table and calculates teams points and finnally print out the updated league table"""
def init(input_file):
league_log = get_league_log(input_file)
with open(input_file, encoding='utf8') as matchResults:
for result in matchResults:
score = str(result).strip("\n").split(",")
            home_team = str(score[0]).strip().rsplit(" ", 1)[0]
            home_team_goals = int(str(score[0]).strip().rsplit(" ", 1)[1])
            away_team = str(score[1]).strip().rsplit(" ", 1)[0]
            away_team_goals = int(str(score[1]).strip().rsplit(" ", 1)[1])
if home_team_goals == away_team_goals:
league_log[home_team] = league_log[home_team] + 1
league_log[away_team] = league_log[away_team] + 1
elif home_team_goals > away_team_goals:
league_log[home_team] = league_log[home_team] + 3
elif home_team_goals < away_team_goals:
league_log[away_team] = league_log[away_team] + 3
league_team_name_sort_list = sorted(league_log.items())
league_team_name_sort_dict = {team: points for team, points in league_team_name_sort_list}
league_team_points_sort_list = sorted(
league_team_name_sort_dict.items(),
key=lambda x: x[1],
reverse=True)
league_table = {team: points for team, points in league_team_points_sort_list}
count = 0
for team in league_table:
count += 1
print(f'{count}. {team}, {league_table[team]} pts')
"""This method prompts for sample data text file, to be used in producing the league table"""
def get_input_data():
data_file_name = input("Enter your sample data text file with it's .txt extension: ")
try:
if os.path.exists(data_file_name):
init(data_file_name)
else:
print("FILE NOT FOUND! Please ensure you provided the correct and existing "
"file name with its extension(.txt).")
get_input_data()
except os.error:
print("FILE NOT FOUND! Please ensure you provided the correct and existing "
"file name with its extension(.txt).")
get_input_data()
if __name__ == '__main__':
get_input_data()
|
py | b412bf6d52bd7daa469418e2ba3eb16df0ec0e1b | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2016 China Telecommunication Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
__author__ = 'chenhg'
from tornado.testing import *
from base_handler import *
import time
import os
import subprocess
driver_serv_cmd = 'coverage run --parallel-mode ms_controller.py'
test_serv_cmd = 'coverage run --parallel-mode test.py'
fake_openo_serv_cmd = 'coverage run --parallel-mode fake_openo.py'
# tunnel_server_cmd = 'coverage run --parallel-mode tunnel_server.py'
# cus_server_cmd = 'coverage run --parallel-mode customer_server.py'
# ms_controller_cmd = 'coverage run --parallel-mode ms_controller.py'
# os.system(command)
driver_prefix_nodes_uri = r'http://127.0.0.1:8670/openoapi/sdno-driver-ct-te/v1/nodes:'
driver_prefix_lsps_uri = r'http://127.0.0.1:8670/openoapi/sdno-driver-ct-te/v1/lsps:'
driver_prefix_flow_policy_uri = r'http://127.0.0.1:8670/openoapi/sdno-driver-ct-te/v1/flow-policy:'
driver_prefix_links_uri = r'http://127.0.0.1:8670/openoapi/sdno-driver-ct-te/v1/links:'
class Test_DriverCT(AsyncTestCase):
def setUp(self):
super(Test_DriverCT,self).setUp()
pass
def tearDown(self):
super(Test_DriverCT,self).tearDown()
@tornado.testing.gen_test
def test_i_create_link(self):
print('test_create_link:')
req_body = {"controller_id": "", "link_parameters": {}}
code, resp = yield base_handler.do_json_post(driver_prefix_links_uri + 'create-link', req_body)
self.assertEqual(200, code, 'FAIL:test_create_link')
@tornado.testing.gen_test
def test_h_delete_lsp(self):
print('test_delete_lsp:')
#req: {"uid": "46", "user_data": {"lsp_id": "46", "from_router_uid": "PE11A", "lsp_name": "LSP_1-8" }, "callback":"http://127.0.0.1/path"}
#resp: {"lsp_uid":0, "lsp_name":"", "status":1}
req_body = {"uid": "46", "user_data": {"lsp_id": "46", "from_router_uid": "PE14Z", "lsp_name": "lsp_zte" }, "callback":"http://127.0.0.1/path"}
code, resp = yield base_handler.do_json_post(driver_prefix_lsps_uri + 'get-lsp', req_body)
self.assertEqual(200, code, 'FAIL:test_delete_lsp')
@tornado.testing.gen_test
def test_g_delete_flow_policy(self):
print('test_delete_flow_policy:')
#req: {"uid": "46", "user_data": {"lsp_id": "49", "flow_id": "flow_LSP_rest_1-6-5-8", "from_router_uid": 2, "flow_name": "lsp_LSP_rest_1-6-5-8_100", "lsp_name": "LSP_rest_1-6-5-8"}, "callback":"http://127.0.0.1/path"}
#resp: {"flow_src": "", "flow_dst": "", "flow_uid": "","status":1, "user_data": {}}
req_body = {"uid": "46", "user_data": {"lsp_id": "49", "flow_id": "flow_LSP_rest_1-6-5-8", "from_router_uid": 'PE14Z', "flow_name": "lsp_LSP_rest_1-6-5-8_100", "lsp_name": "LSP_rest_1-6-5-8"}, "callback":"http://127.0.0.1/path"}
code, resp = yield base_handler.do_json_post(driver_prefix_flow_policy_uri + 'create-flow-policy', req_body)
self.assertEqual(200, code, 'FAIL:test_delete_flow_policy')
@tornado.testing.gen_test
def test_f_get_flow_policy(self):
print('test_get_flow_policy:')
#req: {"uid": "flow_uid", "user_data": {"lsp_id": "49", "flow_id": "flow_LSP_rest_1-6-5-8", "from_router_uid": 2, "flow_name": "lsp_LSP_rest_1-6-5-8_100", "lsp_name": "LSP_rest_1-6-5-8"}}
req_body = {"uid": "flow_uid", "user_data": {"lsp_id": "49", "flow_id": "flow_LSP_rest_1-6-5-8", "from_router_uid": 'PE14Z', "flow_name": "lsp_LSP_rest_1-6-5-8_100", "lsp_name": "LSP_rest_1-6-5-8"}}
code, resp = yield base_handler.do_json_post(driver_prefix_flow_policy_uri + 'get-flow-policy', req_body)
self.assertEqual(200, code, 'FAIL:test_get_flow_policy')
@tornado.testing.gen_test
def test_e_create_flow_policy(self):
print('test_create_flow_policy:')
#req: {"flow_name": "", "lsp_uid": "lsp_0", "priority":7, "flow": {"src": "1.2.3.0/24", "dst": "5.6.7.8/24"},"user_data": {'lsp_id': '41', 'from_router_uid': 'PE11A', 'lsp_name': 'ALU_S'}, "callback":"http://127.0.0.1/path"}
#resp: {"flow_src": "", "flow_dst": "", "flow_uid": "","status":1, "user_data": {}}
req_body = {"flow_name": "", "lsp_uid": "lsp_0", "priority":7, "flow": {"src": "1.2.3.0/24", "dst": "5.6.7.8/24"},"user_data": {'lsp_id': '41', 'from_router_uid': 'PE14Z', 'lsp_name': 'lsp_zte'}, "callback":"http://127.0.0.1/path"}
code, resp = yield base_handler.do_json_post(driver_prefix_flow_policy_uri + 'create-flow-policy', req_body)
self.assertEqual(200, code, 'FAIL:test_create_flow_policy')
@tornado.testing.gen_test
def test_d_get_lsp(self):
print('test_get_lsp:')
#req: {"uid": "46", "user_data": { "lsp_id": "46", "from_router_uid": "PE11A", "lsp_name": "LSP_1-8" }}
#resp: [ {"uid": "lsp_0", "from_router_name": "", "to_router_name": "", "bandwidth": "", "to_router_uid": "", "from_router_uid": "PE14Z", "name": "lsp_zte", "hop_list":[], "path":[], "status":1, "priority":7, "delay":"", "user_data":{}} ]
req_body = {"uid": "46", "user_data": { "lsp_id": "46", "from_router_uid": "PE14Z", "lsp_name": "lsp_zte" }}
code, resp = yield base_handler.do_json_post(driver_prefix_lsps_uri + 'get-lsp', req_body)
self.assertEqual(200, code, 'FAIL:test_get_lsp')
@tornado.testing.gen_test
def test_c_update_lsp(self):
print('test_update_lsp:')
#req: {"from_router_name": "", "to_router_name": "", "bandwidth": "", "to_router_uid": "", "from_router_uid": "", "callback":"http://127.0.0.1/path", "name": "", "hop_list":[], "priority":7, "delay":""}
#resp: {"lsp_uid":0, "lsp_name":"", "status":1}
req_body = {"uid": "46", "user_data": { "lsp_id": "46", "from_router_uid": "PE14Z", "lsp_name": "LSP_1-8" }, "callback":"http://127.0.0.1/path", "bandwidth":"1000"}
code, resp = yield base_handler.do_json_post(driver_prefix_lsps_uri + 'update-lsp', req_body)
self.assertEqual(200, code, 'FAIL:test_update_lsp')
@tornado.testing.gen_test
def test_b_create_lsp(self):
print('test_create_lsp:')
#req: {"from_router_name": "", "to_router_name": "", "bandwidth": "", "to_router_uid": "", "from_router_uid": "", "callback":"http://127.0.0.1/path", "name": "", "hop_list":[], "priority":7, "delay":""}
#resp: {"lsp_uid":0, "lsp_name":"", "status":1, "user_data":{}}
req_body = {"from_router_name": "", "to_router_name": "", "bandwidth": "100", "to_router_uid": "PE14Z", "from_router_uid": "PE14Z", "callback":"http://127.0.0.1/path", "name": "lsp_zte", "hop_list":[], "priority":7, "delay":""}
code, resp = yield base_handler.do_json_post(driver_prefix_lsps_uri + 'create-lsp', req_body)
self.assertEqual(200, code, 'FAIL:test_create_lsp')
@tornado.testing.gen_test
def test_a_set_nodes(self):
print('test_set_nodes:')
#req: {"equips":[{"vendor": "ZTE", "uid": "PE14Z", "pos": "Old village of Gao", "community":"roastedchikenPE14Z", "ip_str": "14.14.14.14", "y": 48.9, "x": 113.8, "model": "aladin", "name": "PE14Z"} ]}
#resp: {"err_code":0, "msg":"set equips finished"}
req_body = {"equips":[{"vendor": "ZTE", "uid": "PE14Z", "pos": "Old village of Gao", "community":"roastedchikenPE14Z", "ip_str": "14.14.14.14", "y": 48.9, "x": 113.8, "model": "aladin", "name": "PE14Z"} ]}
code, resp = yield base_handler.do_json_post(driver_prefix_nodes_uri + 'set-nodes', req_body)
self.assertIn('err_code', resp, 'FAIL:test_set_nodes')
if __name__ == '__main__':
print '---Service Started....'
# os.system('coverage erase')
driver_serv = subprocess.Popen(driver_serv_cmd, shell=True)
test_serv = subprocess.Popen(test_serv_cmd, shell=True)
fake_serv = subprocess.Popen(fake_openo_serv_cmd, shell=True)
# tunnel_server = subprocess.Popen(tunnel_server_cmd, shell=True)
# cus_server = subprocess.Popen(cus_server_cmd, shell=True)
# ms_controller_server = subprocess.Popen(ms_controller_cmd, shell=True)
time.sleep(3)
suite = unittest.TestLoader().loadTestsFromTestCase(Test_DriverCT)
unittest.TextTestRunner(verbosity=2).run(suite)
try:
print '---Service Terminated...'
sig = 2 #signal.SIGINT
driver_serv.send_signal(sig)
test_serv.send_signal(sig)
fake_serv.send_signal(sig)
# tunnel_server.send_signal(sig)
# cus_server.send_signal(sig)
# ms_controller_server.send_signal(sig)
print '@@@Service Terminated...'
pass
except:
print '*****Service Terminated...'
traceback.print_exc()
pass
# subprocess.Popen('tskill python & tskill python', shell=True)
# os.system('coverage combine & coverage html')
print '+++Service Terminated...'
|
py | b412bfe00d16dad91e59d938bbd5eb54650d4dc7 | import sys
from pathlib import Path
import click
from builder import VERSION
from builder.engine import Engine
from builder.project import get_project
from builder.utils import global_options, end, out
@click.command()
@click.option('--quiet', '-q', is_flag=True, help='Suppress normal output.')
@click.option('--verbose', '-v', count=True, help='Produce verbose output. Repeat for more verbosity.')
@click.option('--directory', '-d',
type=click.Path(exists=True, dir_okay=True, file_okay=False, allow_dash=False, resolve_path=True),
help='Specify the project root directory. The current directory is used when this is not specified.')
@click.option('--language', '-l', multiple=True, help='Add "language" to this run. This option may be repeated.')
@click.option('--no-requires', '-r', is_flag=True, help='Run specified tasks without running required tasks first.')
@click.option('--force-fetch', '-f', is_flag=True,
help="Do not read from the local file cache; always download dependencies. This still updates the local "
"file cache.")
@click.option('--set', '-s', 'set_var', multiple=True, metavar='<name=value[,...]>',
help='Set a global variable to a value. This is typically used to provide input data to a task. '
'Allowed names of variables are determined by tasks that support them. The value of this option '
'may be a comma-separated list of "name=value" pairs and/or the option may repeated.')
@click.version_option(version=VERSION, help="Show the version of builder and exit.")
@click.argument('tasks', nargs=-1)
def cli(quiet, verbose, directory, language, no_requires, force_fetch, set_var, tasks):
"""
Use this tool to build things based on a language.
Each language has its own toolchain and associated tasks. Describe a project
in a "project.yaml" file at the root of your project.
"""
# First, we need to store our global options.
global_options.\
set_quiet(quiet).\
set_verbose(verbose).\
set_languages(language).\
set_independent_tasks(no_requires).\
set_force_remote_fetch(force_fetch).\
set_vars(set_var).\
set_tasks(tasks)
project = get_project(Path(directory) if directory else Path.cwd())
if project.has_no_languages():
end('No language(s) specified in project.yaml or with --language option.')
if project.has_unknown_languages():
unsupported = ', '.join(project.get_unknown_languages())
end(f'Unsupported language(s) specified in project.yaml/--language: {unsupported}')
# Make the project globally known.
global_options.set_project(project)
out(f'Project: {project.description}', fg='bright_white')
try:
sys.exit(Engine(project).run())
except ValueError as error:
end(error.args[0])
|
py | b412c05f0a7fde005af94ca563ca0d47523da7fd | import json
from PyQt5.QtCore import QThread, pyqtSignal
from threadsMcl.Connection import ServerConnect
class UnlockCoin(QThread):
change_value_information_get_unlock = pyqtSignal(bool)
change_value_information_get_transactionID = pyqtSignal(str)
command_mcl_unlock_coin = ""
command_mcl_unlock_coin_sendrawtransaction = ""
server_username = ""
server_hostname = ""
server_password = ""
server_port = 22
def run(self):
self.unlockCoin()
def unlockCoin(self):
print("Sunucya bağlanmak için bilgiler alindi.")
ssh = ServerConnect(self.server_hostname, self.server_username, self.server_password)
print(self.command_mcl_unlock_coin)
stdout = ssh.command(self.command_mcl_unlock_coin)
lines = stdout.readlines()
out_ = ""
for deger in lines:
deger = deger.split("\n")
out_ = out_ + " " + deger[0]
print("Get Info Bitti")
print("-------")
print(out_)
out_ = out_.strip()
try:
y = json.loads(out_)
tmp=y["result"]
self.change_value_information_get_unlock.emit(False)
except:
            # Confirm the hex (broadcast the raw transaction)
# ---------------------------------------------------------------
print(self.command_mcl_unlock_coin_sendrawtransaction + "\"" + out_ + "\"")
stdout = ssh.command(self.command_mcl_unlock_coin_sendrawtransaction + "\"" + out_ + "\"")
lines = stdout.readlines()
out_ = ""
for deger in lines:
deger = deger.split("\n")
out_ = out_ + " " + deger[0]
print(out_)
self.change_value_information_get_transactionID.emit(out_)
self.change_value_information_get_unlock.emit(True)
|
py | b412c1138ec17d4472855b39f5acd77d3d3763c0 | """backend URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import include, path
from rest_framework.authtoken.views import ObtainAuthToken
urlpatterns = [
path("admin/", admin.site.urls),
# API base url
path("api/", include("backend.api_router")),
path("api-token-auth/", ObtainAuthToken.as_view()),
]
|
py | b412c37efd9fd33ca8a156e5a16022d99bcee4fd | # import
import random
import numpy as np
from pprint import pprint
from pydub import AudioSegment
from typing import List, Tuple
from load_data import match_target_amplitude, graph_spectrogram, get_spectrogram
# code
# Pick a random time segment (for inserting a clip into the background)
def get_random_time_segment(segment_ms: int) -> Tuple[int, int]:
segment_start = np.random.randint(low=0, high=10000-segment_ms)
segment_end = segment_start + segment_ms - 1
return (segment_start, segment_end)
# Check whether a segment overlaps any previously inserted segments
def is_overlapping(segment_time: Tuple[int, int], previous_segments: List) -> bool:
segment_start, segment_end = segment_time
    overlap = False  # initialize overlap to False
    # compare the candidate segment against every previously inserted segment
for previous_start, previous_end in previous_segments:
if segment_start <= previous_end and segment_end >= previous_start:
overlap = True
return overlap
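# e.g. (hypothetical times, in ms) is_overlapping((900, 1000), [(100, 200), (950, 1430)])
# returns True because 900-1000 intersects 950-1430; with only [(100, 200)] it would be False.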
# Overlay an audio clip onto the background at a random, non-overlapping position
def insert_audio_clip(background: AudioSegment, audio_clip: AudioSegment, previous_segments: List) -> Tuple[AudioSegment, Tuple]:
segment_ms = len(audio_clip)
segment_time = get_random_time_segment(segment_ms)
while is_overlapping(segment_time, previous_segments):
segment_time = get_random_time_segment(segment_ms)
previous_segments.append(segment_time)
new_background = background.overlay(audio_clip, position=segment_time[0])
return new_background, segment_time
def insert_ones(y: List, segment_end_ms: int, index: int) -> List:
Ty = y.shape[1]
segment_end_y = int(segment_end_ms * Ty / 10000.0)
for i in range(segment_end_y + 1, segment_end_y + 51):
if i < Ty:
y[0, i] = index
return y
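# e.g. with a hypothetical output length Ty = 1375 and segment_end_ms = 5000,
# segment_end_y = 687 and the 50 following label steps y[0, 688:738] are set to the index.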
# Create a new training example using insert_audio_clip and insert_ones
def create_training_example(background: AudioSegment, positives: List[List[AudioSegment]], negatives: List[AudioSegment], Ty: int) -> Tuple[List, List]:
    background = background - 20  # lower the background volume
y = np.zeros((1, Ty))
previous_segments = []
# positive insertion
for index, positive in enumerate(positives):
number_of_positives = np.random.randint(1, 3)
random_indices = np.random.randint(len(positive), size=number_of_positives)
random_positives = [positive[i] for i in random_indices]
for random_positive in random_positives:
background, segment_time = insert_audio_clip(background, random_positive, previous_segments)
segment_start, segment_end = segment_time
y = insert_ones(y, segment_end, index + 1)
# negative insertion
number_of_negatives = np.random.randint(0, 2)
random_indices = np.random.randint(len(negatives), size=number_of_negatives)
random_negatives = [negatives[i] for i in random_indices]
for random_negative in random_negatives:
background, _ = insert_audio_clip(background, random_negative, previous_segments)
background = match_target_amplitude(background, -20.0)
background.export("./datasets/created_file/train.wav", format="wav") # _io.BufferedRandom
x = get_spectrogram("./datasets/created_file/train.wav")
return x, y
# test
if __name__ == "__main__":
print(get_spectrogram('./test_wav.wav'))
|
py | b412c42d47139afcc72ae395eea002b75ec44068 | # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import hashlib
import json
from oslo_utils import encodeutils
from requests import codes
import urllib.parse
import warlock
from glanceclient.common import utils
from glanceclient import exc
from glanceclient.v2 import schemas
DEFAULT_PAGE_SIZE = 20
SORT_DIR_VALUES = ('asc', 'desc')
SORT_KEY_VALUES = ('name', 'status', 'container_format', 'disk_format',
'size', 'id', 'created_at', 'updated_at')
class Controller(object):
def __init__(self, http_client, schema_client):
self.http_client = http_client
self.schema_client = schema_client
@utils.memoized_property
def model(self):
schema = self.schema_client.get('image')
warlock_model = warlock.model_factory(
schema.raw(), base_class=schemas.SchemaBasedModel)
return warlock_model
@utils.memoized_property
def unvalidated_model(self):
"""A model which does not validate the image against the v2 schema."""
schema = self.schema_client.get('image')
warlock_model = warlock.model_factory(
schema.raw(), base_class=schemas.SchemaBasedModel)
warlock_model.validate = lambda *args, **kwargs: None
return warlock_model
@staticmethod
def _wrap(value):
if isinstance(value, str):
return [value]
return value
@staticmethod
def _validate_sort_param(sort):
"""Validates sorting argument for invalid keys and directions values.
:param sort: comma-separated list of sort keys with optional <:dir>
after each key
"""
for sort_param in sort.strip().split(','):
key, _sep, dir = sort_param.partition(':')
if dir and dir not in SORT_DIR_VALUES:
msg = ('Invalid sort direction: %(sort_dir)s.'
' It must be one of the following: %(available)s.'
) % {'sort_dir': dir,
'available': ', '.join(SORT_DIR_VALUES)}
raise exc.HTTPBadRequest(msg)
if key not in SORT_KEY_VALUES:
msg = ('Invalid sort key: %(sort_key)s.'
' It must be one of the following: %(available)s.'
) % {'sort_key': key,
'available': ', '.join(SORT_KEY_VALUES)}
raise exc.HTTPBadRequest(msg)
return sort
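    # Example: sort="name:asc,status:desc" passes validation, while sort="name:up"
    # (bad direction) or sort="owner" (unsupported key) raises HTTPBadRequest.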
@utils.add_req_id_to_generator()
def list(self, **kwargs):
"""Retrieve a listing of Image objects.
:param page_size: Number of images to request in each
paginated request.
:returns: generator over list of Images.
"""
limit = kwargs.get('limit')
# NOTE(flaper87): Don't use `get('page_size', DEFAULT_SIZE)` otherwise,
# it could be possible to send invalid data to the server by passing
# page_size=None.
page_size = kwargs.get('page_size') or DEFAULT_PAGE_SIZE
def paginate(url, page_size, limit=None):
next_url = url
req_id_hdr = {}
while True:
if limit and page_size > limit:
# NOTE(flaper87): Avoid requesting 2000 images when limit
# is 1
next_url = next_url.replace("limit=%s" % page_size,
"limit=%s" % limit)
resp, body = self.http_client.get(next_url, headers=req_id_hdr)
                # NOTE(rsjethani): Store current request id so that it can be
# used in subsequent requests. Refer bug #1525259
req_id_hdr['x-openstack-request-id'] = \
utils._extract_request_id(resp)
for image in body['images']:
# NOTE(bcwaldon): remove 'self' for now until we have
# an elegant way to pass it into the model constructor
# without conflict.
image.pop('self', None)
# We do not validate the model when listing.
# This prevents side-effects of injecting invalid
# schema values via v1.
yield self.unvalidated_model(**image), resp
if limit:
limit -= 1
if limit <= 0:
return
try:
next_url = body['next']
except KeyError:
return
filters = kwargs.get('filters', {})
# NOTE(flaper87): We paginate in the client, hence we use
# the page_size as Glance's limit.
filters['limit'] = page_size
tags = filters.pop('tag', [])
tags_url_params = []
for tag in tags:
if not isinstance(tag, str):
raise exc.HTTPBadRequest("Invalid tag value %s" % tag)
tags_url_params.append({'tag': encodeutils.safe_encode(tag)})
for param, value in filters.items():
if isinstance(value, str):
filters[param] = encodeutils.safe_encode(value)
url = '/v2/images?%s' % urllib.parse.urlencode(filters)
for param in tags_url_params:
url = '%s&%s' % (url, urllib.parse.urlencode(param))
if 'sort' in kwargs:
if 'sort_key' in kwargs or 'sort_dir' in kwargs:
raise exc.HTTPBadRequest("The 'sort' argument is not supported"
" with 'sort_key' or 'sort_dir'.")
url = '%s&sort=%s' % (url,
self._validate_sort_param(
kwargs['sort']))
else:
sort_dir = self._wrap(kwargs.get('sort_dir', []))
sort_key = self._wrap(kwargs.get('sort_key', []))
if len(sort_key) != len(sort_dir) and len(sort_dir) > 1:
raise exc.HTTPBadRequest(
"Unexpected number of sort directions: "
"either provide a single sort direction or an equal "
"number of sort keys and sort directions.")
for key in sort_key:
url = '%s&sort_key=%s' % (url, key)
for dir in sort_dir:
url = '%s&sort_dir=%s' % (url, dir)
if isinstance(kwargs.get('marker'), str):
url = '%s&marker=%s' % (url, kwargs['marker'])
for image, resp in paginate(url, page_size, limit):
yield image, resp
@utils.add_req_id_to_object()
def _get(self, image_id, header=None):
url = '/v2/images/%s' % image_id
header = header or {}
resp, body = self.http_client.get(url, headers=header)
# NOTE(bcwaldon): remove 'self' for now until we have an elegant
# way to pass it into the model constructor without conflict
body.pop('self', None)
return self.unvalidated_model(**body), resp
def get(self, image_id):
return self._get(image_id)
@utils.add_req_id_to_object()
def get_associated_image_tasks(self, image_id):
"""Get the tasks associated with an image.
:param image_id: ID of the image
:raises: exc.HTTPNotImplemented if Glance is not new enough to support
this API (v2.12).
"""
        # NOTE (abhishekk): Verify that /v2/images/%s/tasks is supported by
# glance
if utils.has_version(self.http_client, 'v2.12'):
url = '/v2/images/%s/tasks' % image_id
resp, body = self.http_client.get(url)
body.pop('self', None)
return body, resp
else:
raise exc.HTTPNotImplemented(
'This operation is not supported by Glance.')
@utils.add_req_id_to_object()
def data(self, image_id, do_checksum=True, allow_md5_fallback=False):
"""Retrieve data of an image.
When do_checksum is enabled, validation proceeds as follows:
1. if the image has a 'os_hash_value' property, the algorithm
specified in the image's 'os_hash_algo' property will be used
to validate against the 'os_hash_value' value. If the
specified hash algorithm is not available AND allow_md5_fallback
is True, then continue to step #2
2. else if the image has a checksum property, MD5 is used to
validate against the 'checksum' value. (If MD5 is not available
to the client, the download fails.)
3. else if the download response has a 'content-md5' header, MD5
is used to validate against the header value. (If MD5 is not
available to the client, the download fails.)
4. if none of 1-3 obtain, the data is **not validated** (this is
compatible with legacy behavior)
:param image_id: ID of the image to download
:param do_checksum: Enable/disable checksum validation
:param allow_md5_fallback:
Use the MD5 checksum for validation if the algorithm specified by
the image's 'os_hash_algo' property is not available
:returns: An iterable body or ``None``
"""
if do_checksum:
# doing this first to prevent race condition if image record
# is deleted during the image download
url = '/v2/images/%s' % image_id
resp, image_meta = self.http_client.get(url)
meta_checksum = image_meta.get('checksum', None)
meta_hash_value = image_meta.get('os_hash_value', None)
meta_hash_algo = image_meta.get('os_hash_algo', None)
url = '/v2/images/%s/file' % image_id
resp, body = self.http_client.get(url)
if resp.status_code == codes.no_content:
return None, resp
checksum = resp.headers.get('content-md5', None)
content_length = int(resp.headers.get('content-length', 0))
check_md5sum = do_checksum
if do_checksum and meta_hash_value is not None:
try:
hasher = hashlib.new(str(meta_hash_algo))
body = utils.serious_integrity_iter(body,
hasher,
meta_hash_value)
check_md5sum = False
except ValueError as ve:
if (str(ve).startswith('unsupported hash type') and
allow_md5_fallback):
check_md5sum = True
else:
raise
if do_checksum and check_md5sum:
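            # Steps 2-3: fall back to MD5, preferring the image's 'checksum'
            # property over the response's 'content-md5' header; if neither is
            # present, step 4 applies and the data is returned unvalidated.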
if meta_checksum is not None:
body = utils.integrity_iter(body, meta_checksum)
elif checksum is not None:
body = utils.integrity_iter(body, checksum)
else:
# NOTE(rosmaita): this preserves legacy behavior to return the
# image data when checksumming is requested but there's no
# 'content-md5' header in the response. Just want to make it
# clear that we're doing this on purpose.
pass
return utils.IterableWithLength(body, content_length), resp
@utils.add_req_id_to_object()
def upload(self, image_id, image_data, image_size=None, u_url=None,
backend=None):
"""Upload the data for an image.
:param image_id: ID of the image to upload data for.
:param image_data: File-like object supplying the data to upload.
:param image_size: Unused - present for backwards compatibility
:param u_url: Upload url to upload the data to.
:param backend: Backend store to upload image to.
"""
url = u_url or '/v2/images/%s/file' % image_id
hdrs = {'Content-Type': 'application/octet-stream'}
if backend is not None:
hdrs['x-image-meta-store'] = backend
body = image_data
resp, body = self.http_client.put(url, headers=hdrs, data=body)
return (resp, body), resp
@utils.add_req_id_to_object()
def get_import_info(self):
"""Get Import info from discovery endpoint."""
url = '/v2/info/import'
resp, body = self.http_client.get(url)
return body, resp
@utils.add_req_id_to_object()
def get_stores_info(self):
"""Get available stores info from discovery endpoint."""
url = '/v2/info/stores'
resp, body = self.http_client.get(url)
return body, resp
@utils.add_req_id_to_object()
def delete_from_store(self, store_id, image_id):
"""Delete image data from specific store."""
url = ('/v2/stores/%(store)s/%(image)s' % {'store': store_id,
'image': image_id})
resp, body = self.http_client.delete(url)
return body, resp
@utils.add_req_id_to_object()
def stage(self, image_id, image_data, image_size=None):
"""Upload the data to image staging.
:param image_id: ID of the image to upload data for.
:param image_data: File-like object supplying the data to upload.
:param image_size: Unused - present for backwards compatibility
"""
url = '/v2/images/%s/stage' % image_id
resp, body = self.upload(image_id,
image_data,
u_url=url)
return body, resp
@utils.add_req_id_to_object()
def image_import(self, image_id, method='glance-direct', uri=None,
backend=None, stores=None, allow_failure=True,
all_stores=None):
"""Import Image via method."""
headers = {}
url = '/v2/images/%s/import' % image_id
data = {'method': {'name': method}}
if stores:
data['stores'] = stores
if allow_failure:
data['all_stores_must_succeed'] = False
if backend is not None:
headers['x-image-meta-store'] = backend
if all_stores:
data['all_stores'] = True
if allow_failure:
data['all_stores_must_succeed'] = False
if uri:
if method == 'web-download':
data['method']['uri'] = uri
else:
raise exc.HTTPBadRequest('URI is only supported with method: '
'"web-download"')
resp, body = self.http_client.post(url, data=data, headers=headers)
return body, resp
@utils.add_req_id_to_object()
def delete(self, image_id):
"""Delete an image."""
url = '/v2/images/%s' % image_id
resp, body = self.http_client.delete(url)
return (resp, body), resp
@utils.add_req_id_to_object()
def create(self, **kwargs):
"""Create an image."""
headers = {}
url = '/v2/images'
backend = kwargs.pop('backend', None)
if backend is not None:
headers['x-image-meta-store'] = backend
image = self.model()
for (key, value) in kwargs.items():
try:
setattr(image, key, value)
except warlock.InvalidOperation as e:
raise TypeError(encodeutils.exception_to_unicode(e))
resp, body = self.http_client.post(url, headers=headers, data=image)
# NOTE(esheffield): remove 'self' for now until we have an elegant
# way to pass it into the model constructor without conflict
body.pop('self', None)
return self.model(**body), resp
@utils.add_req_id_to_object()
def deactivate(self, image_id):
"""Deactivate an image."""
url = '/v2/images/%s/actions/deactivate' % image_id
resp, body = self.http_client.post(url)
return (resp, body), resp
@utils.add_req_id_to_object()
def reactivate(self, image_id):
"""Reactivate an image."""
url = '/v2/images/%s/actions/reactivate' % image_id
resp, body = self.http_client.post(url)
return (resp, body), resp
def update(self, image_id, remove_props=None, **kwargs):
"""Update attributes of an image.
:param image_id: ID of the image to modify.
:param remove_props: List of property names to remove
:param kwargs: Image attribute names and their new values.
"""
unvalidated_image = self.get(image_id)
image = self.model(**unvalidated_image)
for (key, value) in kwargs.items():
try:
setattr(image, key, value)
except warlock.InvalidOperation as e:
raise TypeError(encodeutils.exception_to_unicode(e))
if remove_props:
cur_props = image.keys()
new_props = kwargs.keys()
# NOTE(esheffield): Only remove props that currently exist on the
# image and are NOT in the properties being updated / added
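            # For example, with cur_props={'a', 'b'}, remove_props=['b', 'c'] and
            # kwargs={'a': 1}, only 'b' is removed: 'c' does not exist on the
            # image and 'a' is being updated.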
props_to_remove = set(cur_props).intersection(
set(remove_props).difference(new_props))
for key in props_to_remove:
delattr(image, key)
url = '/v2/images/%s' % image_id
hdrs = {'Content-Type': 'application/openstack-images-v2.1-json-patch'}
resp, _ = self.http_client.patch(url, headers=hdrs, data=image.patch)
# Get request id from `patch` request so it can be passed to the
# following `get` call
req_id_hdr = {
'x-openstack-request-id': utils._extract_request_id(resp)}
# NOTE(bcwaldon): calling image.patch doesn't clear the changes, so
# we need to fetch the image again to get a clean history. This is
# an obvious optimization for warlock
return self._get(image_id, req_id_hdr)
def _get_image_with_locations_or_fail(self, image_id):
image = self.get(image_id)
if getattr(image, 'locations', None) is None:
raise exc.HTTPBadRequest('The administrator has disabled '
'API access to image locations')
return image
@utils.add_req_id_to_object()
def _send_image_update_request(self, image_id, patch_body):
url = '/v2/images/%s' % image_id
hdrs = {'Content-Type': 'application/openstack-images-v2.1-json-patch'}
resp, body = self.http_client.patch(url, headers=hdrs,
data=json.dumps(patch_body))
return (resp, body), resp
def add_location(self, image_id, url, metadata, validation_data=None):
"""Add a new location entry to an image's list of locations.
It is an error to add a URL that is already present in the list of
locations.
:param image_id: ID of image to which the location is to be added.
:param url: URL of the location to add.
:param metadata: Metadata associated with the location.
:param validation_data: Validation data for the image.
:returns: The updated image
"""
add_patch = [{'op': 'add', 'path': '/locations/-',
'value': {'url': url, 'metadata': metadata}}]
if validation_data:
add_patch[0]['value']['validation_data'] = validation_data
response = self._send_image_update_request(image_id, add_patch)
# Get request id from the above update request and pass the same to
# following get request
req_id_hdr = {'x-openstack-request-id': response.request_ids[0]}
return self._get(image_id, req_id_hdr)
def delete_locations(self, image_id, url_set):
"""Remove one or more location entries of an image.
:param image_id: ID of image from which locations are to be removed.
:param url_set: set of URLs of location entries to remove.
:returns: None
"""
image = self._get_image_with_locations_or_fail(image_id)
current_urls = [l['url'] for l in image.locations]
missing_locs = url_set.difference(set(current_urls))
if missing_locs:
raise exc.HTTPNotFound('Unknown URL(s): %s' % list(missing_locs))
# NOTE: warlock doesn't generate the most efficient patch for remove
# operations (it shifts everything up and deletes the tail elements) so
# we do it ourselves.
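        # Indices are removed in descending order so that earlier removals do
        # not shift the positions of entries that still need to be removed.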
url_indices = [current_urls.index(url) for url in url_set]
url_indices.sort(reverse=True)
patches = [{'op': 'remove', 'path': '/locations/%s' % url_idx}
for url_idx in url_indices]
return self._send_image_update_request(image_id, patches)
def update_location(self, image_id, url, metadata):
"""Update an existing location entry in an image's list of locations.
The URL specified must be already present in the image's list of
locations.
:param image_id: ID of image whose location is to be updated.
:param url: URL of the location to update.
:param metadata: Metadata associated with the location.
:returns: The updated image
"""
image = self._get_image_with_locations_or_fail(image_id)
url_map = dict([(l['url'], l) for l in image.locations])
if url not in url_map:
raise exc.HTTPNotFound('Unknown URL: %s, the URL must be one of'
' existing locations of current image' %
url)
if url_map[url]['metadata'] == metadata:
return image
url_map[url]['metadata'] = metadata
patches = [{'op': 'replace',
'path': '/locations',
'value': list(url_map.values())}]
response = self._send_image_update_request(image_id, patches)
# Get request id from the above update request and pass the same to
# following get request
req_id_hdr = {'x-openstack-request-id': response.request_ids[0]}
return self._get(image_id, req_id_hdr)
|
py | b412c49cf56e2f4f1b535d059d423abf87ebd53b | from __future__ import annotations
import asyncio
import uuid
from dataclasses import dataclass, field
from enum import Enum
from typing import Any, Dict, List, NoReturn, Optional, Union
import aiohttp
from cafeteria.asyncio.callbacks import (
CallbackRegistry,
CallbackType,
SimpleTriggerCallback,
)
from aiographql.client.helpers import aiohttp_client_session
from aiographql.client.request import GraphQLRequestContainer
from aiographql.client.response import GraphQLBaseResponse, GraphQLResponse
class GraphQLSubscriptionEventType(Enum):
"""
GraphQL Subscription Event Types
"""
CONNECTION_INIT = "connection_init"
CONNECTION_ACK = "connection_ack"
CONNECTION_ERROR = "connection_error"
CONNECTION_TERMINATE = "connection_terminate"
START = "start"
DATA = "data"
ERROR = "error"
COMPLETE = "complete"
STOP = "stop"
KEEP_ALIVE = "ka"
CallbacksType = Union[
CallbackRegistry,
Dict[GraphQLSubscriptionEventType, Union[CallbackType, List[CallbackType]]],
]
@dataclass(frozen=True)
class GraphQLSubscriptionEvent(GraphQLBaseResponse):
"""
GraphQL subscription event wrapping the payload received from the server.
:param subscription_id: The id of the subscription that generated this event.
"""
subscription_id: Optional[str] = field(default=None)
@property
def id(self) -> Optional[str]:
"""The id of the event, if available."""
return self.json.get("id")
@property
def type(self) -> Optional[GraphQLSubscriptionEventType]:
"""The type of event (:class:`GraphQLSubscriptionEventType`)."""
try:
return GraphQLSubscriptionEventType(self.json.get("type"))
except ValueError:
pass
@property
def payload(self) -> Optional[Union[GraphQLResponse, str]]:
"""The id of the subscription that generated this event."""
payload = self.json.get("payload")
if payload is not None:
if self.type in (
GraphQLSubscriptionEventType.DATA,
GraphQLSubscriptionEventType.ERROR,
):
return GraphQLResponse(request=self.request, json=payload)
return payload
@dataclass(frozen=True)
class GraphQLSubscription(GraphQLRequestContainer):
"""
Subscription container, with an attached
:class:`cafeteria.asyncio.callbacks.CallbackRegistry`. When subscribed,
the `task` will be populated with the :class:`asyncio.Task` instance.
By default the subscription will be stopped, if an error, connection error or
complete (:class:`GraphQLSubscriptionEventType`) is received.
Subscription instances are intended to be used as immutable objects. However,
`callbacks` and `stop_event_types` can be updated after initialisation.
:param id: A unique subscription identifier that will be passed into any events
generated by this subscription.
:param callbacks: A :class:`CallbackRegistry` containing a mapping of
:class:`GraphQLSubscriptionEventType` callback methods to trigger.
:param stop_event_types: Events that cause the subscription to stop. By default,
connection error, query error or connection complete events received are
considered stop events.
"""
id: str = field(default_factory=lambda: str(uuid.uuid4()), init=False)
callbacks: Optional[CallbacksType] = field(default_factory=CallbackRegistry)
stop_event_types: List[GraphQLSubscriptionEventType] = field(
default_factory=lambda: [
GraphQLSubscriptionEventType.ERROR,
GraphQLSubscriptionEventType.CONNECTION_ERROR,
GraphQLSubscriptionEventType.COMPLETE,
]
)
task: asyncio.Task = field(default=None, init=False, compare=False)
def __post_init__(
self,
headers: Optional[Dict[str, str]] = None,
operation: Optional[str] = None,
variables: Optional[Dict[str, Any]] = None,
):
super().__post_init__(headers, operation, variables)
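        # Normalise `callbacks` into a CallbackRegistry: None becomes an empty
        # registry and a plain dict of event type -> callback(s) is wrapped.
        # object.__setattr__ is needed because this dataclass is frozen.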
if self.callbacks is None:
object.__setattr__(self, "callbacks", CallbackRegistry())
elif isinstance(self.callbacks, dict):
object.__setattr__(
self, "callbacks", CallbackRegistry(callbacks=self.callbacks)
)
def active(self) -> bool:
"""
Check if the subscription is active.
:return: `True` if subscribed and active.
"""
return (
self.task is not None and not self.task.done() and not self.task.cancelled()
)
def connection_init_request(self) -> Dict[str, Any]:
"""
Connection init payload to use when initiating a new subscription.
:return: Connection initialise payload.
"""
return {
"type": GraphQLSubscriptionEventType.CONNECTION_INIT.value,
"payload": {"headers": {**self.request.headers}},
}
def connection_start_request(self) -> Dict[str, Any]:
"""
Connection start payload to use when starting a subscription.
:return: Connection start payload.
"""
return {
"id": self.id,
"type": GraphQLSubscriptionEventType.START.value,
"payload": self.request.payload(),
}
def connection_stop_request(self) -> Dict[str, Any]:
"""
Connection stop payload to use when stopping a subscription.
:return: Connection stop payload.
"""
return {"id": self.id, "type": GraphQLSubscriptionEventType.STOP.value}
def is_stop_event(self, event: GraphQLSubscriptionEvent) -> bool:
"""
        Check if the provided *event* is configured as a stop event for this subscription.
:param event: Event to check.
:return: `True` if `event` is in `stop_event_types`.
"""
return event.type in self.stop_event_types
async def handle(self, event: GraphQLSubscriptionEvent) -> NoReturn:
"""
Helper method to dispatch any configured callbacks for the specified event type.
:param event: Event to dispatch callbacks for.
"""
if event.id is None or event.id == self.id:
self.callbacks.dispatch(event.type, event)
async def _websocket_connect(
self, endpoint: str, session: aiohttp.ClientSession
) -> None:
"""
Helper method to create websocket connection with specified *endpoint*
using the specified :class:`aiohttp.ClientSession`. Once connected, we
initialise and start the GraphQL subscription; then wait for any incoming
messages. Any message received via the websocket connection is cast into
a :class:`GraphQLSubscriptionEvent` instance and dispatched for handling via
:method:`handle`.
:param endpoint: Endpoint to use when creating the websocket connection.
:param session: Session to use when creating the websocket connection.
"""
async with session.ws_connect(endpoint, protocols=('graphql-ws',)) as ws:
await ws.send_json(data=self.connection_init_request())
self.callbacks.register(
GraphQLSubscriptionEventType.CONNECTION_ACK,
SimpleTriggerCallback(
function=ws.send_json, data=self.connection_start_request()
),
)
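            # Once the server acknowledges the connection (CONNECTION_ACK), the
            # callback registered above sends the start request for this
            # subscription; after that, incoming messages are consumed below.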
try:
async for msg in ws: # type: aiohttp.WSMessage
if msg.type != aiohttp.WSMsgType.TEXT:
if msg.type == aiohttp.WSMsgType.ERROR:
break
continue
event = GraphQLSubscriptionEvent(
subscription_id=self.id,
request=self.request,
json=msg.json(),
)
await self.handle(event=event)
if self.is_stop_event(event):
break
except (asyncio.CancelledError, KeyboardInterrupt):
await ws.send_json(data=self.connection_stop_request())
async def _subscribe(
self, endpoint: str, session: Optional[aiohttp.ClientSession] = None
) -> None:
"""
Helper method wrapping :method:`GraphQLSubscription._websocket_connect` handling
        unique :class:`aiohttp.ClientSession` creation if one is not already provided.
:param endpoint: Endpoint to use when creating the websocket connection.
:param session: Optional session to use when creating the websocket connection.
"""
if session:
return await self._websocket_connect(endpoint=endpoint, session=session)
async with aiohttp_client_session() as session:
return await self._websocket_connect(endpoint=endpoint, session=session)
async def subscribe(
self,
endpoint: str,
force: bool = False,
session: Optional[aiohttp.ClientSession] = None,
wait: bool = False,
) -> None:
"""
Create a websocket subscription and set internal task.
:param endpoint: GraphQL endpoint to subscribe to
:param force: Force re-subscription if already subscribed
:param session: Optional session to use for requests
:param wait: If set to `True`, this method will wait until the subscription
is completed, websocket disconnected or async task cancelled.
"""
if self.active() and not force:
return
self.unsubscribe()
task = asyncio.create_task(self._subscribe(endpoint=endpoint, session=session))
object.__setattr__(self, "task", task)
if wait:
try:
await self.task
except asyncio.CancelledError:
pass
def unsubscribe(self) -> None:
"""
Unsubscribe current websocket subscription if active and clear internal task.
"""
if self.active():
try:
self.task.cancel()
except asyncio.CancelledError:
pass
object.__setattr__(self, "task", None)
async def unsubscribe_and_wait(self) -> None:
task = self.task
self.unsubscribe()
try:
await task
except asyncio.CancelledError:
pass
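# Illustrative sketch only, not part of the library API: one way a caller might
# drive GraphQLSubscription directly. The endpoint URL and query are placeholder
# values, and it is assumed here that `request` accepts a raw query string and
# that a plain callable is an acceptable callback for the registry.
async def _example_subscribe() -> None:
    subscription = GraphQLSubscription(
        request="subscription { messageAdded { id body } }",  # hypothetical query
        callbacks={
            GraphQLSubscriptionEventType.DATA: lambda event: print(event.payload)
        },
    )
    # Blocks until a stop event (error, connection error or complete) arrives.
    await subscription.subscribe("http://localhost:8080/graphql", wait=True)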
|
py | b412c5812ca78bb9bed43f08315e950c193b6561 | from qtpy.QtWidgets import QWidget
from .ui_script import Ui_script_widget
class WUIScript(QWidget):
def __init__(self):
super(WUIScript, self).__init__()
self.ui = Ui_script_widget()
self.ui.setupUi(self) |
py | b412c5ab3c6eed20dbecaa072d502a90a5769506 | l = [13, 18, 13, 14, 13, 16, 14, 21, 13]
|
py | b412c629d176042d5c835bdc3f7b9ec15f4437b3 | import os
import pathlib
import re
import pytest
import gitlab
import repobee_plug as plug
import repobee_testhelpers
from repobee_testhelpers.funcs import hash_directory
import _repobee.ext
import _repobee.command.peer
import _repobee.ext.gitlab
import _repobee.cli.mainparser
import repobee_plug.cli
from _helpers.asserts import (
assert_template_repos_exist,
assert_repos_exist,
assert_repos_contain,
assert_on_groups,
assert_issues_exist,
assert_num_issues,
assert_cloned_repos,
)
from _helpers.const import (
VOLUME_DST,
BASE_DOMAIN,
LOCAL_DOMAIN,
TEMPLATE_ORG_NAME,
ORG_NAME,
assignment_names,
STUDENT_TEAMS,
STUDENT_TEAM_NAMES,
REPOBEE_GITLAB,
BASE_ARGS_NO_TB,
BASE_ARGS,
STUDENTS_ARG,
MASTER_REPOS_ARG,
TEMPLATE_ORG_ARG,
TEACHER,
ADMIN_TOKEN,
LOCAL_BASE_URL,
)
from _helpers.helpers import (
api_instance,
run_in_docker_with_coverage,
run_in_docker,
update_repo,
expected_num_members_group_assertion,
gitlab_and_groups,
)
@pytest.mark.filterwarnings("ignore:.*Unverified HTTPS request.*")
class TestClone:
"""Integration tests for the clone command."""
def test_clean_clone(self, with_student_repos, tmpdir, extra_args):
"""Test cloning student repos when there are no repos in the current
working directory.
"""
command = " ".join(
[
REPOBEE_GITLAB,
*repobee_plug.cli.CoreCommand.repos.clone.as_name_tuple(),
*BASE_ARGS,
*MASTER_REPOS_ARG,
*STUDENTS_ARG,
]
)
result = run_in_docker_with_coverage(command, extra_args=extra_args)
assert result.returncode == 0
assert_cloned_repos(STUDENT_TEAMS, assignment_names, tmpdir)
def test_clone_twice(self, with_student_repos, tmpdir, extra_args):
"""Cloning twice in a row should have the same effect as cloning
once.
"""
command = " ".join(
[
REPOBEE_GITLAB,
*repobee_plug.cli.CoreCommand.repos.clone.as_name_tuple(),
*BASE_ARGS,
*MASTER_REPOS_ARG,
*STUDENTS_ARG,
]
)
first_result = run_in_docker_with_coverage(
command, extra_args=extra_args
)
second_result = run_in_docker_with_coverage(
command, extra_args=extra_args
)
assert first_result.returncode == 0
assert second_result.returncode == 0
assert_cloned_repos(STUDENT_TEAMS, assignment_names, tmpdir)
def test_clone_does_not_create_dirs_on_fail(
self, with_student_repos, tmpdir, extra_args
):
"""Test that no local directories are created for repos that RepoBee
fails to pull.
"""
non_existing_assignment_names = ["non-existing-1", "non-existing-2"]
command = " ".join(
[
REPOBEE_GITLAB,
*repobee_plug.cli.CoreCommand.repos.clone.as_name_tuple(),
*BASE_ARGS,
*STUDENTS_ARG,
"-a",
" ".join(non_existing_assignment_names),
]
)
result = run_in_docker_with_coverage(command, extra_args=extra_args)
assert result.returncode == 0
assert [
dir for dir in os.listdir(str(tmpdir)) if os.path.isdir(dir)
] == []
def test_clone_does_not_alter_existing_dirs(
self, with_student_repos, tmpdir, extra_args
):
"""Test that clone does not clobber existing directories."""
team_with_local_repos = STUDENT_TEAMS[0]
teams_without_local_repos = STUDENT_TEAMS[1:]
expected_dir_hashes = []
for template_repo_name in assignment_names:
new_dir = plug.fileutils.generate_repo_path(
str(tmpdir), team_with_local_repos.name, template_repo_name
)
new_dir.mkdir(parents=True)
new_file = new_dir / "file"
new_file.write_text(str(new_dir), encoding="utf-8")
expected_dir_hashes.append((new_dir, hash_directory(new_dir)))
repobee_testhelpers.funcs.initialize_repo(new_dir)
command = " ".join(
[
REPOBEE_GITLAB,
*repobee_plug.cli.CoreCommand.repos.clone.as_name_tuple(),
*BASE_ARGS,
*MASTER_REPOS_ARG,
*STUDENTS_ARG,
]
)
result = run_in_docker_with_coverage(command, extra_args=extra_args)
assert result.returncode == 0
assert_cloned_repos(
teams_without_local_repos, assignment_names, tmpdir
)
for dirpath, expected_hash in expected_dir_hashes:
dirhash = hash_directory(dirpath)
assert dirhash == expected_hash, "hash mismatch for " + dirpath
def test_discover_repos(self, with_student_repos, tmpdir, extra_args):
"""Test that the --discover-repos option finds all student repos."""
command = " ".join(
[
REPOBEE_GITLAB,
*repobee_plug.cli.CoreCommand.repos.clone.as_name_tuple(),
*BASE_ARGS,
*STUDENTS_ARG,
"--discover-repos",
]
)
result = run_in_docker_with_coverage(command, extra_args=extra_args)
assert result.returncode == 0
assert_cloned_repos(STUDENT_TEAMS, assignment_names, tmpdir)
@pytest.mark.filterwarnings("ignore:.*Unverified HTTPS request.*")
class TestSetup:
"""Integration tests for the setup command."""
def test_clean_setup(self, extra_args):
"""Test a first-time setup with master repos in the master org."""
command = " ".join(
[
REPOBEE_GITLAB,
*repobee_plug.cli.CoreCommand.repos.setup.as_name_tuple(),
*BASE_ARGS,
*TEMPLATE_ORG_ARG,
*MASTER_REPOS_ARG,
*STUDENTS_ARG,
]
)
result = run_in_docker_with_coverage(command, extra_args=extra_args)
assert result.returncode == 0
assert_repos_exist(STUDENT_TEAMS, assignment_names)
assert_on_groups(STUDENT_TEAMS)
def test_clean_setup_in_subgroup(self, extra_args):
"""It should be possible to use a subgroup as the target org."""
gl, template_group, target_group = gitlab_and_groups()
subgroup_name = "bestgroup"
subgroup_full_path = f"{target_group.path}/{subgroup_name}"
gl.groups.create(
dict(
name=subgroup_name,
path=subgroup_name,
parent_id=target_group.id,
)
)
base_args = [
arg if arg != ORG_NAME else subgroup_full_path for arg in BASE_ARGS
]
command = " ".join(
[
REPOBEE_GITLAB,
*repobee_plug.cli.CoreCommand.repos.setup.as_name_tuple(),
*base_args,
*TEMPLATE_ORG_ARG,
*MASTER_REPOS_ARG,
*STUDENTS_ARG,
]
)
result = run_in_docker_with_coverage(command, extra_args=extra_args)
assert result.returncode == 0
assert_repos_exist(
STUDENT_TEAMS, assignment_names, org_name=subgroup_full_path
)
def test_setup_twice(self, extra_args):
"""Setting up twice should have the same effect as setting up once."""
command = " ".join(
[
REPOBEE_GITLAB,
*repobee_plug.cli.CoreCommand.repos.setup.as_name_tuple(),
*BASE_ARGS,
*TEMPLATE_ORG_ARG,
*MASTER_REPOS_ARG,
*STUDENTS_ARG,
]
)
result = run_in_docker_with_coverage(command, extra_args=extra_args)
result = run_in_docker_with_coverage(command, extra_args=extra_args)
assert result.returncode == 0
assert_repos_exist(STUDENT_TEAMS, assignment_names)
assert_on_groups(STUDENT_TEAMS)
def test_setup_with_token_owner_as_student(self, extra_args):
"""Setting up with the token owner as a student should not cause
a crash (see #812)
"""
command = " ".join(
[
REPOBEE_GITLAB,
*repobee_plug.cli.CoreCommand.repos.setup.as_name_tuple(),
*BASE_ARGS,
*TEMPLATE_ORG_ARG,
*MASTER_REPOS_ARG,
"--students",
TEACHER,
]
)
result = run_in_docker_with_coverage(command, extra_args=extra_args)
assert result.returncode == 0
assert_repos_exist(
[plug.StudentTeam(members=[TEACHER])], assignment_names
)
def test_setup_with_default_branch_protection_does_not_carry_over(
self, extra_args
):
"""Student repositories created when global default branch
protection is enabled on the GitLab instance, should still not have
default branch protection.
"""
# arrange
gl = gitlab.Gitlab(
url=LOCAL_BASE_URL, private_token=ADMIN_TOKEN, ssl_verify=False
)
gl.auth()
settings = gl.settings.get()
settings.default_branch_protection = (
_repobee.ext.gitlab.DefaultBranchProtection.FULL.value
)
settings.save()
command = " ".join(
[
REPOBEE_GITLAB,
*repobee_plug.cli.CoreCommand.repos.setup.as_name_tuple(),
*BASE_ARGS,
*TEMPLATE_ORG_ARG,
*MASTER_REPOS_ARG,
*STUDENTS_ARG,
]
)
# act
result = run_in_docker_with_coverage(command, extra_args=extra_args)
# assert
assert result.returncode == 0
api = api_instance(ORG_NAME)
loop_ran = False
for repo in api.get_repos():
loop_ran = True
assert not repo.implementation.protectedbranches.list()
assert loop_ran, "assertion loop did not execute"
@pytest.mark.filterwarnings("ignore:.*Unverified HTTPS request.*")
class TestUpdate:
"""Integration tests for the update command."""
def test_happy_path(self, with_student_repos, extra_args):
master_repo = assignment_names[0]
filename = "superfile.super"
text = "some epic content\nfor this file!"
update_repo(master_repo, filename, text)
command = " ".join(
[
REPOBEE_GITLAB,
*repobee_plug.cli.CoreCommand.repos.update.as_name_tuple(),
*TEMPLATE_ORG_ARG,
*BASE_ARGS,
"-a",
master_repo,
*STUDENTS_ARG,
]
)
result = run_in_docker_with_coverage(command, extra_args=extra_args)
assert result.returncode == 0
assert_repos_contain(STUDENT_TEAMS, [master_repo], filename, text)
def test_opens_issue_if_update_rejected(
self, tmpdir, with_student_repos, extra_args
):
master_repo = assignment_names[0]
conflict_repo = plug.generate_repo_name(STUDENT_TEAMS[0], master_repo)
filename = "superfile.super"
text = "some epic content\nfor this file!"
# update the master repo
update_repo(master_repo, filename, text)
# conflicting update in the student repo
update_repo(conflict_repo, "somefile.txt", "some other content")
issue = plug.Issue(title="Oops, push was rejected!", body="")
issue_file = pathlib.Path(str(tmpdir)) / "issue.md"
issue_file.write_text(issue.title)
command = " ".join(
[
REPOBEE_GITLAB,
*repobee_plug.cli.CoreCommand.repos.update.as_name_tuple(),
*TEMPLATE_ORG_ARG,
*BASE_ARGS,
"-a",
master_repo,
*STUDENTS_ARG,
"--issue",
issue_file.name,
]
)
result = run_in_docker_with_coverage(command, extra_args=extra_args)
assert result.returncode == 0
assert_repos_contain(STUDENT_TEAMS[1:], [master_repo], filename, text)
assert_issues_exist(STUDENT_TEAMS[0:1], [master_repo], issue)
@pytest.mark.filterwarnings("ignore:.*Unverified HTTPS request.*")
class TestMigrate:
"""Integration tests for the migrate command."""
@pytest.fixture
def local_master_repos(self, restore, extra_args):
"""Clone the master repos to disk. The restore fixture is explicitly
included as it must be run before this fixture.
"""
api = api_instance(TEMPLATE_ORG_NAME)
template_repo_urls = [
api.insert_auth(url).replace(LOCAL_DOMAIN, BASE_DOMAIN)
for url in api.get_repo_urls(assignment_names)
]
        # clone the master repos to disk first
git_commands = [
"git clone {}".format(url) for url in template_repo_urls
]
result = run_in_docker(
" && ".join(git_commands), extra_args=extra_args
)
assert result.returncode == 0
return assignment_names
def test_happy_path(self, local_master_repos, extra_args):
"""Migrate a few repos from the existing master repo into the target
organization.
"""
command = " ".join(
[
REPOBEE_GITLAB,
*repobee_plug.cli.CoreCommand.repos.migrate.as_name_tuple(),
*BASE_ARGS,
*MASTER_REPOS_ARG,
"--allow-local-templates",
]
)
result = run_in_docker_with_coverage(command, extra_args=extra_args)
assert result.returncode == 0
assert_template_repos_exist(local_master_repos, ORG_NAME)
@pytest.mark.filterwarnings("ignore:.*Unverified HTTPS request.*")
class TestOpenIssues:
"""Tests for the open-issues command."""
_ISSUE = plug.Issue(title="This is a title", body="This is a body")
def test_happy_path(self, tmpdir_volume_arg, tmpdir, extra_args):
"""Test opening an issue in each student repo."""
filename = "issue.md"
text = "{}\n{}".format(self._ISSUE.title, self._ISSUE.body)
tmpdir.join(filename).write_text(text, encoding="utf-8")
command = " ".join(
[
REPOBEE_GITLAB,
*repobee_plug.cli.CoreCommand.issues.open.as_name_tuple(),
*BASE_ARGS,
*MASTER_REPOS_ARG,
*STUDENTS_ARG,
"-i",
"{}/{}".format(VOLUME_DST, filename),
]
)
result = run_in_docker_with_coverage(command, extra_args=extra_args)
assert result.returncode == 0
assert_num_issues(STUDENT_TEAMS, assignment_names, 1)
assert_issues_exist(STUDENT_TEAMS, assignment_names, self._ISSUE)
@pytest.mark.filterwarnings("ignore:.*Unverified HTTPS request.*")
class TestCloseIssues:
"""Tests for the close-issues command."""
def test_closes_only_matched_issues(self, open_issues, extra_args):
"""Test that close-issues respects the regex."""
assert len(open_issues) == 2, "expected there to be only 2 open issues"
close_issue = open_issues[0]
open_issue = open_issues[1]
command = " ".join(
[
REPOBEE_GITLAB,
*repobee_plug.cli.CoreCommand.issues.close.as_name_tuple(),
*BASE_ARGS,
*MASTER_REPOS_ARG,
*STUDENTS_ARG,
"-r",
close_issue.title,
]
)
result = run_in_docker_with_coverage(command, extra_args=extra_args)
assert result.returncode == 0
assert_issues_exist(
STUDENT_TEAM_NAMES,
assignment_names,
close_issue,
expected_state="closed",
)
assert_issues_exist(
STUDENT_TEAM_NAMES,
assignment_names,
open_issue,
expected_state="opened",
)
@pytest.mark.filterwarnings("ignore:.*Unverified HTTPS request.*")
class TestListIssues:
"""Tests for the list-issues command."""
@pytest.mark.parametrize("discover_repos", [False, True])
def test_lists_matching_issues(
self, open_issues, extra_args, discover_repos
):
# arrange
assert len(open_issues) == 2, "expected there to be only 2 open issues"
matched = open_issues[0]
unmatched = open_issues[1]
repo_names = plug.generate_repo_names(STUDENT_TEAMS, assignment_names)
issue_pattern_template = r"^.*{}/#\d:\s+{}.*by {}.?$"
expected_issue_output_patterns = [
issue_pattern_template.format(repo_name, matched.title, TEACHER)
for repo_name in repo_names
]
unexpected_issue_output_patterns = [
issue_pattern_template.format(repo_name, unmatched.title, TEACHER)
for repo_name in repo_names
] + [
r"\[ERROR\]"
] # any kind of error is bad
repo_arg = ["--discover-repos"] if discover_repos else MASTER_REPOS_ARG
command = " ".join(
[
REPOBEE_GITLAB,
*repobee_plug.cli.CoreCommand.issues.list.as_name_tuple(),
*BASE_ARGS,
*repo_arg,
*STUDENTS_ARG,
"-r",
matched.title,
]
)
# act
result = run_in_docker_with_coverage(command, extra_args=extra_args)
output = result.stdout.decode("utf-8")
# assert
assert result.returncode == 0
search_flags = re.MULTILINE
for expected_pattern in expected_issue_output_patterns:
assert re.search(expected_pattern, output, search_flags)
for unexpected_pattern in unexpected_issue_output_patterns:
assert not re.search(unexpected_pattern, output, search_flags)
@pytest.mark.filterwarnings("ignore:.*Unverified HTTPS request.*")
class TestAssignReviews:
"""Tests for the assign-reviews command."""
def test_assign_one_review(self, with_student_repos, extra_args):
assignment_name = assignment_names[1]
expected_review_teams = [
plug.StudentTeam(
members=[],
name=plug.generate_review_team_name(
student_team_name, assignment_name
),
)
for student_team_name in STUDENT_TEAM_NAMES
]
command = " ".join(
[
REPOBEE_GITLAB,
*repobee_plug.cli.CoreCommand.reviews.assign.as_name_tuple(),
*BASE_ARGS,
"-a",
assignment_name,
*STUDENTS_ARG,
"-n",
"1",
]
)
group_assertion = expected_num_members_group_assertion(
expected_num_members=1
)
result = run_in_docker_with_coverage(command, extra_args=extra_args)
assert result.returncode == 0
assert_on_groups(
expected_review_teams, single_group_assertion=group_assertion
)
assert_num_issues(STUDENT_TEAMS, [assignment_name], 1)
assert_issues_exist(
STUDENT_TEAMS,
[assignment_name],
_repobee.command.peer.DEFAULT_REVIEW_ISSUE,
expected_num_asignees=1,
)
def test_assign_to_nonexisting_students(
self, with_student_repos, extra_args
):
"""If you try to assign reviews where one or more of the allocated
student repos don't exist, there should be an error.
"""
assignment_name = assignment_names[1]
non_existing_group = "non-existing-group"
student_team_names = STUDENT_TEAM_NAMES + [non_existing_group]
command = " ".join(
[
REPOBEE_GITLAB,
*repobee_plug.cli.CoreCommand.reviews.assign.as_name_tuple(),
*BASE_ARGS_NO_TB,
"-a",
assignment_name,
"-s",
*student_team_names,
"-n",
"1",
]
)
result = run_in_docker_with_coverage(command, extra_args=extra_args)
output = result.stdout.decode("utf-8")
assert (
"[ERROR] NotFoundError: Can't find repos: {}".format(
plug.generate_repo_name(non_existing_group, assignment_name)
)
in output
)
assert result.returncode == 1
assert_num_issues(STUDENT_TEAMS, [assignment_name], 0)
@pytest.mark.filterwarnings("ignore:.*Unverified HTTPS request.*")
class TestEndReviews:
def test_end_all_reviews(self, with_reviews, extra_args):
assignment_name, review_teams = with_reviews
command = " ".join(
[
REPOBEE_GITLAB,
*repobee_plug.cli.CoreCommand.reviews.end.as_name_tuple(),
*BASE_ARGS,
"-a",
assignment_name,
*STUDENTS_ARG,
]
)
result = run_in_docker_with_coverage(command, extra_args=extra_args)
def assert_no_actual_groups(expected, actual):
assert not actual
assert result.returncode == 0
# student teams should still exist
assert_on_groups(STUDENT_TEAMS)
# review teams should not
assert_on_groups(
review_teams, all_groups_assertion=assert_no_actual_groups
)
def test_end_non_existing_reviews(self, with_reviews, extra_args):
_, review_teams = with_reviews
assignment_name = assignment_names[0]
command = " ".join(
[
REPOBEE_GITLAB,
*repobee_plug.cli.CoreCommand.reviews.end.as_name_tuple(),
*BASE_ARGS,
"-a",
assignment_name,
*STUDENTS_ARG,
]
)
result = run_in_docker_with_coverage(command, extra_args=extra_args)
assert result.returncode == 0
assert_on_groups(STUDENT_TEAMS)
assert_on_groups(
review_teams,
single_group_assertion=expected_num_members_group_assertion(1),
)
class TestCheckReviews:
"""Tests for check-reviews command."""
def test_no_reviews_opened(self, with_reviews, extra_args):
assignment_name, _ = with_reviews
num_reviews = 0
num_expected_reviews = 1
assignment_name = assignment_names[1]
pattern_template = r"{}.*{}.*{}.*\w+-{}.*"
expected_output_patterns = [
pattern_template.format(
team_name,
str(num_reviews),
str(num_expected_reviews - num_reviews),
assignment_name,
)
for team_name in STUDENT_TEAM_NAMES
]
unexpected_output_patterns = [r"\[ERROR\]"]
command = " ".join(
[
REPOBEE_GITLAB,
*repobee_plug.cli.CoreCommand.reviews.check.as_name_tuple(),
*BASE_ARGS,
"-a",
assignment_name,
*STUDENTS_ARG,
"--num-reviews",
str(num_expected_reviews),
"--title-regex",
"Review",
]
)
result = run_in_docker_with_coverage(command, extra_args=extra_args)
output = result.stdout.decode("utf-8")
assert result.returncode == 0
search_flags = re.MULTILINE
for expected_pattern in expected_output_patterns:
assert re.search(expected_pattern, output, search_flags)
for unexpected_pattern in unexpected_output_patterns:
assert not re.search(unexpected_pattern, output, search_flags)
def test_expect_too_many_reviews(self, with_reviews, extra_args):
"""Test that warnings are printed if a student is assigned to fewer
review teams than expected.
"""
assignment_name, _ = with_reviews
num_reviews = 0
actual_assigned_reviews = 1
num_expected_reviews = 2
assignment_name = assignment_names[1]
warning_template = (
r"\[WARNING\] Expected {} to be assigned to {} review teams, but "
"found {}. Review teams may have been tampered with."
)
pattern_template = r"{}.*{}.*{}.*\w+-{}.*"
expected_output_patterns = [
pattern_template.format(
team_name,
str(num_reviews),
str(actual_assigned_reviews - num_reviews),
assignment_name,
)
for team_name in STUDENT_TEAM_NAMES
] + [
warning_template.format(
team_name,
str(num_expected_reviews),
str(actual_assigned_reviews),
)
for team_name in STUDENT_TEAM_NAMES
]
unexpected_output_patterns = [r"\[ERROR\]"]
command = " ".join(
[
REPOBEE_GITLAB,
*repobee_plug.cli.CoreCommand.reviews.check.as_name_tuple(),
*BASE_ARGS,
"-a",
assignment_name,
*STUDENTS_ARG,
"--num-reviews",
str(num_expected_reviews),
"--title-regex",
"Review",
]
)
result = run_in_docker_with_coverage(command, extra_args=extra_args)
output = result.stdout.decode("utf-8")
assert result.returncode == 0
search_flags = re.MULTILINE
for expected_pattern in expected_output_patterns:
assert re.search(expected_pattern, output, search_flags)
for unexpected_pattern in unexpected_output_patterns:
assert not re.search(unexpected_pattern, output, search_flags)
|
py | b412c68fb05fca6bb2b0f7ca013344737d6aafcf | __all__ = ['__version__']
__version__ = '0.0.2'
|
py | b412c6fc11f2bb40256d1ea31bdbab1fad58224b | #
# PySNMP MIB module OLD-CISCO-INTERFACES-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/OLD-CISCO-INTERFACES-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 18:08:35 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
SingleValueConstraint, ConstraintsIntersection, ValueSizeConstraint, ValueRangeConstraint, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ConstraintsIntersection", "ValueSizeConstraint", "ValueRangeConstraint", "ConstraintsUnion")
local, = mibBuilder.importSymbols("CISCO-SMI", "local")
ifIndex, = mibBuilder.importSymbols("IF-MIB", "ifIndex")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
iso, Gauge32, MibIdentifier, Counter64, NotificationType, Unsigned32, IpAddress, MibScalar, MibTable, MibTableRow, MibTableColumn, TimeTicks, ModuleIdentity, Bits, Integer32, ObjectIdentity, Counter32 = mibBuilder.importSymbols("SNMPv2-SMI", "iso", "Gauge32", "MibIdentifier", "Counter64", "NotificationType", "Unsigned32", "IpAddress", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "TimeTicks", "ModuleIdentity", "Bits", "Integer32", "ObjectIdentity", "Counter32")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
linterfaces = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 2, 2))
lifTable = MibTable((1, 3, 6, 1, 4, 1, 9, 2, 2, 1), )
if mibBuilder.loadTexts: lifTable.setStatus('mandatory')
lifEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"))
if mibBuilder.loadTexts: lifEntry.setStatus('mandatory')
locIfHardType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 1), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfHardType.setStatus('mandatory')
locIfLineProt = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfLineProt.setStatus('mandatory')
locIfLastIn = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfLastIn.setStatus('mandatory')
locIfLastOut = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfLastOut.setStatus('mandatory')
locIfLastOutHang = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfLastOutHang.setStatus('mandatory')
locIfInBitsSec = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfInBitsSec.setStatus('mandatory')
locIfInPktsSec = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 7), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfInPktsSec.setStatus('mandatory')
locIfOutBitsSec = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 8), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfOutBitsSec.setStatus('mandatory')
locIfOutPktsSec = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 9), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfOutPktsSec.setStatus('mandatory')
locIfInRunts = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 10), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfInRunts.setStatus('mandatory')
locIfInGiants = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 11), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfInGiants.setStatus('mandatory')
locIfInCRC = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 12), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfInCRC.setStatus('mandatory')
locIfInFrame = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 13), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfInFrame.setStatus('mandatory')
locIfInOverrun = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 14), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfInOverrun.setStatus('mandatory')
locIfInIgnored = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 15), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfInIgnored.setStatus('mandatory')
locIfInAbort = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 16), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfInAbort.setStatus('mandatory')
locIfResets = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 17), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfResets.setStatus('mandatory')
locIfRestarts = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 18), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfRestarts.setStatus('mandatory')
locIfKeep = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 19), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfKeep.setStatus('mandatory')
locIfReason = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 20), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfReason.setStatus('mandatory')
locIfCarTrans = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 21), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfCarTrans.setStatus('mandatory')
locIfReliab = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 22), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfReliab.setStatus('mandatory')
locIfDelay = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 23), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfDelay.setStatus('mandatory')
locIfLoad = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 24), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfLoad.setStatus('mandatory')
locIfCollisions = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 25), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfCollisions.setStatus('mandatory')
locIfInputQueueDrops = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 26), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfInputQueueDrops.setStatus('mandatory')
locIfOutputQueueDrops = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 27), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfOutputQueueDrops.setStatus('mandatory')
locIfDescr = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 28), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: locIfDescr.setStatus('mandatory')
locIfSlowInPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 30), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfSlowInPkts.setStatus('mandatory')
locIfSlowOutPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 31), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfSlowOutPkts.setStatus('mandatory')
locIfSlowInOctets = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 32), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfSlowInOctets.setStatus('mandatory')
locIfSlowOutOctets = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 33), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfSlowOutOctets.setStatus('mandatory')
locIfFastInPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 34), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfFastInPkts.setStatus('mandatory')
locIfFastOutPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 35), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfFastOutPkts.setStatus('mandatory')
locIfFastInOctets = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 36), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfFastInOctets.setStatus('mandatory')
locIfFastOutOctets = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 37), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfFastOutOctets.setStatus('mandatory')
locIfotherInPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 38), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfotherInPkts.setStatus('mandatory')
locIfotherOutPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 39), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfotherOutPkts.setStatus('mandatory')
locIfotherInOctets = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 40), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfotherInOctets.setStatus('mandatory')
locIfotherOutOctets = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 41), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfotherOutOctets.setStatus('mandatory')
locIfipInPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 42), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfipInPkts.setStatus('mandatory')
locIfipOutPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 43), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfipOutPkts.setStatus('mandatory')
locIfipInOctets = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 44), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfipInOctets.setStatus('mandatory')
locIfipOutOctets = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 45), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfipOutOctets.setStatus('mandatory')
locIfdecnetInPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 46), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfdecnetInPkts.setStatus('mandatory')
locIfdecnetOutPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 47), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfdecnetOutPkts.setStatus('mandatory')
locIfdecnetInOctets = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 48), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfdecnetInOctets.setStatus('mandatory')
locIfdecnetOutOctets = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 49), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfdecnetOutOctets.setStatus('mandatory')
locIfxnsInPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 50), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfxnsInPkts.setStatus('mandatory')
locIfxnsOutPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 51), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfxnsOutPkts.setStatus('mandatory')
locIfxnsInOctets = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 52), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfxnsInOctets.setStatus('mandatory')
locIfxnsOutOctets = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 53), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfxnsOutOctets.setStatus('mandatory')
locIfclnsInPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 54), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfclnsInPkts.setStatus('mandatory')
locIfclnsOutPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 55), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfclnsOutPkts.setStatus('mandatory')
locIfclnsInOctets = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 56), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfclnsInOctets.setStatus('mandatory')
locIfclnsOutOctets = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 57), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfclnsOutOctets.setStatus('mandatory')
locIfappletalkInPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 58), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfappletalkInPkts.setStatus('mandatory')
locIfappletalkOutPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 59), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfappletalkOutPkts.setStatus('mandatory')
locIfappletalkInOctets = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 60), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfappletalkInOctets.setStatus('mandatory')
locIfappletalkOutOctets = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 61), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfappletalkOutOctets.setStatus('mandatory')
locIfnovellInPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 62), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfnovellInPkts.setStatus('mandatory')
locIfnovellOutPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 63), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfnovellOutPkts.setStatus('mandatory')
locIfnovellInOctets = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 64), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfnovellInOctets.setStatus('mandatory')
locIfnovellOutOctets = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 65), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfnovellOutOctets.setStatus('mandatory')
locIfapolloInPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 66), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfapolloInPkts.setStatus('mandatory')
locIfapolloOutPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 67), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfapolloOutPkts.setStatus('mandatory')
locIfapolloInOctets = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 68), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfapolloInOctets.setStatus('mandatory')
locIfapolloOutOctets = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 69), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfapolloOutOctets.setStatus('mandatory')
locIfvinesInPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 70), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfvinesInPkts.setStatus('mandatory')
locIfvinesOutPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 71), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfvinesOutPkts.setStatus('mandatory')
locIfvinesInOctets = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 72), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfvinesInOctets.setStatus('mandatory')
locIfvinesOutOctets = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 73), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfvinesOutOctets.setStatus('mandatory')
locIfbridgedInPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 74), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfbridgedInPkts.setStatus('mandatory')
locIfbridgedOutPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 75), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfbridgedOutPkts.setStatus('mandatory')
locIfbridgedInOctets = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 76), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfbridgedInOctets.setStatus('mandatory')
locIfbridgedOutOctets = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 77), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfbridgedOutOctets.setStatus('mandatory')
locIfsrbInPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 78), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfsrbInPkts.setStatus('mandatory')
locIfsrbOutPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 79), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfsrbOutPkts.setStatus('mandatory')
locIfsrbInOctets = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 80), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfsrbInOctets.setStatus('mandatory')
locIfsrbOutOctets = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 81), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfsrbOutOctets.setStatus('mandatory')
locIfchaosInPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 82), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfchaosInPkts.setStatus('mandatory')
locIfchaosOutPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 83), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfchaosOutPkts.setStatus('mandatory')
locIfchaosInOctets = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 84), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfchaosInOctets.setStatus('mandatory')
locIfchaosOutOctets = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 85), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfchaosOutOctets.setStatus('mandatory')
locIfpupInPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 86), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfpupInPkts.setStatus('mandatory')
locIfpupOutPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 87), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfpupOutPkts.setStatus('mandatory')
locIfpupInOctets = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 88), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfpupInOctets.setStatus('mandatory')
locIfpupOutOctets = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 89), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfpupOutOctets.setStatus('mandatory')
locIfmopInPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 90), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfmopInPkts.setStatus('mandatory')
locIfmopOutPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 91), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfmopOutPkts.setStatus('mandatory')
locIfmopInOctets = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 92), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfmopInOctets.setStatus('mandatory')
locIfmopOutOctets = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 93), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfmopOutOctets.setStatus('mandatory')
locIflanmanInPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 94), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIflanmanInPkts.setStatus('mandatory')
locIflanmanOutPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 95), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIflanmanOutPkts.setStatus('mandatory')
locIflanmanInOctets = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 96), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIflanmanInOctets.setStatus('mandatory')
locIflanmanOutOctets = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 97), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIflanmanOutOctets.setStatus('mandatory')
locIfstunInPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 98), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfstunInPkts.setStatus('mandatory')
locIfstunOutPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 99), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfstunOutPkts.setStatus('mandatory')
locIfstunInOctets = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 100), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfstunInOctets.setStatus('mandatory')
locIfstunOutOctets = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 101), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfstunOutOctets.setStatus('mandatory')
locIfspanInPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 102), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfspanInPkts.setStatus('mandatory')
locIfspanOutPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 103), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfspanOutPkts.setStatus('mandatory')
locIfspanInOctets = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 104), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfspanInOctets.setStatus('mandatory')
locIfspanOutOctets = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 105), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfspanOutOctets.setStatus('mandatory')
locIfarpInPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 106), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfarpInPkts.setStatus('mandatory')
locIfarpOutPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 107), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfarpOutPkts.setStatus('mandatory')
locIfarpInOctets = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 108), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfarpInOctets.setStatus('mandatory')
locIfarpOutOctets = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 109), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfarpOutOctets.setStatus('mandatory')
locIfprobeInPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 110), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfprobeInPkts.setStatus('mandatory')
locIfprobeOutPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 111), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfprobeOutPkts.setStatus('mandatory')
locIfprobeInOctets = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 112), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfprobeInOctets.setStatus('mandatory')
locIfprobeOutOctets = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 113), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfprobeOutOctets.setStatus('mandatory')
locIfDribbleInputs = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 1, 1, 114), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfDribbleInputs.setStatus('mandatory')
lFSIPTable = MibTable((1, 3, 6, 1, 4, 1, 9, 2, 2, 2), )
if mibBuilder.loadTexts: lFSIPTable.setStatus('mandatory')
lFSIPEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 2, 2, 2, 1), ).setIndexNames((0, "OLD-CISCO-INTERFACES-MIB", "locIfFSIPIndex"))
if mibBuilder.loadTexts: lFSIPEntry.setStatus('mandatory')
locIfFSIPIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 2, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfFSIPIndex.setStatus('mandatory')
locIfFSIPtype = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 2, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("notAvailable", 1), ("dte", 2), ("dce", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfFSIPtype.setStatus('mandatory')
locIfFSIPrts = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 2, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("notAvailable", 1), ("up", 2), ("down", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfFSIPrts.setStatus('mandatory')
locIfFSIPcts = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 2, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("notAvailable", 1), ("up", 2), ("down", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfFSIPcts.setStatus('mandatory')
locIfFSIPdtr = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 2, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("notAvailable", 1), ("up", 2), ("down", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfFSIPdtr.setStatus('mandatory')
locIfFSIPdcd = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 2, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("notAvailable", 1), ("up", 2), ("down", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfFSIPdcd.setStatus('mandatory')
locIfFSIPdsr = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 2, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("notAvailable", 1), ("up", 2), ("down", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfFSIPdsr.setStatus('mandatory')
locIfFSIPrxClockrate = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 2, 1, 8), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfFSIPrxClockrate.setStatus('mandatory')
locIfFSIPrxClockrateHi = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 2, 1, 9), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfFSIPrxClockrateHi.setStatus('mandatory')
locIfFSIPportType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 2, 2, 2, 1, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9))).clone(namedValues=NamedValues(("noCable", 1), ("rs232", 2), ("rs422", 3), ("rs423", 4), ("v35", 5), ("x21", 6), ("rs449", 7), ("rs530", 8), ("hssi", 9)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: locIfFSIPportType.setStatus('mandatory')
mibBuilder.exportSymbols("OLD-CISCO-INTERFACES-MIB", locIfLastOutHang=locIfLastOutHang, locIfapolloOutPkts=locIfapolloOutPkts, locIfxnsOutOctets=locIfxnsOutOctets, locIfFastInOctets=locIfFastInOctets, locIfLastIn=locIfLastIn, lFSIPTable=lFSIPTable, locIfInAbort=locIfInAbort, locIfclnsOutPkts=locIfclnsOutPkts, locIfprobeOutOctets=locIfprobeOutOctets, locIfchaosOutOctets=locIfchaosOutOctets, locIfSlowOutOctets=locIfSlowOutOctets, locIfOutPktsSec=locIfOutPktsSec, locIfInRunts=locIfInRunts, linterfaces=linterfaces, locIfxnsInPkts=locIfxnsInPkts, locIfLastOut=locIfLastOut, locIfHardType=locIfHardType, locIfReliab=locIfReliab, locIfotherInPkts=locIfotherInPkts, locIfspanInOctets=locIfspanInOctets, locIfOutputQueueDrops=locIfOutputQueueDrops, locIfInputQueueDrops=locIfInputQueueDrops, locIfvinesOutPkts=locIfvinesOutPkts, locIfarpInOctets=locIfarpInOctets, locIfsrbInPkts=locIfsrbInPkts, locIflanmanOutPkts=locIflanmanOutPkts, locIfFSIPportType=locIfFSIPportType, locIfFastOutPkts=locIfFastOutPkts, locIfReason=locIfReason, locIfarpInPkts=locIfarpInPkts, locIfipOutOctets=locIfipOutOctets, locIfprobeInOctets=locIfprobeInOctets, locIfSlowOutPkts=locIfSlowOutPkts, locIflanmanInPkts=locIflanmanInPkts, locIfapolloOutOctets=locIfapolloOutOctets, locIfmopInPkts=locIfmopInPkts, locIfFSIPdsr=locIfFSIPdsr, locIfnovellOutPkts=locIfnovellOutPkts, locIfvinesInOctets=locIfvinesInOctets, locIfvinesOutOctets=locIfvinesOutOctets, locIfFSIPdcd=locIfFSIPdcd, locIfipInPkts=locIfipInPkts, locIfInIgnored=locIfInIgnored, locIfbridgedInOctets=locIfbridgedInOctets, lifTable=lifTable, locIfotherOutOctets=locIfotherOutOctets, locIfnovellInOctets=locIfnovellInOctets, locIfvinesInPkts=locIfvinesInPkts, locIfipOutPkts=locIfipOutPkts, locIfInOverrun=locIfInOverrun, locIfbridgedOutOctets=locIfbridgedOutOctets, locIfFastOutOctets=locIfFastOutOctets, locIfmopOutOctets=locIfmopOutOctets, locIfKeep=locIfKeep, locIfLoad=locIfLoad, locIfappletalkOutPkts=locIfappletalkOutPkts, locIfCollisions=locIfCollisions, locIflanmanOutOctets=locIflanmanOutOctets, locIfxnsInOctets=locIfxnsInOctets, locIfCarTrans=locIfCarTrans, locIfappletalkOutOctets=locIfappletalkOutOctets, locIfarpOutOctets=locIfarpOutOctets, locIfSlowInPkts=locIfSlowInPkts, locIfprobeInPkts=locIfprobeInPkts, locIfpupOutOctets=locIfpupOutOctets, locIfpupOutPkts=locIfpupOutPkts, locIfFSIPcts=locIfFSIPcts, locIfdecnetOutOctets=locIfdecnetOutOctets, locIfmopOutPkts=locIfmopOutPkts, locIfFSIPrts=locIfFSIPrts, locIfnovellOutOctets=locIfnovellOutOctets, locIfstunInOctets=locIfstunInOctets, locIfDescr=locIfDescr, locIfappletalkInPkts=locIfappletalkInPkts, locIfdecnetInPkts=locIfdecnetInPkts, locIfxnsOutPkts=locIfxnsOutPkts, locIfOutBitsSec=locIfOutBitsSec, locIfbridgedInPkts=locIfbridgedInPkts, locIfotherInOctets=locIfotherInOctets, locIfclnsInOctets=locIfclnsInOctets, locIfsrbInOctets=locIfsrbInOctets, locIfchaosOutPkts=locIfchaosOutPkts, locIfstunOutPkts=locIfstunOutPkts, locIfFastInPkts=locIfFastInPkts, locIfInPktsSec=locIfInPktsSec, locIfpupInPkts=locIfpupInPkts, locIfLineProt=locIfLineProt, locIfclnsOutOctets=locIfclnsOutOctets, locIfDribbleInputs=locIfDribbleInputs, locIfapolloInOctets=locIfapolloInOctets, locIfFSIPtype=locIfFSIPtype, locIfFSIPrxClockrate=locIfFSIPrxClockrate, locIfprobeOutPkts=locIfprobeOutPkts, locIfstunInPkts=locIfstunInPkts, locIfsrbOutOctets=locIfsrbOutOctets, locIfpupInOctets=locIfpupInOctets, locIfInFrame=locIfInFrame, locIfarpOutPkts=locIfarpOutPkts, locIfFSIPIndex=locIfFSIPIndex, locIfDelay=locIfDelay, locIfipInOctets=locIfipInOctets, 
locIfInGiants=locIfInGiants, locIfspanOutOctets=locIfspanOutOctets, locIfchaosInOctets=locIfchaosInOctets, locIfSlowInOctets=locIfSlowInOctets, locIfotherOutPkts=locIfotherOutPkts, lFSIPEntry=lFSIPEntry, locIfstunOutOctets=locIfstunOutOctets, locIfFSIPrxClockrateHi=locIfFSIPrxClockrateHi, locIfResets=locIfResets, locIflanmanInOctets=locIflanmanInOctets, locIfapolloInPkts=locIfapolloInPkts, locIfdecnetInOctets=locIfdecnetInOctets, locIfspanInPkts=locIfspanInPkts, locIfbridgedOutPkts=locIfbridgedOutPkts, locIfappletalkInOctets=locIfappletalkInOctets, locIfdecnetOutPkts=locIfdecnetOutPkts, locIfchaosInPkts=locIfchaosInPkts, locIfRestarts=locIfRestarts, locIfFSIPdtr=locIfFSIPdtr, locIfclnsInPkts=locIfclnsInPkts, locIfsrbOutPkts=locIfsrbOutPkts, locIfmopInOctets=locIfmopInOctets, lifEntry=lifEntry, locIfspanOutPkts=locIfspanOutPkts, locIfInCRC=locIfInCRC, locIfInBitsSec=locIfInBitsSec, locIfnovellInPkts=locIfnovellInPkts)
|
py | b412c82bd7ebff089ff7ef3f54333e99554bc4d0 | # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import os
import unittest
from contextlib import contextmanager
from pants.util.contextutil import environment_as, temporary_dir
from pants.util.dirutil import chmod_plus_x, touch
from pants.contrib.cpp.toolchain.cpp_toolchain import CppToolchain
class CppToolchainTest(unittest.TestCase):
@contextmanager
def tool(self, name):
with temporary_dir() as tool_root:
tool_path = os.path.join(tool_root, name)
touch(tool_path)
chmod_plus_x(tool_path)
new_path = os.pathsep.join([tool_root] + os.environ.get("PATH", "").split(os.pathsep))
with environment_as(PATH=new_path):
yield tool_path
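# Note added for clarity (not part of the original test): the `tool` helper above
# drops an empty executable stub named `name` into a temporary directory and
# prepends that directory to PATH, so CppToolchain's tool lookup resolves to the stub.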
def test_default_compiler_from_environ(self):
with self.tool("g++") as tool_path:
with environment_as(CXX="g++"):
self.assertEqual(CppToolchain().compiler, tool_path)
self.assertEqual(
CppToolchain().compiler,
CppToolchain().register_tool(name="compiler", tool=tool_path),
)
def test_invalid_compiler(self):
cpp_toolchain = CppToolchain(compiler="not-a-command")
with self.assertRaises(CppToolchain.Error):
cpp_toolchain.compiler
def test_tool_registration(self):
with self.tool("good-tool") as tool_path:
self.assertEqual(tool_path, CppToolchain().register_tool(name="foo", tool="good-tool"))
def test_invalid_tool_registration(self):
with self.assertRaises(CppToolchain.Error):
CppToolchain().register_tool("not-a-command")
|
py | b412c97348be2fbc2726fff20a51ab511fce1aa4 | #!/usr/bin/env python
"""
Example of a practical use for the cpmd_utils.py module:
Read information from CPMD Trajectory.
Optionally calculate statistical quantities.
Plot data.
"""
import sys
import os
import numpy as np
import matplotlib.pyplot as plt
from comp_chem_py.cpmd_utils import read_standard_file, read_ENERGIES
def get_max_traj(f):
fmax = 0.0
natoms = f.shape[0]
for i in range(natoms):
# get norm of force vector
ftmp = np.linalg.norm(f[i,:])
if ftmp > fmax:
fmax = ftmp
return fmax
def get_mean_traj(f):
fmean = 0.0
natoms = f.shape[0]
for i in range(natoms):
# get norm of force vector
fmean += np.linalg.norm(f[i,:])
return fmean/natoms
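# Note added for clarity (not part of the original script): with NumPy these two
# helpers are equivalent to the vectorized forms
#   np.linalg.norm(f, axis=1).max()   and   np.linalg.norm(f, axis=1).mean()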
# calculation directory
dr = sys.argv[1]+'/'
fig, axarr = plt.subplots(2, sharex=True)
try:
ts = float(sys.argv[2])
steps = np.arange(0.0, ts*100000, ts)
print('steps from input:', len(steps), steps[-1])
except (IndexError, ValueError):
# extract time information from cpmd.out
fn = sys.argv[1]+'.out'
steps = [0.0]
tstp = [0.0]
with open(dr+fn, 'r') as cpmd:
for line in cpmd:
if 'New value for dt ions' in line:
tstp.append( float( line.split()[-1] ) )
steps.append( steps[-1] + tstp[-1] )
print('steps from file:', len(steps), steps[-1])
axarr[1].plot(steps, tstp, label='Time step' )
# ENERGIES
dic = read_ENERGIES(dr+'ENERGIES', ['E_KS', 'E_cla'])
n = len(dic['E_cla'])
axarr[0].axhline(dic['E_cla'][0], color='k')
axarr[0].plot(steps[:n], dic['E_cla'], label='Total (conserved) energy')
#axarr[0].plot(dic['E_KS'], label='Potential energy')
axarr[0].set_ylabel('Energies [a.u.]')
axarr[0].legend()
info = read_standard_file(dr+'SH_ENERG.dat')[1]
axarr[0].plot(steps[:n], info[:n,0], label='S_0')
axarr[0].plot(steps[:n], info[:n,1], '+', label='S_1')
axarr[0].plot(steps[:n], info[:n,2], label='S_2')
axarr[0].plot(steps[:n], info[:n,3], label='S_3')
## VELOCITIES AND FORCES
#veloc, forces = read_FTRAJECTORY(dr+'FTRAJECTORY')[2:4]
#
#fmax = []
#fmean = []
#vmax = []
#vmean = []
#for v, f in zip(veloc, forces):
# fmax.append(get_max_traj(f))
# fmean.append(get_mean_traj(f))
#
# vmax.append(get_max_traj(v))
# vmean.append(get_mean_traj(v))
#
#axarr[1].plot(fmax, label='max force')
#axarr[1].plot(fmean, label='mean force')
#axarr[1].set_ylabel('Forces [a.u.]')
#axarr[1].legend()
#
#axarr[2].plot(vmax, label='max velocity')
#axarr[2].plot(vmean, label='mean velocity')
#axarr[2].set_ylabel('Velocity [a.u.]')
#axarr[2].legend()
#
#
## TRANSITION PROBABILITY
#probs = read_standard_file(dr+'SH_PROBS.dat')[1]
#
#pmax = probs.max(1)
#pmean = probs.mean(1)
#
#axarr[3].plot(pmax, label='max proba')
#axarr[3].plot(pmean, label='mean proba')
#axarr[3].set_ylabel('Transition probability')
#axarr[3].legend()
fig.subplots_adjust(hspace=0.0)
plt.show()
|
py | b412ca8af5eee00162e7ebf6b9cd3276e2a97fc1 | #!/usr/bin/env python
# pylint: disable=missing-docstring
# flake8: noqa: T001
# ___ ___ _ _ ___ ___ _ _____ ___ ___
# / __| __| \| | __| _ \ /_\_ _| __| \
# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
# | |) | (_) | | .` | (_) || | | _|| |) | | | |
# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
#
# Copyright 2016 Red Hat, Inc. and/or its affiliates
# and other contributors as indicated by the @author tags.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -*- -*- -*- Begin included fragment: lib/import.py -*- -*- -*-
'''
OpenShiftCLI class that wraps the oc commands in a subprocess
'''
# pylint: disable=too-many-lines
from __future__ import print_function
import atexit
import copy
import json
import os
import re
import shutil
import subprocess
import tempfile
# pylint: disable=import-error
try:
import ruamel.yaml as yaml
except ImportError:
import yaml
from ansible.module_utils.basic import AnsibleModule
# -*- -*- -*- End included fragment: lib/import.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: doc/clusterrole -*- -*- -*-
DOCUMENTATION = '''
---
module: oc_clusterrole
short_description: Modify, and idempotently manage openshift clusterroles
description:
- Manage openshift clusterroles
options:
state:
description:
- Supported states, present, absent, list
- present - will ensure object is created or updated to the value specified
- list - will return a clusterrole
- absent - will remove a clusterrole
required: False
default: present
choices: ["present", 'absent', 'list']
aliases: []
kubeconfig:
description:
- The path for the kubeconfig file to use for authentication
required: false
default: /etc/origin/master/admin.kubeconfig
aliases: []
debug:
description:
- Turn on debug output.
required: false
default: False
aliases: []
name:
description:
- Name of the object that is being queried.
required: false
default: None
aliases: []
rules:
description:
- A list of dictionaries that have the rule parameters.
- e.g. rules=[{'apiGroups': [""], 'attributeRestrictions': None, 'verbs': ['get'], 'resources': []}]
required: false
default: None
aliases: []
author:
- "Kenny Woodson <[email protected]>"
extends_documentation_fragment: []
'''
EXAMPLES = '''
- name: query a list of env vars on dc
oc_clusterrole:
name: myclusterrole
state: list
- name: Set the following variables.
oc_clusterrole:
name: myclusterrole
rules:
apiGroups:
- ""
attributeRestrictions: null
verbs: []
resources: []
'''
# -*- -*- -*- End included fragment: doc/clusterrole -*- -*- -*-
# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
class YeditException(Exception): # pragma: no cover
''' Exception class for Yedit '''
pass
# pylint: disable=too-many-public-methods
class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
def __init__(self,
filename=None,
content=None,
content_type='yaml',
separator='.',
backup=False):
self.content = content
self._separator = separator
self.filename = filename
self.__yaml_dict = content
self.content_type = content_type
self.backup = backup
self.load(content_type=self.content_type)
if self.__yaml_dict is None:
self.__yaml_dict = {}
@property
def separator(self):
''' getter method for separator '''
return self._separator
@separator.setter
def separator(self, inc_sep):
''' setter method for separator '''
self._separator = inc_sep
@property
def yaml_dict(self):
''' getter method for yaml_dict '''
return self.__yaml_dict
@yaml_dict.setter
def yaml_dict(self, value):
''' setter method for yaml_dict '''
self.__yaml_dict = value
@staticmethod
def parse_key(key, sep='.'):
'''parse the key allowing the appropriate separator'''
common_separators = list(Yedit.com_sep - set([sep]))
return re.findall(Yedit.re_key.format(''.join(common_separators)), key)
@staticmethod
def valid_key(key, sep='.'):
'''validate the incoming key'''
common_separators = list(Yedit.com_sep - set([sep]))
if not re.match(Yedit.re_valid_key.format(''.join(common_separators)), key):
return False
return True
@staticmethod
def remove_entry(data, key, sep='.'):
''' remove data at location key '''
if key == '' and isinstance(data, dict):
data.clear()
return True
elif key == '' and isinstance(data, list):
del data[:]
return True
if not (key and Yedit.valid_key(key, sep)) and \
isinstance(data, (list, dict)):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key and isinstance(data, dict):
data = data.get(dict_key)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
return None
# process last index for remove
# expected list entry
if key_indexes[-1][0]:
if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
del data[int(key_indexes[-1][0])]
return True
# expected dict entry
elif key_indexes[-1][1]:
if isinstance(data, dict):
del data[key_indexes[-1][1]]
return True
@staticmethod
def add_entry(data, key, item=None, sep='.'):
''' Add or update an item in a dictionary using key notation a.b.c
d = {'a': {'b': 'c'}}
key = a#b
sets d['a']['b'] = item
'''
if key == '':
pass
elif (not (key and Yedit.valid_key(key, sep)) and
isinstance(data, (list, dict))):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key:
if isinstance(data, dict) and dict_key in data and data[dict_key]: # noqa: E501
data = data[dict_key]
continue
elif data and not isinstance(data, dict):
raise YeditException("Unexpected item type found while going through key " +
"path: {} (at key: {})".format(key, dict_key))
data[dict_key] = {}
data = data[dict_key]
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
raise YeditException("Unexpected item type found while going through key path: {}".format(key))
if key == '':
data = item
# process last index for add
# expected list entry
elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
data[int(key_indexes[-1][0])] = item
# expected dict entry
elif key_indexes[-1][1] and isinstance(data, dict):
data[key_indexes[-1][1]] = item
# didn't add/update to an existing list, nor add/update key to a dict
# so we must have been provided some syntax like a.b.c[<int>] = "data" for a
# non-existent array
else:
raise YeditException("Error adding to object at path: {}".format(key))
return data
@staticmethod
def get_entry(data, key, sep='.'):
''' Get an item from a dictionary with key notation a.b.c
d = {'a': {'b': 'c'}}
key = a.b
return c
'''
if key == '':
pass
elif (not (key and Yedit.valid_key(key, sep)) and
isinstance(data, (list, dict))):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes:
if dict_key and isinstance(data, dict):
data = data.get(dict_key)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
return None
return data
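# Illustrative usage of the dotted-key notation handled by parse_key/get_entry/add_entry
# (added for clarity; not part of the original module). Separators other than '.' behave
# the same way:
#   Yedit(content={'a': {'b': ['c']}}).get('a.b[0]')        # -> 'c'
#   Yedit(content={}, separator='#').put('a#b', 1)          # -> (True, {'a': {'b': 1}})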
@staticmethod
def _write(filename, contents):
''' Actually write the file contents to disk. This helps with mocking. '''
tmp_filename = filename + '.yedit'
with open(tmp_filename, 'w') as yfd:
yfd.write(contents)
os.rename(tmp_filename, filename)
def write(self):
''' write to file '''
if not self.filename:
raise YeditException('Please specify a filename.')
if self.backup and self.file_exists():
shutil.copy(self.filename, self.filename + '.orig')
# Try to set format attributes if supported
try:
self.yaml_dict.fa.set_block_style()
except AttributeError:
pass
# Try to use RoundTripDumper if supported.
try:
Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
except AttributeError:
Yedit._write(self.filename, yaml.safe_dump(self.yaml_dict, default_flow_style=False))
return (True, self.yaml_dict)
def read(self):
''' read from file '''
# check if it exists
if self.filename is None or not self.file_exists():
return None
contents = None
with open(self.filename) as yfd:
contents = yfd.read()
return contents
def file_exists(self):
''' return whether file exists '''
if os.path.exists(self.filename):
return True
return False
def load(self, content_type='yaml'):
''' return yaml file '''
contents = self.read()
if not contents and not self.content:
return None
if self.content:
if isinstance(self.content, dict):
self.yaml_dict = self.content
return self.yaml_dict
elif isinstance(self.content, str):
contents = self.content
# check if it is yaml
try:
if content_type == 'yaml' and contents:
# Try to set format attributes if supported
try:
self.yaml_dict.fa.set_block_style()
except AttributeError:
pass
# Try to use RoundTripLoader if supported.
try:
self.yaml_dict = yaml.safe_load(contents, yaml.RoundTripLoader)
except AttributeError:
self.yaml_dict = yaml.safe_load(contents)
# Try to set format attributes if supported
try:
self.yaml_dict.fa.set_block_style()
except AttributeError:
pass
elif content_type == 'json' and contents:
self.yaml_dict = json.loads(contents)
except yaml.YAMLError as err:
# Error loading yaml or json
raise YeditException('Problem with loading yaml file. {}'.format(err))
return self.yaml_dict
def get(self, key):
''' get a specified key'''
try:
entry = Yedit.get_entry(self.yaml_dict, key, self.separator)
except KeyError:
entry = None
return entry
def pop(self, path, key_or_item):
''' remove a key, value pair from a dict or an item for a list'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry is None:
return (False, self.yaml_dict)
if isinstance(entry, dict):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
if key_or_item in entry:
entry.pop(key_or_item)
return (True, self.yaml_dict)
return (False, self.yaml_dict)
elif isinstance(entry, list):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
ind = None
try:
ind = entry.index(key_or_item)
except ValueError:
return (False, self.yaml_dict)
entry.pop(ind)
return (True, self.yaml_dict)
return (False, self.yaml_dict)
def delete(self, path):
''' remove path from a dict'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry is None:
return (False, self.yaml_dict)
result = Yedit.remove_entry(self.yaml_dict, path, self.separator)
if not result:
return (False, self.yaml_dict)
return (True, self.yaml_dict)
def exists(self, path, value):
''' check if value exists at path'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if isinstance(entry, list):
if value in entry:
return True
return False
elif isinstance(entry, dict):
if isinstance(value, dict):
rval = False
for key, val in value.items():
if entry[key] != val:
rval = False
break
else:
rval = True
return rval
return value in entry
return entry == value
def append(self, path, value):
'''append value to a list'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry is None:
self.put(path, [])
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
if not isinstance(entry, list):
return (False, self.yaml_dict)
# AUDIT:maybe-no-member makes sense due to loading data from
# a serialized format.
# pylint: disable=maybe-no-member
entry.append(value)
return (True, self.yaml_dict)
# pylint: disable=too-many-arguments
def update(self, path, value, index=None, curr_value=None):
''' put path, value into a dict '''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if isinstance(entry, dict):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
if not isinstance(value, dict):
raise YeditException('Cannot replace key, value entry in dict with non-dict type. ' +
'value=[{}] type=[{}]'.format(value, type(value)))
entry.update(value)
return (True, self.yaml_dict)
elif isinstance(entry, list):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
ind = None
if curr_value:
try:
ind = entry.index(curr_value)
except ValueError:
return (False, self.yaml_dict)
elif index is not None:
ind = index
if ind is not None and entry[ind] != value:
entry[ind] = value
return (True, self.yaml_dict)
# see if it exists in the list
try:
ind = entry.index(value)
except ValueError:
# doesn't exist, append it
entry.append(value)
return (True, self.yaml_dict)
# already exists, return
if ind is not None:
return (False, self.yaml_dict)
return (False, self.yaml_dict)
def put(self, path, value):
''' put path, value into a dict '''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry == value:
return (False, self.yaml_dict)
# deepcopy didn't work
# Try to use ruamel.yaml and fallback to pyyaml
try:
tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
default_flow_style=False),
yaml.RoundTripLoader)
except AttributeError:
tmp_copy = copy.deepcopy(self.yaml_dict)
# set the format attributes if available
try:
tmp_copy.fa.set_block_style()
except AttributeError:
pass
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if result is None:
return (False, self.yaml_dict)
# When path equals "" it is a special case.
# "" refers to the root of the document
# Only update the root path (entire document) when its a list or dict
if path == '':
if isinstance(result, list) or isinstance(result, dict):
self.yaml_dict = result
return (True, self.yaml_dict)
return (False, self.yaml_dict)
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
def create(self, path, value):
''' create a yaml file '''
if not self.file_exists():
# deepcopy didn't work
# Try to use ruamel.yaml and fallback to pyyaml
try:
tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
default_flow_style=False),
yaml.RoundTripLoader)
except AttributeError:
tmp_copy = copy.deepcopy(self.yaml_dict)
# set the format attributes if available
try:
tmp_copy.fa.set_block_style()
except AttributeError:
pass
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if result is not None:
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
return (False, self.yaml_dict)
@staticmethod
def get_curr_value(invalue, val_type):
'''return the current value'''
if invalue is None:
return None
curr_value = invalue
if val_type == 'yaml':
curr_value = yaml.load(invalue)
elif val_type == 'json':
curr_value = json.loads(invalue)
return curr_value
@staticmethod
def parse_value(inc_value, vtype=''):
'''determine value type passed'''
true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE',
'on', 'On', 'ON', ]
false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE',
'off', 'Off', 'OFF']
# It came in as a string but you didn't specify value_type as string
# we will convert to bool if it matches any of the above cases
if isinstance(inc_value, str) and 'bool' in vtype:
if inc_value not in true_bools and inc_value not in false_bools:
raise YeditException('Not a boolean type. str=[{}] vtype=[{}]'.format(inc_value, vtype))
elif isinstance(inc_value, bool) and 'str' in vtype:
inc_value = str(inc_value)
# There is a special case where '' will turn into None after yaml loading it so skip
if isinstance(inc_value, str) and inc_value == '':
pass
# If vtype is not str then go ahead and attempt to yaml load it.
elif isinstance(inc_value, str) and 'str' not in vtype:
try:
inc_value = yaml.safe_load(inc_value)
except Exception:
raise YeditException('Could not determine type of incoming value. ' +
'value=[{}] vtype=[{}]'.format(type(inc_value), vtype))
return inc_value
@staticmethod
def process_edits(edits, yamlfile):
'''run through a list of edits and process them one-by-one'''
results = []
for edit in edits:
value = Yedit.parse_value(edit['value'], edit.get('value_type', ''))
if edit.get('action') == 'update':
# pylint: disable=line-too-long
curr_value = Yedit.get_curr_value(
Yedit.parse_value(edit.get('curr_value')),
edit.get('curr_value_format'))
rval = yamlfile.update(edit['key'],
value,
edit.get('index'),
curr_value)
elif edit.get('action') == 'append':
rval = yamlfile.append(edit['key'], value)
else:
rval = yamlfile.put(edit['key'], value)
if rval[0]:
results.append({'key': edit['key'], 'edit': rval[1]})
return {'changed': len(results) > 0, 'results': results}
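# Illustrative edit list for process_edits (added for clarity; not from the original
# source). With no 'action' given, each edit falls through to Yedit.put():
#   Yedit.process_edits([{'key': 'spec.replicas', 'value': 3}], Yedit(content={'spec': {}}))
#   # -> {'changed': True, 'results': [{'key': 'spec.replicas', 'edit': {'spec': {'replicas': 3}}}]}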
# pylint: disable=too-many-return-statements,too-many-branches
@staticmethod
def run_ansible(params):
'''perform the idempotent crud operations'''
yamlfile = Yedit(filename=params['src'],
backup=params['backup'],
separator=params['separator'])
state = params['state']
if params['src']:
rval = yamlfile.load()
if yamlfile.yaml_dict is None and state != 'present':
return {'failed': True,
'msg': 'Error opening file [{}]. Verify that the '.format(params['src']) +
'file exists, that it is has correct permissions, and is valid yaml.'}
if state == 'list':
if params['content']:
content = Yedit.parse_value(params['content'], params['content_type'])
yamlfile.yaml_dict = content
if params['key']:
rval = yamlfile.get(params['key'])
return {'changed': False, 'result': rval, 'state': state}
elif state == 'absent':
if params['content']:
content = Yedit.parse_value(params['content'], params['content_type'])
yamlfile.yaml_dict = content
if params['update']:
rval = yamlfile.pop(params['key'], params['value'])
else:
rval = yamlfile.delete(params['key'])
if rval[0] and params['src']:
yamlfile.write()
return {'changed': rval[0], 'result': rval[1], 'state': state}
elif state == 'present':
# check if content is different than what is in the file
if params['content']:
content = Yedit.parse_value(params['content'], params['content_type'])
# We had no edits to make and the contents are the same
if yamlfile.yaml_dict == content and \
params['value'] is None:
return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
yamlfile.yaml_dict = content
# If we were passed a key, value then
# we encapsulate it in a list and process it
# Key, Value passed to the module : Converted to Edits list #
edits = []
_edit = {}
if params['value'] is not None:
_edit['value'] = params['value']
_edit['value_type'] = params['value_type']
_edit['key'] = params['key']
if params['update']:
_edit['action'] = 'update'
_edit['curr_value'] = params['curr_value']
_edit['curr_value_format'] = params['curr_value_format']
_edit['index'] = params['index']
elif params['append']:
_edit['action'] = 'append'
edits.append(_edit)
elif params['edits'] is not None:
edits = params['edits']
if edits:
results = Yedit.process_edits(edits, yamlfile)
# if there were changes and a src provided to us we need to write
if results['changed'] and params['src']:
yamlfile.write()
return {'changed': results['changed'], 'result': results['results'], 'state': state}
# no edits to make
if params['src']:
# pylint: disable=redefined-variable-type
rval = yamlfile.write()
return {'changed': rval[0],
'result': rval[1],
'state': state}
# We were passed content but no src, key or value, or edits. Return contents in memory
return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
return {'failed': True, 'msg': 'Unknown state passed'}
# -*- -*- -*- End included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: lib/base.py -*- -*- -*-
# pylint: disable=too-many-lines
# noqa: E301,E302,E303,T001
class OpenShiftCLIError(Exception):
'''Exception class for openshiftcli'''
pass
ADDITIONAL_PATH_LOOKUPS = ['/usr/local/bin', os.path.expanduser('~/bin')]
def locate_oc_binary():
''' Find and return oc binary file '''
# https://github.com/openshift/openshift-ansible/issues/3410
# oc can be in /usr/local/bin in some cases, but that may not
# be in $PATH due to ansible/sudo
paths = os.environ.get("PATH", os.defpath).split(os.pathsep) + ADDITIONAL_PATH_LOOKUPS
oc_binary = 'oc'
# Use shutil.which if it is available, otherwise fallback to a naive path search
try:
which_result = shutil.which(oc_binary, path=os.pathsep.join(paths))
if which_result is not None:
oc_binary = which_result
except AttributeError:
for path in paths:
if os.path.exists(os.path.join(path, oc_binary)):
oc_binary = os.path.join(path, oc_binary)
break
return oc_binary
# pylint: disable=too-few-public-methods
class OpenShiftCLI(object):
''' Class to wrap the command line tools '''
def __init__(self,
namespace,
kubeconfig='/etc/origin/master/admin.kubeconfig',
verbose=False,
all_namespaces=False):
''' Constructor for OpenshiftCLI '''
self.namespace = namespace
self.verbose = verbose
self.kubeconfig = Utils.create_tmpfile_copy(kubeconfig)
self.all_namespaces = all_namespaces
self.oc_binary = locate_oc_binary()
# Pylint allows only 5 arguments to be passed.
# pylint: disable=too-many-arguments
def _replace_content(self, resource, rname, content, force=False, sep='.'):
''' replace the current object with the content '''
res = self._get(resource, rname)
if not res['results']:
return res
fname = Utils.create_tmpfile(rname + '-')
yed = Yedit(fname, res['results'][0], separator=sep)
changes = []
for key, value in content.items():
changes.append(yed.put(key, value))
if any([change[0] for change in changes]):
yed.write()
atexit.register(Utils.cleanup, [fname])
return self._replace(fname, force)
return {'returncode': 0, 'updated': False}
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
# We are removing the 'resourceVersion' to handle
# a race condition when modifying oc objects
yed = Yedit(fname)
results = yed.delete('metadata.resourceVersion')
if results[0]:
yed.write()
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
return self.openshift_cmd(cmd)
def _create_from_content(self, rname, content):
'''create a temporary file and then call oc create on it'''
fname = Utils.create_tmpfile(rname + '-')
yed = Yedit(fname, content=content)
yed.write()
atexit.register(Utils.cleanup, [fname])
return self._create(fname)
def _create(self, fname):
'''call oc create on a filename'''
return self.openshift_cmd(['create', '-f', fname])
def _delete(self, resource, name=None, selector=None):
'''call oc delete on a resource'''
cmd = ['delete', resource]
if selector is not None:
cmd.append('--selector={}'.format(selector))
elif name is not None:
cmd.append(name)
else:
raise OpenShiftCLIError('Either name or selector is required when calling delete.')
return self.openshift_cmd(cmd)
def _process(self, template_name, create=False, params=None, template_data=None): # noqa: E501
'''process a template
template_name: the name of the template to process
create: whether to send to oc create after processing
params: the parameters for the template
template_data: the incoming template's data; instead of a file
'''
cmd = ['process']
if template_data:
cmd.extend(['-f', '-'])
else:
cmd.append(template_name)
if params:
param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
results = self.openshift_cmd(cmd, output=True, input_data=template_data)
if results['returncode'] != 0 or not create:
return results
fname = Utils.create_tmpfile(template_name + '-')
yed = Yedit(fname, results['results'])
yed.write()
atexit.register(Utils.cleanup, [fname])
return self.openshift_cmd(['create', '-f', fname])
def _get(self, resource, name=None, selector=None):
'''return a resource by name '''
cmd = ['get', resource]
if selector is not None:
cmd.append('--selector={}'.format(selector))
elif name is not None:
cmd.append(name)
cmd.extend(['-o', 'json'])
rval = self.openshift_cmd(cmd, output=True)
# Ensure results are returned in an array
if 'items' in rval:
rval['results'] = rval['items']
elif not isinstance(rval['results'], list):
rval['results'] = [rval['results']]
return rval
def _schedulable(self, node=None, selector=None, schedulable=True):
''' perform oadm manage-node schedulable '''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector={}'.format(selector))
cmd.append('--schedulable={}'.format(schedulable))
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
def _list_pods(self, node=None, selector=None, pod_selector=None):
''' perform oadm list pods
node: the node in which to list pods
selector: the label selector filter if provided
pod_selector: the pod selector filter if provided
'''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector={}'.format(selector))
if pod_selector:
cmd.append('--pod-selector={}'.format(pod_selector))
cmd.extend(['--list-pods', '-o', 'json'])
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
# pylint: disable=too-many-arguments
def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False):
''' perform oadm manage-node evacuate '''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector={}'.format(selector))
if dry_run:
cmd.append('--dry-run')
if pod_selector:
cmd.append('--pod-selector={}'.format(pod_selector))
if grace_period:
cmd.append('--grace-period={}'.format(int(grace_period)))
if force:
cmd.append('--force')
cmd.append('--evacuate')
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
def _version(self):
''' return the openshift version'''
return self.openshift_cmd(['version'], output=True, output_type='raw')
def _import_image(self, url=None, name=None, tag=None):
''' perform image import '''
cmd = ['import-image']
image = '{0}'.format(name)
if tag:
image += ':{0}'.format(tag)
cmd.append(image)
if url:
cmd.append('--from={0}/{1}'.format(url, image))
cmd.append('-n{0}'.format(self.namespace))
cmd.append('--confirm')
return self.openshift_cmd(cmd)
def _run(self, cmds, input_data):
''' Actually executes the command. This makes mocking easier. '''
curr_env = os.environ.copy()
curr_env.update({'KUBECONFIG': self.kubeconfig})
proc = subprocess.Popen(cmds,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=curr_env)
stdout, stderr = proc.communicate(input_data)
return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
'''Base command for oc '''
cmds = [self.oc_binary]
if oadm:
cmds.append('adm')
cmds.extend(cmd)
if self.all_namespaces:
cmds.extend(['--all-namespaces'])
elif self.namespace is not None and self.namespace.lower() not in ['none', 'empty']: # E501
cmds.extend(['-n', self.namespace])
if self.verbose:
print(' '.join(cmds))
try:
returncode, stdout, stderr = self._run(cmds, input_data)
except OSError as ex:
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
"cmd": ' '.join(cmds)}
if output_type == 'json':
rval['results'] = {}
if output and stdout:
try:
rval['results'] = json.loads(stdout)
except ValueError as verr:
if "No JSON object could be decoded" in verr.args:
rval['err'] = verr.args
elif output_type == 'raw':
rval['results'] = stdout if output else ''
if self.verbose:
print("STDOUT: {0}".format(stdout))
print("STDERR: {0}".format(stderr))
if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
"stdout": stdout})
return rval
class Utils(object): # pragma: no cover
''' utilities for openshiftcli modules '''
@staticmethod
def _write(filename, contents):
''' Actually write the file contents to disk. This helps with mocking. '''
with open(filename, 'w') as sfd:
sfd.write(contents)
@staticmethod
def create_tmp_file_from_contents(rname, data, ftype='yaml'):
''' create a file in tmp with name and contents'''
tmp = Utils.create_tmpfile(prefix=rname)
if ftype == 'yaml':
# AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
# pylint: disable=no-member
if hasattr(yaml, 'RoundTripDumper'):
Utils._write(tmp, yaml.dump(data, Dumper=yaml.RoundTripDumper))
else:
Utils._write(tmp, yaml.safe_dump(data, default_flow_style=False))
elif ftype == 'json':
Utils._write(tmp, json.dumps(data))
else:
Utils._write(tmp, data)
# Register cleanup when module is done
atexit.register(Utils.cleanup, [tmp])
return tmp
@staticmethod
def create_tmpfile_copy(inc_file):
'''create a temporary copy of a file'''
tmpfile = Utils.create_tmpfile('lib_openshift-')
Utils._write(tmpfile, open(inc_file).read())
# Cleanup the tmpfile
atexit.register(Utils.cleanup, [tmpfile])
return tmpfile
@staticmethod
def create_tmpfile(prefix='tmp'):
''' Generates and returns a temporary file name '''
with tempfile.NamedTemporaryFile(prefix=prefix, delete=False) as tmp:
return tmp.name
@staticmethod
def create_tmp_files_from_contents(content, content_type=None):
'''Turn an array of dict: filename, content into a files array'''
if not isinstance(content, list):
content = [content]
files = []
for item in content:
path = Utils.create_tmp_file_from_contents(item['path'] + '-',
item['data'],
ftype=content_type)
files.append({'name': os.path.basename(item['path']),
'path': path})
return files
@staticmethod
def cleanup(files):
'''Clean up on exit '''
for sfile in files:
if os.path.exists(sfile):
if os.path.isdir(sfile):
shutil.rmtree(sfile)
elif os.path.isfile(sfile):
os.remove(sfile)
@staticmethod
def exists(results, _name):
''' Check to see if the results include the name '''
if not results:
return False
if Utils.find_result(results, _name):
return True
return False
@staticmethod
def find_result(results, _name):
''' Find the specified result by name'''
rval = None
for result in results:
if 'metadata' in result and result['metadata']['name'] == _name:
rval = result
break
return rval
@staticmethod
def get_resource_file(sfile, sfile_type='yaml'):
''' return the service file '''
contents = None
with open(sfile) as sfd:
contents = sfd.read()
if sfile_type == 'yaml':
# AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
# pylint: disable=no-member
if hasattr(yaml, 'RoundTripLoader'):
contents = yaml.load(contents, yaml.RoundTripLoader)
else:
contents = yaml.safe_load(contents)
elif sfile_type == 'json':
contents = json.loads(contents)
return contents
@staticmethod
def filter_versions(stdout):
''' filter the oc version output '''
version_dict = {}
version_search = ['oc', 'openshift', 'kubernetes']
for line in stdout.strip().split('\n'):
for term in version_search:
if not line:
continue
if line.startswith(term):
version_dict[term] = line.split()[-1]
# horrible hack to get openshift version in Openshift 3.2
# By default "oc version" in 3.2 does not return an "openshift" version
if "openshift" not in version_dict:
version_dict["openshift"] = version_dict["oc"]
return version_dict
@staticmethod
def add_custom_versions(versions):
''' create custom versions strings '''
versions_dict = {}
for tech, version in versions.items():
# clean up "-" from version
if "-" in version:
version = version.split("-")[0]
if version.startswith('v'):
versions_dict[tech + '_numeric'] = version[1:].split('+')[0]
# "v3.3.0.33" is what we have, we want "3.3"
versions_dict[tech + '_short'] = version[1:4]
return versions_dict
@staticmethod
def openshift_installed():
''' check if openshift is installed '''
import rpm
transaction_set = rpm.TransactionSet()
rpmquery = transaction_set.dbMatch("name", "atomic-openshift")
return rpmquery.count() > 0
# Disabling too-many-branches. This is a yaml dictionary comparison function
# pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
@staticmethod
def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
''' Given a user defined definition, compare it with the results given back by our query. '''
# Currently these values are autogenerated and we do not need to check them
skip = ['metadata', 'status']
if skip_keys:
skip.extend(skip_keys)
for key, value in result_def.items():
if key in skip:
continue
# Both are lists
if isinstance(value, list):
if key not in user_def:
if debug:
print('User data does not have key [%s]' % key)
print('User data: %s' % user_def)
return False
if not isinstance(user_def[key], list):
if debug:
print('user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key]))
return False
if len(user_def[key]) != len(value):
if debug:
print("List lengths are not equal.")
print("key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value)))
print("user_def: %s" % user_def[key])
print("value: %s" % value)
return False
for values in zip(user_def[key], value):
if isinstance(values[0], dict) and isinstance(values[1], dict):
if debug:
print('sending list - list')
print(type(values[0]))
print(type(values[1]))
result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
if not result:
print('list compare returned false')
return False
elif value != user_def[key]:
if debug:
print('value should be identical')
print(user_def[key])
print(value)
return False
# recurse on a dictionary
elif isinstance(value, dict):
if key not in user_def:
if debug:
print("user_def does not have key [%s]" % key)
return False
if not isinstance(user_def[key], dict):
if debug:
print("dict returned false: not instance of dict")
return False
# before passing ensure keys match
api_values = set(value.keys()) - set(skip)
user_values = set(user_def[key].keys()) - set(skip)
if api_values != user_values:
if debug:
print("keys are not equal in dict")
print(user_values)
print(api_values)
return False
result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
if not result:
if debug:
print("dict returned false")
print(result)
return False
# Verify each key, value pair is the same
else:
if key not in user_def or value != user_def[key]:
if debug:
print("value not equal; user_def does not have key")
print(key)
print(value)
if key in user_def:
print(user_def[key])
return False
if debug:
print('returning true')
return True
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
self.kubeconfig = kubeconfig
self.name = rname
self.namespace = namespace
self._options = options
@property
def config_options(self):
''' return config options '''
return self._options
def to_option_list(self, ascommalist=''):
'''return all options as a string
if ascommalist is set to the name of a key, and
the value of that key is a dict, format the dict
as a list of comma delimited key=value pairs'''
return self.stringify(ascommalist)
def stringify(self, ascommalist=''):
''' return the options hash as cli params in a string
if ascommalist is set to the name of a key, and
the value of that key is a dict, format the dict
as a list of comma delimited key=value pairs '''
rval = []
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
if key == ascommalist:
val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
else:
val = data['value']
rval.append('--{}={}'.format(key.replace('_', '-'), val))
return rval
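# Illustrative example (added for clarity; not part of the original module): an options
# hash such as {'node_selector': {'value': 'region=infra', 'include': True}} stringifies
# to ['--node-selector=region=infra']; underscores in option keys become dashes on the CLI.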
# -*- -*- -*- End included fragment: lib/base.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: lib/rule.py -*- -*- -*-
class Rule(object):
'''class to represent a clusterrole rule
Example Rule Object's yaml:
- apiGroups:
- ""
attributeRestrictions: null
resources:
- persistentvolumes
verbs:
- create
- delete
- deletecollection
- get
- list
- patch
- update
- watch
'''
def __init__(self,
api_groups=None,
attr_restrictions=None,
resources=None,
verbs=None):
self.__api_groups = api_groups if api_groups is not None else [""]
self.__verbs = verbs if verbs is not None else []
self.__resources = resources if resources is not None else []
self.__attribute_restrictions = attr_restrictions if attr_restrictions is not None else None
@property
def verbs(self):
'''property for verbs'''
if self.__verbs is None:
return []
return self.__verbs
@verbs.setter
def verbs(self, data):
'''setter for verbs'''
self.__verbs = data
@property
def api_groups(self):
'''property for api_groups'''
if self.__api_groups is None:
return []
return self.__api_groups
@api_groups.setter
def api_groups(self, data):
'''setter for api_groups'''
self.__api_groups = data
@property
def resources(self):
'''property for resources'''
if self.__resources is None:
return []
return self.__resources
@resources.setter
def resources(self, data):
'''setter for resources'''
self.__resources = data
@property
def attribute_restrictions(self):
'''property for attribute_restrictions'''
return self.__attribute_restrictions
@attribute_restrictions.setter
def attribute_restrictions(self, data):
'''setter for attribute_restrictions'''
self.__attribute_restrictions = data
def add_verb(self, inc_verb):
'''add a verb to the verbs array'''
self.verbs.append(inc_verb)
def add_api_group(self, inc_apigroup):
'''add an api_group to the api_groups array'''
self.api_groups.append(inc_apigroup)
def add_resource(self, inc_resource):
'''add a resource to the resources array'''
self.resources.append(inc_resource)
def remove_verb(self, inc_verb):
'''remove a verb from the verbs array'''
try:
self.verbs.remove(inc_verb)
return True
except ValueError:
pass
return False
def remove_api_group(self, inc_api_group):
'''remove an api_group from the api_groups array'''
try:
self.api_groups.remove(inc_api_group)
return True
except ValueError:
pass
return False
def remove_resource(self, inc_resource):
'''remove a resource from the resources array'''
try:
self.resources.remove(inc_resource)
return True
except ValueError:
pass
return False
def __eq__(self, other):
'''return whether rules are equal'''
return (self.attribute_restrictions == other.attribute_restrictions and
self.api_groups == other.api_groups and
self.resources == other.resources and
self.verbs == other.verbs)
@staticmethod
def parse_rules(inc_rules):
'''create rules from an array'''
results = []
for rule in inc_rules:
results.append(Rule(rule.get('apiGroups', ['']),
rule.get('attributeRestrictions', None),
rule.get('resources', []),
rule.get('verbs', [])))
return results
# -*- -*- -*- End included fragment: lib/rule.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: lib/clusterrole.py -*- -*- -*-
# pylint: disable=too-many-public-methods
class ClusterRole(Yedit):
''' Class to model an openshift ClusterRole'''
rules_path = "rules"
def __init__(self, name=None, content=None):
''' Constructor for clusterrole '''
if content is None:
content = ClusterRole.builder(name).yaml_dict
super(ClusterRole, self).__init__(content=content)
self.__rules = Rule.parse_rules(self.get(ClusterRole.rules_path)) or []
@property
def rules(self):
return self.__rules
@rules.setter
def rules(self, data):
self.__rules = data
self.put(ClusterRole.rules_path, self.__rules)
def rule_exists(self, inc_rule):
'''attempt to find the inc_rule in the rules list'''
for rule in self.rules:
if rule == inc_rule:
return True
return False
def compare(self, other, verbose=False):
'''compare function for clusterrole'''
for rule in other.rules:
if rule not in self.rules:
if verbose:
print('Rule in other not found in self. [{}]'.format(rule))
return False
for rule in self.rules:
if rule not in other.rules:
if verbose:
print('Rule in self not found in other. [{}]'.format(rule))
return False
return True
@staticmethod
def builder(name='default_clusterrole', rules=None):
'''return a clusterrole with name and/or rules'''
if rules is None:
rules = [{'apiGroups': [""],
'attributeRestrictions': None,
'verbs': [],
'resources': []}]
content = {
'apiVersion': 'v1',
'kind': 'ClusterRole',
'metadata': {'name': '{}'.format(name)},
'rules': rules,
}
return ClusterRole(content=content)
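# Illustrative example (added for clarity; not part of the original module):
#   ClusterRole.builder('reader').yaml_dict
#   # -> {'apiVersion': 'v1', 'kind': 'ClusterRole', 'metadata': {'name': 'reader'},
#   #     'rules': [{'apiGroups': [''], 'attributeRestrictions': None,
#   #                'verbs': [], 'resources': []}]}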
# -*- -*- -*- End included fragment: lib/clusterrole.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: class/oc_clusterrole.py -*- -*- -*-
# pylint: disable=too-many-instance-attributes
class OCClusterRole(OpenShiftCLI):
''' Class to manage clusterrole objects'''
kind = 'clusterrole'
def __init__(self,
name,
rules=None,
kubeconfig=None,
verbose=False):
''' Constructor for OCClusterRole '''
super(OCClusterRole, self).__init__(None, kubeconfig=kubeconfig, verbose=verbose)
self.verbose = verbose
self.name = name
self._clusterrole = None
self._inc_clusterrole = ClusterRole.builder(name, rules)
@property
def clusterrole(self):
''' property for clusterrole'''
if self._clusterrole is None:
self.get()
return self._clusterrole
@clusterrole.setter
def clusterrole(self, data):
''' setter function for clusterrole property'''
self._clusterrole = data
@property
def inc_clusterrole(self):
''' property for inc_clusterrole'''
return self._inc_clusterrole
@inc_clusterrole.setter
def inc_clusterrole(self, data):
''' setter function for inc_clusterrole property'''
self._inc_clusterrole = data
def exists(self):
''' return whether a clusterrole exists '''
if self.clusterrole:
return True
return False
def get(self):
'''return a clusterrole '''
result = self._get(self.kind, self.name)
if result['returncode'] == 0:
self.clusterrole = ClusterRole(content=result['results'][0])
result['results'] = self.clusterrole.yaml_dict
elif '"{}" not found'.format(self.name) in result['stderr']:
result['returncode'] = 0
self.clusterrole = None
return result
def delete(self):
'''delete the object'''
return self._delete(self.kind, self.name)
def create(self):
'''create a clusterrole from the proposed incoming clusterrole'''
return self._create_from_content(self.name, self.inc_clusterrole.yaml_dict)
def update(self):
'''update a project'''
return self._replace_content(self.kind, self.name, self.inc_clusterrole.yaml_dict)
def needs_update(self):
''' verify an update is needed'''
return not self.clusterrole.compare(self.inc_clusterrole, self.verbose)
# pylint: disable=too-many-return-statements,too-many-branches
@staticmethod
def run_ansible(params, check_mode):
'''run the idempotent ansible code'''
oc_clusterrole = OCClusterRole(params['name'],
params['rules'],
params['kubeconfig'],
params['debug'])
state = params['state']
api_rval = oc_clusterrole.get()
#####
# Get
#####
if state == 'list':
return {'changed': False, 'results': api_rval, 'state': state}
########
# Delete
########
if state == 'absent':
if oc_clusterrole.exists():
if check_mode:
return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a delete.'}
api_rval = oc_clusterrole.delete()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
return {'changed': True, 'results': api_rval, 'state': state}
return {'changed': False, 'state': state}
if state == 'present':
########
# Create
########
if not oc_clusterrole.exists():
if check_mode:
return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a create.'}
# Create it here
api_rval = oc_clusterrole.create()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
# return the created object
api_rval = oc_clusterrole.get()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
return {'changed': True, 'results': api_rval, 'state': state}
########
# Update
########
if oc_clusterrole.needs_update():
if check_mode:
return {'changed': True, 'msg': 'CHECK_MODE: Would have performed an update.'}
api_rval = oc_clusterrole.update()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
# return the created object
api_rval = oc_clusterrole.get()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
return {'changed': True, 'results': api_rval, 'state': state}
return {'changed': False, 'results': api_rval, 'state': state}
return {'failed': True,
'changed': False,
'msg': 'Unknown state passed. [%s]' % state}
# -*- -*- -*- End included fragment: class/oc_clusterrole.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: ansible/oc_clusterrole.py -*- -*- -*-
def main():
'''
ansible oc module for clusterrole
'''
module = AnsibleModule(
argument_spec=dict(
kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
state=dict(default='present', type='str',
choices=['present', 'absent', 'list']),
debug=dict(default=False, type='bool'),
name=dict(default=None, type='str'),
rules=dict(default=None, type='list'),
),
supports_check_mode=True,
)
results = OCClusterRole.run_ansible(module.params, module.check_mode)
if 'failed' in results:
module.fail_json(**results)
module.exit_json(**results)
if __name__ == '__main__':
main()
# -*- -*- -*- End included fragment: ansible/oc_clusterrole.py -*- -*- -*-
|